Commit 0e5b044c authored by Konstantin Komarov

fs/ntfs3: Refactoring attr_set_size to restore after errors

Added comments to the code.
Added two undo labels for restoring state after errors.
Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
parent c12df45e
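The refactoring is easier to follow with the new control flow in mind: success returns 0 directly, recoverable failures unwind through undo_2 (re-pack the runs) and then undo_1 (free the newly allocated clusters), and unrecoverable lookup failures, or a failed undo, converge on marking the inode bad. Below is a minimal, self-contained C sketch of that staged-undo pattern; the step_... and undo_... helpers are hypothetical placeholders for the real allocation, run-packing and list-expansion calls, and only the label layout mirrors attr_set_size().

/*
 * Sketch of the staged-undo pattern introduced by this commit.
 * All helpers below are hypothetical stand-ins, not ntfs3 functions.
 */
#include <stdbool.h>
#include <stdio.h>

static bool record_ok(void)  { return true; } /* stands in for lookups that may fail  */
static int  step_alloc(void) { return 0; }    /* e.g. allocate clusters               */
static int  step_pack(void)  { return 0; }    /* e.g. pack runs into the record       */
static int  step_grow(void)  { return -1; }   /* e.g. expand attr list; forced fail   */
static int  undo_pack(void)  { return 0; }    /* undoing may itself fail              */
static void undo_alloc(void) { }
static void mark_bad(void)   { puts("inode marked bad"); } /* ~ _ntfs_bad_inode()     */

static int set_size_sketch(void)
{
	int err;
	bool is_bad = false;

	if (!record_ok()) {
		err = -1;
		goto bad_inode;	/* unrecoverable: no state to restore */
	}

	err = step_alloc();
	if (err)
		goto out;	/* nothing allocated yet, plain exit */

	err = step_pack();
	if (err)
		goto undo_1;	/* free what step_alloc() grabbed */

	err = step_grow();
	if (err)
		goto undo_2;	/* unwind step_pack(), then step_alloc() */

	return 0;

undo_2:
	if (undo_pack())	/* record cannot be restored cleanly */
		is_bad = true;
undo_1:
	undo_alloc();
out:
	if (is_bad) {
bad_inode:
		mark_bad();
	}
	return err;
}

int main(void)
{
	printf("set_size_sketch() = %d\n", set_size_sketch());
	return 0;
}

Placing bad_inode inside the if (is_bad) block lets both the direct "metadata is already inconsistent" jumps and a failed undo converge on the same marking call before the common return err, which is the shape the diff below gives attr_set_size().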
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -173,7 +173,6 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 {
 	int err;
 	CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0;
-	struct wnd_bitmap *wnd = &sbi->used.bitmap;
 	size_t cnt = run->count;
 
 	for (;;) {
@@ -196,9 +195,7 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run,
 		/* Add new fragment into run storage. */
 		if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) {
 			/* Undo last 'ntfs_look_for_free_space' */
-			down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
-			wnd_set_free(wnd, lcn, flen);
-			up_write(&wnd->rw_lock);
+			mark_as_free_ex(sbi, lcn, len, false);
 			err = -ENOMEM;
 			goto out;
 		}
@@ -419,40 +416,44 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 	struct mft_inode *mi, *mi_b;
 	CLST alen, vcn, lcn, new_alen, old_alen, svcn, evcn;
 	CLST next_svcn, pre_alloc = -1, done = 0;
-	bool is_ext;
+	bool is_ext, is_bad = false;
 	u32 align;
 	struct MFT_REC *rec;
 
 again:
+	alen = 0;
 	le_b = NULL;
 	attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len, NULL,
 			      &mi_b);
 	if (!attr_b) {
 		err = -ENOENT;
-		goto out;
+		goto bad_inode;
 	}
 
 	if (!attr_b->non_res) {
 		err = attr_set_size_res(ni, attr_b, le_b, mi_b, new_size, run,
 					&attr_b);
-		if (err || !attr_b->non_res)
-			goto out;
+		if (err)
+			return err;
+
+		/* Return if file is still resident. */
+		if (!attr_b->non_res)
+			goto ok1;
 
 		/* Layout of records may be changed, so do a full search. */
 		goto again;
 	}
 
 	is_ext = is_attr_ext(attr_b);
-
-again_1:
 	align = sbi->cluster_size;
 	if (is_ext)
 		align <<= attr_b->nres.c_unit;
 
 	old_valid = le64_to_cpu(attr_b->nres.valid_size);
 	old_size = le64_to_cpu(attr_b->nres.data_size);
 	old_alloc = le64_to_cpu(attr_b->nres.alloc_size);
+
+again_1:
 	old_alen = old_alloc >> cluster_bits;
 
 	new_alloc = (new_size + align - 1) & ~(u64)(align - 1);
@@ -475,24 +476,27 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		mi = mi_b;
 	} else if (!le_b) {
 		err = -EINVAL;
-		goto out;
+		goto bad_inode;
 	} else {
 		le = le_b;
 		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len, &vcn,
 				    &mi);
 		if (!attr) {
 			err = -EINVAL;
-			goto out;
+			goto bad_inode;
 		}
 
 next_le_1:
 		svcn = le64_to_cpu(attr->nres.svcn);
 		evcn = le64_to_cpu(attr->nres.evcn);
 	}
-
+	/*
+	 * Here we have:
+	 * attr,mi,le - last attribute segment (containing 'vcn').
+	 * attr_b,mi_b,le_b - base (primary) attribute segment.
+	 */
 next_le:
 	rec = mi->mrec;
-
 	err = attr_load_runs(attr, ni, run, NULL);
 	if (err)
 		goto out;
@@ -507,6 +511,13 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 			goto ok;
 		}
 
+		/*
+		 * Add clusters. In simple case we have to:
+		 *  - allocate space (vcn, lcn, len)
+		 *  - update packed run in 'mi'
+		 *  - update attr->nres.evcn
+		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
+		 */
 		to_allocate = new_alen - old_alen;
 add_alloc_in_same_attr_seg:
 		lcn = 0;
@@ -520,9 +531,11 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 			pre_alloc = 0;
 			if (type == ATTR_DATA && !name_len &&
 			    sbi->options->prealloc) {
-				CLST new_alen2 = bytes_to_cluster(
-					sbi, get_pre_allocated(new_size));
-				pre_alloc = new_alen2 - new_alen;
+				pre_alloc =
+					bytes_to_cluster(
+						sbi,
+						get_pre_allocated(new_size)) -
+					new_alen;
 			}
 
 			/* Get the last LCN to allocate from. */
@@ -580,7 +593,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 pack_runs:
 		err = mi_pack_runs(mi, attr, run, vcn - svcn);
 		if (err)
-			goto out;
+			goto undo_1;
 
 		next_svcn = le64_to_cpu(attr->nres.evcn) + 1;
 		new_alloc_tmp = (u64)next_svcn << cluster_bits;
@@ -614,7 +627,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		if (type == ATTR_LIST) {
 			err = ni_expand_list(ni);
 			if (err)
-				goto out;
+				goto undo_2;
 			if (next_svcn < vcn)
 				goto pack_runs;
 
@@ -624,8 +637,9 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 
 		if (!ni->attr_list.size) {
 			err = ni_create_attr_list(ni);
+			/* In case of error layout of records is not changed. */
 			if (err)
-				goto out;
+				goto undo_2;
 			/* Layout of records is changed. */
 		}
 
@@ -638,47 +652,56 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		err = ni_insert_nonresident(ni, type, name, name_len, run,
 					    next_svcn, vcn - next_svcn,
 					    attr_b->flags, &attr, &mi, NULL);
-		if (err)
-			goto out;
-
-		if (!is_mft)
-			run_truncate_head(run, evcn + 1);
-
-		svcn = le64_to_cpu(attr->nres.svcn);
-		evcn = le64_to_cpu(attr->nres.evcn);
 
-		le_b = NULL;
 		/*
 		 * Layout of records maybe changed.
 		 * Find base attribute to update.
 		 */
+		le_b = NULL;
 		attr_b = ni_find_attr(ni, NULL, &le_b, type, name, name_len,
 				      NULL, &mi_b);
 		if (!attr_b) {
-			err = -ENOENT;
-			goto out;
+			err = -EINVAL;
+			goto bad_inode;
 		}
 
-		attr_b->nres.alloc_size = cpu_to_le64((u64)vcn << cluster_bits);
-		attr_b->nres.data_size = attr_b->nres.alloc_size;
-		attr_b->nres.valid_size = attr_b->nres.alloc_size;
+		if (err) {
+			/* ni_insert_nonresident failed. */
+			attr = NULL;
+			goto undo_2;
+		}
+
+		if (!is_mft)
+			run_truncate_head(run, evcn + 1);
+
+		svcn = le64_to_cpu(attr->nres.svcn);
+		evcn = le64_to_cpu(attr->nres.evcn);
+
+		/*
+		 * Attribute is in consistency state.
+		 * Save this point to restore to if next steps fail.
		 */
+		old_valid = old_size = old_alloc = (u64)vcn << cluster_bits;
+		attr_b->nres.valid_size = attr_b->nres.data_size =
+			attr_b->nres.alloc_size = cpu_to_le64(old_size);
 		mi_b->dirty = true;
 		goto again_1;
 	}
 
 	if (new_size != old_size ||
 	    (new_alloc != old_alloc && !keep_prealloc)) {
+		/*
+		 * Truncate clusters. In simple case we have to:
+		 *  - update packed run in 'mi'
+		 *  - update attr->nres.evcn
+		 *  - update attr_b->nres.data_size/attr_b->nres.alloc_size
+		 *  - mark and trim clusters as free (vcn, lcn, len)
+		 */
+		CLST dlen = 0;
+
 		vcn = max(svcn, new_alen);
 		new_alloc_tmp = (u64)vcn << cluster_bits;
 
-		alen = 0;
-		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &alen,
-					true);
-		if (err)
-			goto out;
-
-		run_truncate(run, vcn);
-
 		if (vcn > svcn) {
 			err = mi_pack_runs(mi, attr, run, vcn - svcn);
 			if (err)
@@ -697,7 +720,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 
 			if (!al_remove_le(ni, le)) {
 				err = -EINVAL;
-				goto out;
+				goto bad_inode;
 			}
 
 			le = (struct ATTR_LIST_ENTRY *)((u8 *)le - le_sz);
@@ -723,12 +746,20 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 				attr_b->nres.valid_size =
 					attr_b->nres.alloc_size;
 		}
+		mi_b->dirty = true;
 
-		if (is_ext)
+		err = run_deallocate_ex(sbi, run, vcn, evcn - vcn + 1, &dlen,
+					true);
+		if (err)
+			goto out;
+
+		if (is_ext) {
+			/* dlen - really deallocated clusters. */
 			le64_sub_cpu(&attr_b->nres.total_size,
-				     ((u64)alen << cluster_bits));
+				     ((u64)dlen << cluster_bits));
+		}
 
-		mi_b->dirty = true;
+		run_truncate(run, vcn);
 
 		if (new_alloc_tmp <= new_alloc)
 			goto ok;
@@ -747,7 +778,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		if (le->type != type || le->name_len != name_len ||
 		    memcmp(le_name(le), name, name_len * sizeof(short))) {
 			err = -EINVAL;
-			goto out;
+			goto bad_inode;
 		}
 
 		err = ni_load_mi(ni, le, &mi);
@@ -757,7 +788,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		attr = mi_find_attr(mi, NULL, type, name, name_len, &le->id);
 		if (!attr) {
 			err = -EINVAL;
-			goto out;
+			goto bad_inode;
 		}
 		goto next_le_1;
 	}
@@ -772,13 +803,13 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		}
 	}
 
-out:
-	if (!err && attr_b && ret)
+ok1:
+	if (ret)
 		*ret = attr_b;
 
 	/* Update inode_set_bytes. */
-	if (!err && ((type == ATTR_DATA && !name_len) ||
+	if (((type == ATTR_DATA && !name_len) ||
 	     (type == ATTR_ALLOC && name == I30_NAME))) {
 		bool dirty = false;
 
 		if (ni->vfs_inode.i_size != new_size) {
@@ -786,7 +817,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 			dirty = true;
 		}
 
-		if (attr_b && attr_b->non_res) {
+		if (attr_b->non_res) {
 			new_alloc = le64_to_cpu(attr_b->nres.alloc_size);
 			if (inode_get_bytes(&ni->vfs_inode) != new_alloc) {
 				inode_set_bytes(&ni->vfs_inode, new_alloc);
@@ -800,6 +831,47 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type,
 		}
 	}
 
+	return 0;
+
+undo_2:
+	vcn -= alen;
+	attr_b->nres.data_size = cpu_to_le64(old_size);
+	attr_b->nres.valid_size = cpu_to_le64(old_valid);
+	attr_b->nres.alloc_size = cpu_to_le64(old_alloc);
+
+	/* Restore 'attr' and 'mi'. */
+	if (attr)
+		goto restore_run;
+
+	if (le64_to_cpu(attr_b->nres.svcn) <= svcn &&
+	    svcn <= le64_to_cpu(attr_b->nres.evcn)) {
+		attr = attr_b;
+		le = le_b;
+		mi = mi_b;
+	} else if (!le_b) {
+		err = -EINVAL;
+		goto bad_inode;
+	} else {
+		le = le_b;
+		attr = ni_find_attr(ni, attr_b, &le, type, name, name_len,
+				    &svcn, &mi);
+		if (!attr)
+			goto bad_inode;
+	}
+
+restore_run:
+	if (mi_pack_runs(mi, attr, run, evcn - svcn + 1))
+		is_bad = true;
+
+undo_1:
+	run_deallocate_ex(sbi, run, vcn, alen, NULL, false);
+
+	run_truncate(run, vcn);
+out:
+	if (is_bad) {
+bad_inode:
+		_ntfs_bad_inode(&ni->vfs_inode);
+	}
 	return err;
 }
 