Commit a87a08e3 authored by Linus Torvalds

Merge tag 'for-linus-5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull JFFS2, UBI and UBIFS updates from Richard Weinberger:
 "JFFS2:
   - Fixes for various memory issues

  UBI:
   - Fix for a race condition in cdev ioctl handler

  UBIFS:
   - Fixes for O_TMPFILE and whiteout handling

   - Fixes for various memory issues"

* tag 'for-linus-5.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  ubifs: rename_whiteout: correct old_dir size computing
  jffs2: fix memory leak in jffs2_scan_medium
  jffs2: fix memory leak in jffs2_do_mount_fs
  jffs2: fix use-after-free in jffs2_clear_xattr_subsystem
  fs/jffs2: fix comments mentioning i_mutex
  ubi: fastmap: Return error code if memory allocation fails in add_aeb()
  ubifs: Fix to add refcount once page is set private
  ubifs: Fix read out-of-bounds in ubifs_wbuf_write_nolock()
  ubifs: setflags: Make dirtied_ino_d 8 bytes aligned
  ubifs: Rectify space amount budget for mkdir/tmpfile operations
  ubifs: Fix 'ui->dirty' race between do_tmpfile() and writeback work
  ubifs: Rename whiteout atomically
  ubifs: Add missing iput if do_tmpfile() failed in rename whiteout
  ubifs: Fix wrong number of inodes locked by ui_mutex in ubifs_inode comment
  ubifs: Fix deadlock in concurrent rename whiteout and inode writeback
  ubifs: rename_whiteout: Fix double free for whiteout_ui->data
  ubi: Fix race condition between ctrl_cdev_ioctl and ubi_cdev_ioctl
parents 3d198e42 70575727
@@ -351,9 +351,6 @@ static ssize_t dev_attribute_show(struct device *dev,
 	 * we still can use 'ubi->ubi_num'.
 	 */
 	ubi = container_of(dev, struct ubi_device, dev);
-	ubi = ubi_get_device(ubi->ubi_num);
-	if (!ubi)
-		return -ENODEV;
 
 	if (attr == &dev_eraseblock_size)
 		ret = sprintf(buf, "%d\n", ubi->leb_size);
@@ -382,7 +379,6 @@ static ssize_t dev_attribute_show(struct device *dev,
 	else
 		ret = -EINVAL;
 
-	ubi_put_device(ubi);
 	return ret;
 }
@@ -979,9 +975,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 		goto out_detach;
 	}
 
-	/* Make device "available" before it becomes accessible via sysfs */
-	ubi_devices[ubi_num] = ubi;
-
 	err = uif_init(ubi);
 	if (err)
 		goto out_detach;
@@ -1026,6 +1019,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 	wake_up_process(ubi->bgt_thread);
 	spin_unlock(&ubi->wl_lock);
 
+	ubi_devices[ubi_num] = ubi;
 	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
 	return ubi_num;
 
@@ -1034,7 +1028,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
 out_uif:
 	uif_close(ubi);
 out_detach:
-	ubi_devices[ubi_num] = NULL;
 	ubi_wl_close(ubi);
 	ubi_free_all_volumes(ubi);
 	vfree(ubi->vtbl);
...
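
The two ubi_attach_mtd_dev() hunks above move the point at which the new device is published in the global ubi_devices[] array to after initialization has finished, which is also why dev_attribute_show() no longer needs its own ubi_get_device()/ubi_put_device() pair. Below is a small userspace sketch, with made-up names, of why publishing an object last closes the window in which a concurrent lookup (standing in for ubi_cdev_ioctl()) could see a half-initialized device; it illustrates the ordering only, it is not the kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_device {
	int num;
	int ready;			/* stands in for the long UBI initialisation */
};

static struct fake_device *registry[8];
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

/* What a concurrent ioctl handler would do: look the device up by number. */
static struct fake_device *lookup(int num)
{
	struct fake_device *dev;

	pthread_mutex_lock(&registry_lock);
	dev = registry[num];
	pthread_mutex_unlock(&registry_lock);
	return dev;
}

static int attach(int num)
{
	struct fake_device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return -1;
	dev->num = num;

	/* ... long initialisation that must complete first ... */
	dev->ready = 1;

	/*
	 * Publish last.  If this assignment happened before dev->ready was
	 * set, lookup() could hand out a half-initialised device, which is
	 * the window the UBI patch closes.
	 */
	pthread_mutex_lock(&registry_lock);
	registry[num] = dev;
	pthread_mutex_unlock(&registry_lock);
	return 0;
}

int main(void)
{
	attach(0);
	printf("device 0 is %svisible\n", lookup(0) ? "" : "not ");
	return 0;
}
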
@@ -468,7 +468,9 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
 			if (err == UBI_IO_FF_BITFLIPS)
 				scrub = 1;
 
-			add_aeb(ai, free, pnum, ec, scrub);
+			ret = add_aeb(ai, free, pnum, ec, scrub);
+			if (ret)
+				goto out;
 			continue;
 		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
 			dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -638,8 +640,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 		if (fm_pos >= fm_size)
 			goto fail_bad;
 
-		add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
-			be32_to_cpu(fmec->ec), 0);
+		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
+			      be32_to_cpu(fmec->ec), 0);
+		if (ret)
+			goto fail;
 	}
 
 	/* read EC values from used list */
@@ -649,8 +653,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 		if (fm_pos >= fm_size)
 			goto fail_bad;
 
-		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-			be32_to_cpu(fmec->ec), 0);
+		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+			      be32_to_cpu(fmec->ec), 0);
+		if (ret)
+			goto fail;
 	}
 
 	/* read EC values from scrub list */
@@ -660,8 +666,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 		if (fm_pos >= fm_size)
 			goto fail_bad;
 
-		add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
-			be32_to_cpu(fmec->ec), 1);
+		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
+			      be32_to_cpu(fmec->ec), 1);
+		if (ret)
+			goto fail;
 	}
 
 	/* read EC values from erase list */
@@ -671,8 +679,10 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
 		if (fm_pos >= fm_size)
 			goto fail_bad;
 
-		add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
-			be32_to_cpu(fmec->ec), 1);
+		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
+			      be32_to_cpu(fmec->ec), 1);
+		if (ret)
+			goto fail;
 	}
 
 	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
...
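
The fastmap hunks change add_aeb() from a void helper that could silently fail to allocate into one that returns an error which every caller forwards to its unwind label. A minimal compilable sketch of the same pattern, with hypothetical names (append_entry(), scan()):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int pnum;
	int ec;
	struct entry *next;
};

/* Stand-in for add_aeb(): append an element and report allocation failure. */
static int append_entry(struct entry **list, int pnum, int ec)
{
	struct entry *e = malloc(sizeof(*e));

	if (!e)
		return -ENOMEM;	/* the old void helper dropped this failure */
	e->pnum = pnum;
	e->ec = ec;
	e->next = *list;
	*list = e;
	return 0;
}

static int scan(struct entry **list)
{
	int i, ret = 0;

	for (i = 0; i < 4; i++) {
		ret = append_entry(list, i, i * 10);
		if (ret)
			goto out;	/* mirrors the added 'goto fail' paths */
	}
out:
	return ret;
}

int main(void)
{
	struct entry *list = NULL;

	printf("scan returned %d\n", scan(&list));
	return 0;
}
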
@@ -56,16 +56,11 @@ static ssize_t vol_attribute_show(struct device *dev,
 {
 	int ret;
 	struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
-	struct ubi_device *ubi;
-
-	ubi = ubi_get_device(vol->ubi->ubi_num);
-	if (!ubi)
-		return -ENODEV;
+	struct ubi_device *ubi = vol->ubi;
 
 	spin_lock(&ubi->volumes_lock);
 	if (!ubi->volumes[vol->vol_id]) {
 		spin_unlock(&ubi->volumes_lock);
-		ubi_put_device(ubi);
 		return -ENODEV;
 	}
 	/* Take a reference to prevent volume removal */
@@ -103,7 +98,6 @@ static ssize_t vol_attribute_show(struct device *dev,
 	vol->ref_count -= 1;
 	ubi_assert(vol->ref_count >= 0);
 	spin_unlock(&ubi->volumes_lock);
-	ubi_put_device(ubi);
 	return ret;
 }
...
@@ -415,13 +415,15 @@ int jffs2_do_mount_fs(struct jffs2_sb_info *c)
 		jffs2_free_ino_caches(c);
 		jffs2_free_raw_node_refs(c);
 		ret = -EIO;
-		goto out_free;
+		goto out_sum_exit;
 	}
 
 	jffs2_calc_trigger_levels(c);
 
 	return 0;
 
+ out_sum_exit:
+	jffs2_sum_exit(c);
 out_free:
 	kvfree(c->blocks);
...
@@ -603,8 +603,8 @@ int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
 	jffs2_free_ino_caches(c);
 	jffs2_free_raw_node_refs(c);
 	kvfree(c->blocks);
- out_inohash:
 	jffs2_clear_xattr_subsystem(c);
+ out_inohash:
 	kfree(c->inocache_list);
 out_wbuf:
 	jffs2_flash_cleanup(c);
...
@@ -18,11 +18,11 @@
 #include <linux/mutex.h>
 
 struct jffs2_inode_info {
-	/* We need an internal mutex similar to inode->i_mutex.
+	/* We need an internal mutex similar to inode->i_rwsem.
 	   Unfortunately, we can't used the existing one, because
 	   either the GC would deadlock, or we'd have to release it
 	   before letting GC proceed. Or we'd have to put ugliness
-	   into the GC code so it didn't attempt to obtain the i_mutex
+	   into the GC code so it didn't attempt to obtain the i_rwsem
 	   for the inode(s) which are already locked */
 	struct mutex sem;
...
@@ -136,7 +136,7 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
 		if (!s) {
 			JFFS2_WARNING("Can't allocate memory for summary\n");
 			ret = -ENOMEM;
-			goto out;
+			goto out_buf;
 		}
 	}
 
@@ -275,13 +275,15 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
 	}
 	ret = 0;
 out:
+	jffs2_sum_reset_collected(s);
+	kfree(s);
+ out_buf:
 	if (buf_size)
 		kfree(flashbuf);
#ifndef __ECOS
 	else
 		mtd_unpoint(c->mtd, 0, c->mtd->size);
#endif
-	kfree(s);
 	return ret;
 }
...
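
The jffs2_scan_medium() fix adds a dedicated out_buf label so that failures which happen before the summary state exists skip its cleanup, while every later exit path releases it before freeing the flash buffer. A compilable sketch of that label ordering, with made-up names and plain malloc()/free():

#include <errno.h>
#include <stdlib.h>

static int do_work(void)
{
	return 0;	/* stand-in for the real scan loop */
}

static int scan_medium(void)
{
	char *flashbuf, *summary;
	int ret;

	flashbuf = malloc(4096);
	if (!flashbuf)
		return -ENOMEM;

	summary = malloc(512);
	if (!summary) {
		ret = -ENOMEM;
		goto out_buf;	/* 'summary' does not exist yet, skip its cleanup */
	}

	ret = do_work();
	if (ret)
		goto out;

out:
	free(summary);		/* released on success and on late failures */
out_buf:
	free(flashbuf);
	return ret;
}

int main(void)
{
	return scan_medium() ? 1 : 0;
}
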
@@ -570,7 +570,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 	}
 
 	if (!PagePrivate(page)) {
-		SetPagePrivate(page);
+		attach_page_private(page, (void *)1);
 		atomic_long_inc(&c->dirty_pg_cnt);
 		__set_page_dirty_nobuffers(page);
 	}
@@ -947,7 +947,7 @@ static int do_writepage(struct page *page, int len)
 		release_existing_page_budget(c);
 
 	atomic_long_dec(&c->dirty_pg_cnt);
-	ClearPagePrivate(page);
+	detach_page_private(page);
 	ClearPageChecked(page);
 
 	kunmap(page);
@@ -1304,7 +1304,7 @@ static void ubifs_invalidate_folio(struct folio *folio, size_t offset,
 		release_existing_page_budget(c);
 
 	atomic_long_dec(&c->dirty_pg_cnt);
-	folio_clear_private(folio);
+	folio_detach_private(folio);
 	folio_clear_checked(folio);
 }
@@ -1471,8 +1471,8 @@ static int ubifs_migrate_page(struct address_space *mapping,
 		return rc;
 
 	if (PagePrivate(page)) {
-		ClearPagePrivate(page);
-		SetPagePrivate(newpage);
+		detach_page_private(page);
+		attach_page_private(newpage, (void *)1);
 	}
 
 	if (mode != MIGRATE_SYNC_NO_COPY)
@@ -1496,7 +1496,7 @@ static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 		return 0;
 	ubifs_assert(c, PagePrivate(page));
 	ubifs_assert(c, 0);
-	ClearPagePrivate(page);
+	detach_page_private(page);
 	ClearPageChecked(page);
 	return 1;
 }
@@ -1567,7 +1567,7 @@ static vm_fault_t ubifs_vm_page_mkwrite(struct vm_fault *vmf)
 	else {
 		if (!PageChecked(page))
 			ubifs_convert_page_budget(c);
-		SetPagePrivate(page);
+		attach_page_private(page, (void *)1);
 		atomic_long_inc(&c->dirty_pg_cnt);
 		__set_page_dirty_nobuffers(page);
 	}
...
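
These hunks replace SetPagePrivate()/ClearPagePrivate() with attach_page_private()/detach_page_private(); per the "Fix to add refcount once page is set private" summary, the difference that matters here is that attaching private data also holds a reference on the page, which detaching drops. A simplified userspace model of that pairing (not the kernel mm implementation):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_page {
	int refcount;
	bool private;
	void *private_data;
};

static void attach_private(struct fake_page *p, void *data)
{
	p->refcount++;		/* the extra reference SetPagePrivate() never took */
	p->private_data = data;
	p->private = true;
}

static void *detach_private(struct fake_page *p)
{
	void *data;

	if (!p->private)
		return NULL;
	data = p->private_data;
	p->private = false;
	p->private_data = NULL;
	p->refcount--;		/* reference dropped exactly once */
	return data;
}

int main(void)
{
	struct fake_page page = { .refcount = 1 };

	attach_private(&page, (void *)1);
	assert(page.refcount == 2);	/* page pinned while it is private */
	detach_private(&page);
	assert(page.refcount == 1);
	return 0;
}
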
@@ -854,16 +854,42 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 		 */
 		n = aligned_len >> c->max_write_shift;
 		if (n) {
-			n <<= c->max_write_shift;
+			int m = n - 1;
+
 			dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
 			       wbuf->offs);
-			err = ubifs_leb_write(c, wbuf->lnum, buf + written,
-					      wbuf->offs, n);
+
+			if (m) {
+				/* '(n-1)<<c->max_write_shift < len' is always true. */
+				m <<= c->max_write_shift;
+				err = ubifs_leb_write(c, wbuf->lnum, buf + written,
+						      wbuf->offs, m);
+				if (err)
+					goto out;
+				wbuf->offs += m;
+				aligned_len -= m;
+				len -= m;
+				written += m;
+			}
+
+			/*
+			 * The non-written len of buf may be less than 'n' because
+			 * parameter 'len' is not 8 bytes aligned, so here we read
+			 * min(len, n) bytes from buf.
+			 */
+			n = 1 << c->max_write_shift;
+			memcpy(wbuf->buf, buf + written, min(len, n));
+			if (n > len) {
+				ubifs_assert(c, n - len < 8);
+				ubifs_pad(c, wbuf->buf + len, n - len);
+			}
+			err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, n);
 			if (err)
 				goto out;
 			wbuf->offs += n;
 			aligned_len -= n;
-			len -= n;
+			len -= min(len, n);
 			written += n;
 		}
...
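
The rewritten branch writes the first n - 1 max-write-size chunks directly from the caller's buffer, then stages the final chunk in wbuf->buf, copying only min(len, n) bytes and padding the remainder, so nothing is ever read beyond the caller's len bytes. A standalone walk-through of that arithmetic with made-up sizes (not the UBIFS code):

#include <stdio.h>

#define ALIGN8(x)	(((x) + 7) & ~7)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int max_write_shift = 9;			/* 512-byte flash write unit */
	int max_write_size = 1 << max_write_shift;
	int len = 1021;					/* caller's real length, not 8-byte aligned */
	int aligned_len = ALIGN8(len);			/* 1024 */
	int n = aligned_len >> max_write_shift;		/* 2 full write units */
	int m = n - 1;					/* 1 unit written straight from the caller */
	int direct = m << max_write_shift;		/* 512 bytes */
	int remaining = len - direct;			/* 509 real bytes left */
	int staged = MIN(remaining, max_write_size);	/* bytes copied into the staging buffer: 509 */
	int pad = max_write_size - staged;		/* 3 padding bytes, < 8 in this layout */

	/*
	 * The old code wrote max_write_size bytes straight from the caller's
	 * buffer for the last unit as well, i.e. it would have read 'pad'
	 * bytes past the end of this 1021-byte buffer.
	 */
	printf("direct=%d staged=%d pad=%d\n", direct, staged, pad);
	return 0;
}
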
@@ -108,7 +108,7 @@ static int setflags(struct inode *inode, int flags)
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_budget_req req = { .dirtied_ino = 1,
-					.dirtied_ino_d = ui->data_len };
+					.dirtied_ino_d = ALIGN(ui->data_len, 8) };
 
 	err = ubifs_budget_space(c, &req);
 	if (err)
...
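
The setflags() change budgets the inode data length rounded up to 8 bytes rather than the raw value, since the budgeting code accounts inode data in 8-byte aligned units. A trivial sketch using an ALIGN() macro equivalent to the kernel's:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int data_len = 13;	/* example unaligned inode data length */

	printf("raw value      = %d\n", data_len);		/* 13 */
	printf("aligned budget = %d\n", ALIGN(data_len, 8));	/* 16 */
	return 0;
}
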
@@ -1207,9 +1207,9 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
  * @sync: non-zero if the write-buffer has to be synchronized
  *
  * This function implements the re-name operation which may involve writing up
- * to 4 inodes and 2 directory entries. It marks the written inodes as clean
- * and returns zero on success. In case of failure, a negative error code is
- * returned.
+ * to 4 inodes(new inode, whiteout inode, old and new parent directory inodes)
+ * and 2 directory entries. It marks the written inodes as clean and returns
+ * zero on success. In case of failure, a negative error code is returned.
  */
 int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		     const struct inode *old_inode,
@@ -1222,14 +1222,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 	void *p;
 	union ubifs_key key;
 	struct ubifs_dent_node *dent, *dent2;
-	int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0;
+	int err, dlen1, dlen2, ilen, wlen, lnum, offs, len, orphan_added = 0;
 	int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ;
 	int last_reference = !!(new_inode && new_inode->i_nlink == 0);
 	int move = (old_dir != new_dir);
-	struct ubifs_inode *new_ui;
+	struct ubifs_inode *new_ui, *whiteout_ui;
 	u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
 	u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
 	u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
+	u8 hash_whiteout_inode[UBIFS_HASH_ARR_SZ];
 	u8 hash_dent1[UBIFS_HASH_ARR_SZ];
 	u8 hash_dent2[UBIFS_HASH_ARR_SZ];
@@ -1249,9 +1250,20 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 	} else
 		ilen = 0;
 
+	if (whiteout) {
+		whiteout_ui = ubifs_inode(whiteout);
+		ubifs_assert(c, mutex_is_locked(&whiteout_ui->ui_mutex));
+		ubifs_assert(c, whiteout->i_nlink == 1);
+		ubifs_assert(c, !whiteout_ui->dirty);
+		wlen = UBIFS_INO_NODE_SZ;
+		wlen += whiteout_ui->data_len;
+	} else
+		wlen = 0;
+
 	aligned_dlen1 = ALIGN(dlen1, 8);
 	aligned_dlen2 = ALIGN(dlen2, 8);
-	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
+	len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) +
+	      ALIGN(wlen, 8) + ALIGN(plen, 8);
 	if (move)
 		len += plen;
@@ -1313,6 +1325,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		p += ALIGN(ilen, 8);
 	}
 
+	if (whiteout) {
+		pack_inode(c, p, whiteout, 0);
+		err = ubifs_node_calc_hash(c, p, hash_whiteout_inode);
+		if (err)
+			goto out_release;
+
+		p += ALIGN(wlen, 8);
+	}
+
 	if (!move) {
 		pack_inode(c, p, old_dir, 1);
 		err = ubifs_node_calc_hash(c, p, hash_old_dir);
@@ -1352,6 +1373,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		if (new_inode)
 			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
 						  new_inode->i_ino);
+		if (whiteout)
+			ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf,
+						  whiteout->i_ino);
 	}
 	release_head(c, BASEHD);
@@ -1368,8 +1392,6 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
 		if (err)
 			goto out_ro;
-
-		ubifs_delete_orphan(c, whiteout->i_ino);
 	} else {
 		err = ubifs_add_dirt(c, lnum, dlen2);
 		if (err)
@@ -1390,6 +1412,15 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		offs += ALIGN(ilen, 8);
 	}
 
+	if (whiteout) {
+		ino_key_init(c, &key, whiteout->i_ino);
+		err = ubifs_tnc_add(c, &key, lnum, offs, wlen,
+				    hash_whiteout_inode);
+		if (err)
+			goto out_ro;
+		offs += ALIGN(wlen, 8);
+	}
+
 	ino_key_init(c, &key, old_dir->i_ino);
 	err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
 	if (err)
@@ -1410,6 +1441,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
 		new_ui->synced_i_size = new_ui->ui_size;
 		spin_unlock(&new_ui->ui_lock);
 	}
+	/*
+	 * No need to mark whiteout inode clean.
+	 * Whiteout doesn't have non-zero size, no need to update
+	 * synced_i_size for whiteout_ui.
+	 */
 	mark_inode_clean(c, ubifs_inode(old_dir));
 	if (move)
 		mark_inode_clean(c, ubifs_inode(new_dir));
...
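
The journal.c changes thread the whiteout inode through the same length and offset bookkeeping as the other nodes of the rename journal entry: wlen contributes ALIGN(wlen, 8) to the total length and advances the running offset before the parent directory inode is added to the index. A standalone sketch of that bookkeeping with made-up node sizes:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int dlen1 = 53, dlen2 = 61;	/* two directory entries (example sizes) */
	int ilen = 160;			/* new inode node, 0 if there is none */
	int wlen = 160;			/* whiteout inode node, 0 without a whiteout */
	int plen = 160;			/* parent directory inode node */
	int len, offs = 0;

	/* Total journal entry length, mirroring the patched computation. */
	len = ALIGN(dlen1, 8) + ALIGN(dlen2, 8) + ALIGN(ilen, 8) +
	      ALIGN(wlen, 8) + ALIGN(plen, 8);

	offs += ALIGN(dlen1, 8);	/* past dent1 */
	offs += ALIGN(dlen2, 8);	/* past dent2 */
	offs += ALIGN(ilen, 8);		/* past the new inode */
	offs += ALIGN(wlen, 8);		/* past the whiteout inode (the added step) */
	printf("total len = %d, old_dir inode offset = %d\n", len, offs);
	return 0;
}
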
@@ -381,7 +381,7 @@ struct ubifs_gced_idx_leb {
  * @ui_mutex exists for two main reasons. At first it prevents inodes from
  * being written back while UBIFS changing them, being in the middle of an VFS
  * operation. This way UBIFS makes sure the inode fields are consistent. For
- * example, in 'ubifs_rename()' we change 3 inodes simultaneously, and
+ * example, in 'ubifs_rename()' we change 4 inodes simultaneously, and
  * write-back must not write any of them before we have finished.
  *
  * The second reason is budgeting - UBIFS has to budget all operations. If an
...