Commit 744983d8 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-linus-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull JFFS2, UBI and UBIFS updates from Richard Weinberger:
 "JFFS2:
   - Fixes for a memory leak

  UBI:
   - Fixes for fastmap (UAF, high CPU usage)

  UBIFS:
   - Minor cleanups"

* tag 'for-linus-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  ubi: ubi_create_volume: Fix use-after-free when volume creation failed
  ubi: fastmap: Check wl_pool for free peb before wear leveling
  ubi: fastmap: Fix high cpu usage of ubi_bgt by making sure wl_pool not empty
  ubifs: Use NULL instead of using plain integer as pointer
  ubifs: Simplify the return expression of run_gc()
  jffs2: fix memory leak in jffs2_do_fill_super
  jffs2: Use kzalloc instead of kmalloc/memset
parents 4e583ff9 8c03a1c2
...@@ -97,6 +97,33 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor) ...@@ -97,6 +97,33 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
return e; return e;
} }
/*
* has_enough_free_count - whether ubi has enough free pebs to fill fm pools
* @ubi: UBI device description object
* @is_wl_pool: whether UBI is filling wear leveling pool
*
 * This helper function checks whether there are enough free pebs (after
 * deducting the fastmap pebs) to fill fm_pool and fm_wl_pool; this rule
 * applies once at least one free peb has been placed into fm_wl_pool.
 * For the wear leveling pool, UBI should also reserve free pebs for bad
 * peb handling, because otherwise there may not be enough free pebs left
 * for user volumes after new bad pebs are produced.
*/
static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
{
	int reserved = 0;
	int fm_pebs = 0;	/* fastmap non-anchor PEBs */

	/* No free PEBs at all - nothing to fill the pools with. */
	if (!ubi->free.rb_node)
		return false;

	/* The wear-leveling pool additionally keeps PEBs back for bad-PEB handling. */
	if (is_wl_pool)
		reserved = ubi->beb_rsvd_pebs;

	/* Account for fastmap's own (non-anchor) PEBs once fm_wl_pool is active. */
	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
		fm_pebs = ubi->fm_size / ubi->leb_size - 1;

	return ubi->free_count - reserved > fm_pebs;
}
/** /**
* ubi_refill_pools - refills all fastmap PEB pools. * ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object * @ubi: UBI device description object
...@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi) ...@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
wl_tree_add(ubi->fm_anchor, &ubi->free); wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->free_count++; ubi->free_count++;
} }
if (ubi->fm_next_anchor) {
wl_tree_add(ubi->fm_next_anchor, &ubi->free);
ubi->free_count++;
}
/* All available PEBs are in ubi->free, now is the time to get /*
* All available PEBs are in ubi->free, now is the time to get
* the best anchor PEBs. * the best anchor PEBs.
*/ */
ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1); ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
for (;;) { for (;;) {
enough = 0; enough = 0;
if (pool->size < pool->max_size) { if (pool->size < pool->max_size) {
if (!ubi->free.rb_node) if (!has_enough_free_count(ubi, false))
break; break;
e = wl_get_wle(ubi); e = wl_get_wle(ubi);
...@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi) ...@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
enough++; enough++;
if (wl_pool->size < wl_pool->max_size) { if (wl_pool->size < wl_pool->max_size) {
if (!ubi->free.rb_node || if (!has_enough_free_count(ubi, true))
(ubi->free_count - ubi->beb_rsvd_pebs < 5))
break; break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
...@@ -253,6 +275,58 @@ int ubi_wl_get_peb(struct ubi_device *ubi) ...@@ -253,6 +275,58 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
return ret; return ret;
} }
/**
* next_peb_for_wl - returns next PEB to be used internally by the
* WL sub-system.
*
* @ubi: UBI device description object
*/
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

	/* Pool exhausted - nothing left to hand out. */
	if (wl_pool->used == wl_pool->size)
		return NULL;

	/* Peek at the next unused PEB in the pool without consuming it. */
	return ubi->lookuptbl[wl_pool->pebs[wl_pool->used]];
}
/**
* need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEBs from the wl_pool; we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
 * may be moved into the 'wl_pool' by ubi_refill_pools().
*
* @ubi: UBI device description object
*/
static bool need_wear_leveling(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	int max_ec;

	/* Nothing in the used tree - no candidate to wear-level. */
	if (!ubi->used.rb_node)
		return false;

	e = next_peb_for_wl(ubi);
	if (e) {
		max_ec = e->ec;
		/* Refill may move free-tree PEBs into wl_pool, so check both. */
		if (ubi->free.rb_node) {
			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			max_ec = max(max_ec, e->ec);
		}
	} else {
		/* wl_pool is empty - fall back to the free tree alone. */
		if (!ubi->free.rb_node)
			return false;
		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		max_ec = e->ec;
	}

	/* Compare against the least-worn used PEB. */
	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

	return max_ec - e->ec >= UBI_WL_THRESHOLD;
}
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system. /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
* *
* @ubi: UBI device description object * @ubi: UBI device description object
...@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) ...@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi) int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{ {
struct ubi_work *wrk; struct ubi_work *wrk;
struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock); spin_lock(&ubi->wl_lock);
/* Do we have a next anchor? */ /* Do we already have an anchor? */
if (!ubi->fm_next_anchor) { if (ubi->fm_anchor) {
ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1); spin_unlock(&ubi->wl_lock);
if (!ubi->fm_next_anchor) return 0;
/* Tell wear leveling to produce a new anchor PEB */
ubi->fm_do_produce_anchor = 1;
} }
/* Do wear leveling to get a new anchor PEB or check the /* See if we can find an anchor PEB on the list of free PEBs */
* existing next anchor candidate. anchor = ubi_wl_get_fm_peb(ubi, 1);
*/ if (anchor) {
ubi->fm_anchor = anchor;
spin_unlock(&ubi->wl_lock);
return 0;
}
ubi->fm_do_produce_anchor = 1;
/* No luck, trigger wear leveling to produce a new anchor PEB. */
if (ubi->wl_scheduled) { if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock); spin_unlock(&ubi->wl_lock);
return 0; return 0;
...@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi) ...@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
ubi->fm_anchor = NULL; ubi->fm_anchor = NULL;
} }
if (ubi->fm_next_anchor) {
return_unused_peb(ubi, ubi->fm_next_anchor);
ubi->fm_next_anchor = NULL;
}
if (ubi->fm) { if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++) for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]); kfree(ubi->fm->e[i]);
......
...@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi, ...@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec); fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size); ubi_assert(fm_pos <= ubi->fm_size);
} }
if (ubi->fm_next_anchor) {
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
free_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->free_peb_count = cpu_to_be32(free_peb_count); fmh->free_peb_count = cpu_to_be32(free_peb_count);
ubi_for_each_used_peb(ubi, wl_e, tmp_rb) { ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
......
...@@ -489,8 +489,7 @@ struct ubi_debug_info { ...@@ -489,8 +489,7 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue * @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled * @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap * @fast_attach: non-zero if UBI was attached by fastmap
* @fm_anchor: The new anchor PEB used during fastmap update * @fm_anchor: The next anchor PEB to use for fastmap
* @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
* @fm_do_produce_anchor: If true produce an anchor PEB in wl * @fm_do_produce_anchor: If true produce an anchor PEB in wl
* *
* @used: RB-tree of used physical eraseblocks * @used: RB-tree of used physical eraseblocks
...@@ -601,7 +600,6 @@ struct ubi_device { ...@@ -601,7 +600,6 @@ struct ubi_device {
int fm_work_scheduled; int fm_work_scheduled;
int fast_attach; int fast_attach;
struct ubi_wl_entry *fm_anchor; struct ubi_wl_entry *fm_anchor;
struct ubi_wl_entry *fm_next_anchor;
int fm_do_produce_anchor; int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */ /* Wear-leveling sub-system's stuff */
......
...@@ -309,7 +309,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) ...@@ -309,7 +309,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
ubi->volumes[vol_id] = NULL; ubi->volumes[vol_id] = NULL;
ubi->vol_count -= 1; ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock); spin_unlock(&ubi->volumes_lock);
ubi_eba_destroy_table(eba_tbl);
out_acc: out_acc:
spin_lock(&ubi->volumes_lock); spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs; ubi->rsvd_pebs -= vol->reserved_pebs;
......
...@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ...@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_assert(!ubi->move_from && !ubi->move_to); ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put); ubi_assert(!ubi->move_to_put);
#ifdef CONFIG_MTD_UBI_FASTMAP
if (!next_peb_for_wl(ubi) ||
#else
if (!ubi->free.rb_node || if (!ubi->free.rb_node ||
#endif
(!ubi->used.rb_node && !ubi->scrub.rb_node)) { (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/* /*
* No free physical eraseblocks? Well, they must be waiting in * No free physical eraseblocks? Well, they must be waiting in
...@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ...@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
#ifdef CONFIG_MTD_UBI_FASTMAP #ifdef CONFIG_MTD_UBI_FASTMAP
e1 = find_anchor_wl_entry(&ubi->used); e1 = find_anchor_wl_entry(&ubi->used);
if (e1 && ubi->fm_next_anchor && if (e1 && ubi->fm_anchor &&
(ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) { (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
ubi->fm_do_produce_anchor = 1; ubi->fm_do_produce_anchor = 1;
/* fm_next_anchor is no longer considered a good anchor /*
* candidate. * fm_anchor is no longer considered a good anchor.
* NULL assignment also prevents multiple wear level checks * NULL assignment also prevents multiple wear level checks
* of this PEB. * of this PEB.
*/ */
wl_tree_add(ubi->fm_next_anchor, &ubi->free); wl_tree_add(ubi->fm_anchor, &ubi->free);
ubi->fm_next_anchor = NULL; ubi->fm_anchor = NULL;
ubi->free_count++; ubi->free_count++;
} }
...@@ -1003,8 +1007,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ...@@ -1003,8 +1007,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
static int ensure_wear_leveling(struct ubi_device *ubi, int nested) static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{ {
int err = 0; int err = 0;
struct ubi_wl_entry *e1;
struct ubi_wl_entry *e2;
struct ubi_work *wrk; struct ubi_work *wrk;
spin_lock(&ubi->wl_lock); spin_lock(&ubi->wl_lock);
...@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) ...@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
* the WL worker has to be scheduled anyway. * the WL worker has to be scheduled anyway.
*/ */
if (!ubi->scrub.rb_node) { if (!ubi->scrub.rb_node) {
#ifdef CONFIG_MTD_UBI_FASTMAP
if (!need_wear_leveling(ubi))
goto out_unlock;
#else
struct ubi_wl_entry *e1;
struct ubi_wl_entry *e2;
if (!ubi->used.rb_node || !ubi->free.rb_node) if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */ /* No physical eraseblocks - no deal */
goto out_unlock; goto out_unlock;
...@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested) ...@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock; goto out_unlock;
#endif
dbg_wl("schedule wear-leveling"); dbg_wl("schedule wear-leveling");
} else } else
dbg_wl("schedule scrubbing"); dbg_wl("schedule scrubbing");
...@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) ...@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
if (!err) { if (!err) {
spin_lock(&ubi->wl_lock); spin_lock(&ubi->wl_lock);
if (!ubi->fm_disabled && !ubi->fm_next_anchor && if (!ubi->fm_disabled && !ubi->fm_anchor &&
e->pnum < UBI_FM_MAX_START) { e->pnum < UBI_FM_MAX_START) {
/* Abort anchor production, if needed it will be /*
* Abort anchor production, if needed it will be
* enabled again in the wear leveling started below. * enabled again in the wear leveling started below.
*/ */
ubi->fm_next_anchor = e; ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0; ubi->fm_do_produce_anchor = 0;
} else { } else {
wl_tree_add(e, &ubi->free); wl_tree_add(e, &ubi->free);
......
...@@ -5,6 +5,8 @@ ...@@ -5,6 +5,8 @@
static void update_fastmap_work_fn(struct work_struct *wrk); static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root); static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi); static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
static bool need_wear_leveling(struct ubi_device *ubi);
static void ubi_fastmap_close(struct ubi_device *ubi); static void ubi_fastmap_close(struct ubi_device *ubi);
static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
{ {
......
...@@ -43,9 +43,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, ...@@ -43,9 +43,9 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n", jffs2_dbg(1, "%s(): erase block %#08x (range %#08x-%#08x)\n",
__func__, __func__,
jeb->offset, jeb->offset, jeb->offset + c->sector_size); jeb->offset, jeb->offset, jeb->offset + c->sector_size);
instr = kmalloc(sizeof(struct erase_info), GFP_KERNEL); instr = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
if (!instr) { if (!instr) {
pr_warn("kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n"); pr_warn("kzalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
mutex_lock(&c->erase_free_sem); mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock); spin_lock(&c->erase_completion_lock);
list_move(&jeb->list, &c->erase_pending_list); list_move(&jeb->list, &c->erase_pending_list);
...@@ -57,8 +57,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c, ...@@ -57,8 +57,6 @@ static void jffs2_erase_block(struct jffs2_sb_info *c,
return; return;
} }
memset(instr, 0, sizeof(*instr));
instr->addr = jeb->offset; instr->addr = jeb->offset;
instr->len = c->sector_size; instr->len = c->sector_size;
......
...@@ -604,6 +604,7 @@ int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc) ...@@ -604,6 +604,7 @@ int jffs2_do_fill_super(struct super_block *sb, struct fs_context *fc)
jffs2_free_raw_node_refs(c); jffs2_free_raw_node_refs(c);
kvfree(c->blocks); kvfree(c->blocks);
jffs2_clear_xattr_subsystem(c); jffs2_clear_xattr_subsystem(c);
jffs2_sum_exit(c);
out_inohash: out_inohash:
kfree(c->inocache_list); kfree(c->inocache_list);
out_wbuf: out_wbuf:
......
...@@ -65,7 +65,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write) ...@@ -65,7 +65,7 @@ static void shrink_liability(struct ubifs_info *c, int nr_to_write)
*/ */
static int run_gc(struct ubifs_info *c) static int run_gc(struct ubifs_info *c)
{ {
int err, lnum; int lnum;
/* Make some free space by garbage-collecting dirty space */ /* Make some free space by garbage-collecting dirty space */
down_read(&c->commit_sem); down_read(&c->commit_sem);
...@@ -76,10 +76,7 @@ static int run_gc(struct ubifs_info *c) ...@@ -76,10 +76,7 @@ static int run_gc(struct ubifs_info *c)
/* GC freed one LEB, return it to lprops */ /* GC freed one LEB, return it to lprops */
dbg_budg("GC freed LEB %d", lnum); dbg_budg("GC freed LEB %d", lnum);
err = ubifs_return_leb(c, lnum); return ubifs_return_leb(c, lnum);
if (err)
return err;
return 0;
} }
/** /**
......
...@@ -677,7 +677,7 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, ...@@ -677,7 +677,7 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
int err; int err;
err = security_inode_init_security(inode, dentry, qstr, err = security_inode_init_security(inode, dentry, qstr,
&init_xattrs, 0); &init_xattrs, NULL);
if (err) { if (err) {
struct ubifs_info *c = dentry->i_sb->s_fs_info; struct ubifs_info *c = dentry->i_sb->s_fs_info;
ubifs_err(c, "cannot initialize security for inode %lu, error %d", ubifs_err(c, "cannot initialize security for inode %lu, error %d",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment