Commit efc1fd60 authored by Linus Torvalds

Merge tag 'block-5.13-2021-06-12' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into 5.13:

   - Fix a regression deadlock introduced in this release between open
     and remove of a bdev (Christoph)

   - Fix an async_xor md regression in this release (Xiao)

   - Fix bcache oversized read issue (Coly)"

* tag 'block-5.13-2021-06-12' of git://git.kernel.dk/linux-block:
  block: loop: fix deadlock between open and remove
  async_xor: check src_offs is not NULL before updating it
  bcache: avoid oversized read request in cache missing code path
  bcache: remove bcache device self-defined readahead
parents b2568eeb 85f3f17b
crypto/async_tx/async_xor.c
@@ -233,7 +233,8 @@ async_xor_offs(struct page *dest, unsigned int offset,
 	if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
 		src_cnt--;
 		src_list++;
-		src_offs++;
+		if (src_offs)
+			src_offs++;
 	}
 
 	/* wait for any prerequisite operations */
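The guard matters because the offset array is optional: the non-offset entry point passes a NULL src_offs into async_xor_offs(), and bumping that NULL marker turns it into a bogus non-NULL pointer that downstream offset lookups then trust. A stand-alone sketch of the same optional-parallel-array pattern (hypothetical names, plain C, not the kernel code):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/*
 * Sketch: offs is a parallel array of per-source offsets that may be
 * NULL, meaning "every source starts at offset 0".  Advancing a NULL
 * offs unconditionally would yield a garbage non-NULL pointer.
 */
static void xor_bufs(uint8_t *dest, uint8_t **srcs, const size_t *offs,
		     int src_cnt, size_t len, int drop_dst)
{
	if (drop_dst) {		/* dest doubles as srcs[0]; skip it */
		src_cnt--;
		srcs++;
		if (offs)	/* the guard added by the fix above */
			offs++;
	}

	for (int j = 0; j < src_cnt; j++) {
		size_t off = offs ? offs[j] : 0;

		for (size_t i = 0; i < len; i++)
			dest[i] ^= srcs[j][off + i];
	}
}

int main(void)
{
	uint8_t a[4] = { 1, 2, 3, 4 }, b[4] = { 4, 3, 2, 1 };
	uint8_t *srcs[] = { a, b };

	xor_bufs(a, srcs, NULL, 2, 4, 1);	/* NULL exercises the guarded path */
	printf("%u %u %u %u\n", a[0], a[1], a[2], a[3]);
	return 0;
}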
drivers/block/loop.c
@@ -1879,29 +1879,18 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 
 static int lo_open(struct block_device *bdev, fmode_t mode)
 {
-	struct loop_device *lo;
+	struct loop_device *lo = bdev->bd_disk->private_data;
 	int err;
 
-	/*
-	 * take loop_ctl_mutex to protect lo pointer from race with
-	 * loop_control_ioctl(LOOP_CTL_REMOVE), however, to reduce contention
-	 * release it prior to updating lo->lo_refcnt.
-	 */
-	err = mutex_lock_killable(&loop_ctl_mutex);
-	if (err)
-		return err;
-	lo = bdev->bd_disk->private_data;
-	if (!lo) {
-		mutex_unlock(&loop_ctl_mutex);
-		return -ENXIO;
-	}
 	err = mutex_lock_killable(&lo->lo_mutex);
-	mutex_unlock(&loop_ctl_mutex);
 	if (err)
 		return err;
-	atomic_inc(&lo->lo_refcnt);
+	if (lo->lo_state == Lo_deleting)
+		err = -ENXIO;
+	else
+		atomic_inc(&lo->lo_refcnt);
 	mutex_unlock(&lo->lo_mutex);
-	return 0;
+	return err;
 }
 
 static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -2285,7 +2274,7 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
 			mutex_unlock(&lo->lo_mutex);
 			break;
 		}
-		lo->lo_disk->private_data = NULL;
+		lo->lo_state = Lo_deleting;
 		mutex_unlock(&lo->lo_mutex);
 		idr_remove(&loop_index_idr, lo->lo_number);
 		loop_remove(lo);
drivers/block/loop.h
@@ -22,6 +22,7 @@ enum {
 	Lo_unbound,
 	Lo_bound,
 	Lo_rundown,
+	Lo_deleting,
 };
 
 struct loop_func_table;
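Taken together, the three loop hunks trade "protect the lo pointer with the global loop_ctl_mutex" for "mark the device Lo_deleting under its own lo_mutex", so lo_open no longer nests the global lock and the open-vs-remove deadlock goes away. A minimal userspace sketch of that state-flag pattern, assuming pthreads (dev_open/dev_remove and the struct are hypothetical names; the diff above is the authoritative version):

#include <errno.h>
#include <pthread.h>

enum dev_state { DEV_BOUND, DEV_DELETING };

struct dev {
	pthread_mutex_t lock;	/* per-device lock, no global lock needed */
	enum dev_state state;
	int refcnt;
};

/* open path: take only the per-device lock, refuse dying devices */
static int dev_open(struct dev *d)
{
	int err = 0;

	pthread_mutex_lock(&d->lock);
	if (d->state == DEV_DELETING)
		err = -ENXIO;	/* removal already committed */
	else
		d->refcnt++;
	pthread_mutex_unlock(&d->lock);
	return err;
}

/* remove path: mark the device dying under the same lock */
static int dev_remove(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	if (d->refcnt > 0) {
		pthread_mutex_unlock(&d->lock);
		return -EBUSY;	/* still open somewhere */
	}
	d->state = DEV_DELETING;	/* new opens now fail with -ENXIO */
	pthread_mutex_unlock(&d->lock);
	/* safe to tear the device down without holding a global lock */
	return 0;
}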
drivers/md/bcache/bcache.h
@@ -364,7 +364,6 @@ struct cached_dev {
 	/* The rest of this all shows up in sysfs */
 	unsigned int		sequential_cutoff;
-	unsigned int		readahead;
 
 	unsigned int		io_disable:1;
 	unsigned int		verify:1;
drivers/md/bcache/request.c
@@ -880,9 +880,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 			      struct bio *bio, unsigned int sectors)
 {
 	int ret = MAP_CONTINUE;
-	unsigned int reada = 0;
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	struct bio *miss, *cache_bio;
+	unsigned int size_limit;
 
 	s->cache_missed = 1;
@@ -892,14 +892,10 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 		goto out_submit;
 	}
 
-	if (!(bio->bi_opf & REQ_RAHEAD) &&
-	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
-	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
-		reada = min_t(sector_t, dc->readahead >> 9,
-			      get_capacity(bio->bi_bdev->bd_disk) -
-			      bio_end_sector(bio));
-
-	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
+	/* Limitation for valid replace key size and cache_bio bvecs number */
+	size_limit = min_t(unsigned int, BIO_MAX_VECS * PAGE_SECTORS,
+			   (1 << KEY_SIZE_BITS) - 1);
+	s->insert_bio_sectors = min3(size_limit, sectors, bio_sectors(bio));
 
 	s->iop.replace_key = KEY(s->iop.inode,
 				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
@@ -911,7 +907,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
 	s->iop.replace = true;
 
-	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
+	miss = bio_next_split(bio, s->insert_bio_sectors, GFP_NOIO,
+			      &s->d->bio_split);
 
 	/* btree_search_recurse()'s btree iterator is no good anymore */
 	ret = miss == bio ? MAP_DONE : -EINTR;
@@ -933,9 +930,6 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
 		goto out_put;
 
-	if (reada)
-		bch_mark_cache_readahead(s->iop.c, s->d);
-
 	s->cache_miss	= miss;
 	s->iop.bio	= cache_bio;
 	bio_get(cache_bio);
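The new clamp is plain arithmetic: an insertion may not exceed what one bio can carry (BIO_MAX_VECS pages) nor what the replace key's size field can encode ((1 << KEY_SIZE_BITS) - 1 sectors), and it is further bounded by the requested and available sectors. A worked example, assuming 4 KiB pages (so PAGE_SECTORS = 8), BIO_MAX_VECS = 256, and KEY_SIZE_BITS = 16:

#include <stdio.h>

#define PAGE_SECTORS	8	/* 4 KiB page / 512 B sector (assumed) */
#define BIO_MAX_VECS	256	/* max bvecs per bio (assumed 5.13 value) */
#define KEY_SIZE_BITS	16	/* width of the bcache key size field (assumed) */

static unsigned int min3u(unsigned int a, unsigned int b, unsigned int c)
{
	unsigned int m = a < b ? a : b;

	return m < c ? m : c;
}

int main(void)
{
	unsigned int bio_limit = BIO_MAX_VECS * PAGE_SECTORS;	/* 2048 sectors, 1 MiB */
	unsigned int key_limit = (1u << KEY_SIZE_BITS) - 1;	/* 65535 sectors */
	unsigned int size_limit = bio_limit < key_limit ? bio_limit : key_limit;

	/* e.g. a 3 MiB miss (6144 sectors) inside a 4 MiB bio (8192 sectors) */
	unsigned int sectors = 6144, bio_sectors = 8192;

	/* prints 2048: the bio bvec limit wins, no more oversized inserts */
	printf("insert_bio_sectors = %u\n",
	       min3u(size_limit, sectors, bio_sectors));
	return 0;
}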
drivers/md/bcache/stats.c
@@ -46,7 +46,6 @@ read_attribute(cache_misses);
 read_attribute(cache_bypass_hits);
 read_attribute(cache_bypass_misses);
 read_attribute(cache_hit_ratio);
-read_attribute(cache_readaheads);
 read_attribute(cache_miss_collisions);
 read_attribute(bypassed);
@@ -64,7 +63,6 @@ SHOW(bch_stats)
 	DIV_SAFE(var(cache_hits) * 100,
 		 var(cache_hits) + var(cache_misses)));
-	var_print(cache_readaheads);
 	var_print(cache_miss_collisions);
 	sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
 #undef var
@@ -86,7 +84,6 @@ static struct attribute *bch_stats_files[] = {
 	&sysfs_cache_bypass_hits,
 	&sysfs_cache_bypass_misses,
 	&sysfs_cache_hit_ratio,
-	&sysfs_cache_readaheads,
 	&sysfs_cache_miss_collisions,
 	&sysfs_bypassed,
 	NULL
@@ -113,7 +110,6 @@ void bch_cache_accounting_clear(struct cache_accounting *acc)
 	acc->total.cache_misses = 0;
 	acc->total.cache_bypass_hits = 0;
 	acc->total.cache_bypass_misses = 0;
-	acc->total.cache_readaheads = 0;
 	acc->total.cache_miss_collisions = 0;
 	acc->total.sectors_bypassed = 0;
 }
@@ -145,7 +141,6 @@ static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
 		scale_stat(&stats->cache_misses);
 		scale_stat(&stats->cache_bypass_hits);
 		scale_stat(&stats->cache_bypass_misses);
-		scale_stat(&stats->cache_readaheads);
 		scale_stat(&stats->cache_miss_collisions);
 		scale_stat(&stats->sectors_bypassed);
 	}
@@ -168,7 +163,6 @@ static void scale_accounting(struct timer_list *t)
 	move_stat(cache_misses);
 	move_stat(cache_bypass_hits);
 	move_stat(cache_bypass_misses);
-	move_stat(cache_readaheads);
 	move_stat(cache_miss_collisions);
 	move_stat(sectors_bypassed);
@@ -209,14 +203,6 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
 	mark_cache_stats(&c->accounting.collector, hit, bypass);
 }
 
-void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
-{
-	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
-	atomic_inc(&dc->accounting.collector.cache_readaheads);
-	atomic_inc(&c->accounting.collector.cache_readaheads);
-}
-
 void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
 {
 	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
drivers/md/bcache/stats.h
@@ -7,7 +7,6 @@ struct cache_stat_collector {
 	atomic_t cache_misses;
 	atomic_t cache_bypass_hits;
 	atomic_t cache_bypass_misses;
-	atomic_t cache_readaheads;
 	atomic_t cache_miss_collisions;
 	atomic_t sectors_bypassed;
 };
drivers/md/bcache/sysfs.c
@@ -137,7 +137,6 @@ rw_attribute(io_disable);
 rw_attribute(discard);
 rw_attribute(running);
 rw_attribute(label);
-rw_attribute(readahead);
 rw_attribute(errors);
 rw_attribute(io_error_limit);
 rw_attribute(io_error_halflife);
@@ -260,7 +259,6 @@ SHOW(__bch_cached_dev)
 	var_printf(partial_stripes_expensive,	"%u");
 
 	var_hprint(sequential_cutoff);
-	var_hprint(readahead);
 
 	sysfs_print(running,		atomic_read(&dc->running));
 	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);
@@ -365,7 +363,6 @@ STORE(__cached_dev)
 	sysfs_strtoul_clamp(sequential_cutoff,
 			    dc->sequential_cutoff,
 			    0, UINT_MAX);
-	d_strtoi_h(readahead);
 
 	if (attr == &sysfs_clear_stats)
 		bch_cache_accounting_clear(&dc->accounting);
@@ -538,7 +535,6 @@ static struct attribute *bch_cached_dev_files[] = {
 	&sysfs_running,
 	&sysfs_state,
 	&sysfs_label,
-	&sysfs_readahead,
 #ifdef CONFIG_BCACHE_DEBUG
 	&sysfs_verify,
 	&sysfs_bypass_torture_test,