Commit 8aee1220 authored by Kent Overstreet

bcache: Kill sequential_merge option

It never really made sense to expose this, so just kill it.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 50310164
...@@ -364,7 +364,6 @@ struct cached_dev { ...@@ -364,7 +364,6 @@ struct cached_dev {
unsigned sequential_cutoff; unsigned sequential_cutoff;
unsigned readahead; unsigned readahead;
unsigned sequential_merge:1;
unsigned verify:1; unsigned verify:1;
unsigned partial_stripes_expensive:1; unsigned partial_stripes_expensive:1;
......
...@@ -510,6 +510,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) ...@@ -510,6 +510,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
unsigned mode = cache_mode(dc, bio); unsigned mode = cache_mode(dc, bio);
unsigned sectors, congested = bch_get_congested(c); unsigned sectors, congested = bch_get_congested(c);
struct task_struct *task = current; struct task_struct *task = current;
struct io *i;
if (atomic_read(&dc->disk.detaching) || if (atomic_read(&dc->disk.detaching) ||
c->gc_stats.in_use > CUTOFF_CACHE_ADD || c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
...@@ -536,38 +537,30 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) ...@@ -536,38 +537,30 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
(bio->bi_rw & REQ_SYNC)) (bio->bi_rw & REQ_SYNC))
goto rescale; goto rescale;
if (dc->sequential_merge) { spin_lock(&dc->io_lock);
struct io *i;
spin_lock(&dc->io_lock); hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
if (i->last == bio->bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) i = list_first_entry(&dc->io_lru, struct io, lru);
if (i->last == bio->bi_sector &&
time_before(jiffies, i->jiffies))
goto found;
i = list_first_entry(&dc->io_lru, struct io, lru); add_sequential(task);
i->sequential = 0;
add_sequential(task);
i->sequential = 0;
found: found:
if (i->sequential + bio->bi_size > i->sequential) if (i->sequential + bio->bi_size > i->sequential)
i->sequential += bio->bi_size; i->sequential += bio->bi_size;
i->last = bio_end_sector(bio);
i->jiffies = jiffies + msecs_to_jiffies(5000);
task->sequential_io = i->sequential;
hlist_del(&i->hash); i->last = bio_end_sector(bio);
hlist_add_head(&i->hash, iohash(dc, i->last)); i->jiffies = jiffies + msecs_to_jiffies(5000);
list_move_tail(&i->lru, &dc->io_lru); task->sequential_io = i->sequential;
spin_unlock(&dc->io_lock); hlist_del(&i->hash);
} else { hlist_add_head(&i->hash, iohash(dc, i->last));
task->sequential_io = bio->bi_size; list_move_tail(&i->lru, &dc->io_lru);
add_sequential(task); spin_unlock(&dc->io_lock);
}
sectors = max(task->sequential_io, sectors = max(task->sequential_io,
task->sequential_io_avg) >> 9; task->sequential_io_avg) >> 9;
......
...@@ -1079,7 +1079,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) ...@@ -1079,7 +1079,6 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
spin_lock_init(&dc->io_lock); spin_lock_init(&dc->io_lock);
bch_cache_accounting_init(&dc->accounting, &dc->disk.cl); bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
dc->sequential_merge = true;
dc->sequential_cutoff = 4 << 20; dc->sequential_cutoff = 4 << 20;
for (io = dc->io; io < dc->io + RECENT_IO; io++) { for (io = dc->io; io < dc->io + RECENT_IO; io++) {
......
...@@ -72,7 +72,6 @@ rw_attribute(congested_read_threshold_us); ...@@ -72,7 +72,6 @@ rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us); rw_attribute(congested_write_threshold_us);
rw_attribute(sequential_cutoff); rw_attribute(sequential_cutoff);
rw_attribute(sequential_merge);
rw_attribute(data_csum); rw_attribute(data_csum);
rw_attribute(cache_mode); rw_attribute(cache_mode);
rw_attribute(writeback_metadata); rw_attribute(writeback_metadata);
...@@ -161,7 +160,6 @@ SHOW(__bch_cached_dev) ...@@ -161,7 +160,6 @@ SHOW(__bch_cached_dev)
sysfs_hprint(stripe_size, dc->disk.stripe_size << 9); sysfs_hprint(stripe_size, dc->disk.stripe_size << 9);
var_printf(partial_stripes_expensive, "%u"); var_printf(partial_stripes_expensive, "%u");
var_printf(sequential_merge, "%i");
var_hprint(sequential_cutoff); var_hprint(sequential_cutoff);
var_hprint(readahead); var_hprint(readahead);
...@@ -207,7 +205,6 @@ STORE(__cached_dev) ...@@ -207,7 +205,6 @@ STORE(__cached_dev)
dc->writeback_rate_p_term_inverse, 1, INT_MAX); dc->writeback_rate_p_term_inverse, 1, INT_MAX);
d_strtoul(writeback_rate_d_smooth); d_strtoul(writeback_rate_d_smooth);
d_strtoul(sequential_merge);
d_strtoi_h(sequential_cutoff); d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead); d_strtoi_h(readahead);
...@@ -319,7 +316,6 @@ static struct attribute *bch_cached_dev_files[] = { ...@@ -319,7 +316,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_stripe_size, &sysfs_stripe_size,
&sysfs_partial_stripes_expensive, &sysfs_partial_stripes_expensive,
&sysfs_sequential_cutoff, &sysfs_sequential_cutoff,
&sysfs_sequential_merge,
&sysfs_clear_stats, &sysfs_clear_stats,
&sysfs_running, &sysfs_running,
&sysfs_state, &sysfs_state,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment