Commit 220bb38c authored by Kent Overstreet

bcache: Break up struct search

With all the recent refactoring around struct btree_op, struct search has
gotten rather large.

But we can now easily break it up in a different way - we break out
struct data_insert_op, which is for inserting data into the cache, and
that's now what the copying gc code uses - struct search is now specific
to request.c.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent cc7b8819
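For orientation, here is a condensed sketch of how the copying gc path now carries its own struct data_insert_op instead of a full struct search. It is assembled from the new-side lines of the movinggc.c hunks below (no code beyond what the diff itself shows); error paths are omitted:

	struct moving_io {
		struct closure		cl;
		struct keybuf_key	*w;
		struct data_insert_op	op;	/* insert state, formerly a whole struct search */
		struct bbio		bio;
	};

	static void write_moving(struct closure *cl)
	{
		struct moving_io *io = container_of(cl, struct moving_io, cl);
		struct data_insert_op *op = &io->op;

		if (!op->error) {
			moving_init(io);

			io->bio.bio.bi_sector = KEY_START(&io->w->key);
			op->write_prio	= 1;
			op->bio		= &io->bio.bio;
			op->writeback	= KEY_DIRTY(&io->w->key);
			op->csum	= KEY_CSUM(&io->w->key);

			bkey_copy(&op->replace_key, &io->w->key);
			op->replace	= true;

			/* run the insert as a child closure of the moving_io's closure */
			closure_call(&op->cl, bch_data_insert, NULL, cl);
		}

		continue_at(cl, write_moving_finish, system_wq);
	}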
@@ -8,7 +8,6 @@
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include <linux/console.h>
#include <linux/debugfs.h>
@@ -176,42 +175,25 @@ void bch_btree_verify(struct btree *b, struct bset *new)
mutex_unlock(&b->c->verify_lock);
}
static void data_verify_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
closure_put(cl);
}
void bch_data_verify(struct search *s)
void bch_data_verify(struct cached_dev *dc, struct bio *bio)
{
char name[BDEVNAME_SIZE];
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct closure *cl = &s->cl;
struct bio *check;
struct bio_vec *bv;
int i;
if (!s->unaligned_bvec)
bio_for_each_segment(bv, s->orig_bio, i)
bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
check = bio_clone(s->orig_bio, GFP_NOIO);
check = bio_clone(bio, GFP_NOIO);
if (!check)
return;
if (bio_alloc_pages(check, GFP_NOIO))
goto out_put;
check->bi_rw = READ_SYNC;
check->bi_private = cl;
check->bi_end_io = data_verify_endio;
closure_bio_submit(check, cl, &dc->disk);
closure_sync(cl);
submit_bio_wait(READ_SYNC, check);
bio_for_each_segment(bv, s->orig_bio, i) {
void *p1 = kmap(bv->bv_page);
void *p2 = kmap(check->bi_io_vec[i].bv_page);
bio_for_each_segment(bv, bio, i) {
void *p1 = kmap_atomic(bv->bv_page);
void *p2 = page_address(check->bi_io_vec[i].bv_page);
if (memcmp(p1 + bv->bv_offset,
p2 + bv->bv_offset,
@@ -219,13 +201,11 @@ void bch_data_verify(struct search *s)
printk(KERN_ERR
"bcache (%s): verify failed at sector %llu\n",
bdevname(dc->bdev, name),
(uint64_t) s->orig_bio->bi_sector);
kunmap(bv->bv_page);
kunmap(check->bi_io_vec[i].bv_page);
(uint64_t) bio->bi_sector);
kunmap_atomic(p1);
}
__bio_for_each_segment(bv, check, i, 0)
bio_for_each_segment_all(bv, check, i)
__free_page(bv->bv_page);
out_put:
bio_put(check);
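With the endio callback and closure plumbing gone, the verify read is now just cloned and submitted synchronously. Stripped of the page-comparison loop, the pattern in the hunk above is roughly:

	check = bio_clone(bio, GFP_NOIO);
	if (check && !bio_alloc_pages(check, GFP_NOIO)) {
		check->bi_rw = READ_SYNC;
		submit_bio_wait(READ_SYNC, check);
		/* ... compare check's pages against the original bio ... */
	}
	if (check)
		bio_put(check);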
@@ -29,12 +29,12 @@ void bch_check_keys(struct btree *, const char *, ...);
#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_verify(struct btree *, struct bset *);
void bch_data_verify(struct search *);
void bch_data_verify(struct cached_dev *, struct bio *);
#else /* DEBUG */
static inline void bch_btree_verify(struct btree *b, struct bset *i) {}
static inline void bch_data_verify(struct search *s) {};
static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {};
#endif
@@ -12,8 +12,9 @@
#include <trace/events/bcache.h>
struct moving_io {
struct closure cl;
struct keybuf_key *w;
struct search s;
struct data_insert_op op;
struct bbio bio;
};
@@ -38,13 +39,13 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, s.cl);
struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io);
}
static void write_moving_finish(struct closure *cl)
{
struct moving_io *io = container_of(cl, struct moving_io, s.cl);
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
struct bio_vec *bv;
int i;
@@ -52,12 +53,12 @@ static void write_moving_finish(struct closure *cl)
bio_for_each_segment_all(bv, bio, i)
__free_page(bv->bv_page);
if (io->s.insert_collision)
if (io->op.replace_collision)
trace_bcache_gc_copy_collision(&io->w->key);
bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);
up(&io->s.c->moving_in_flight);
up(&io->op.c->moving_in_flight);
closure_return_with_destructor(cl, moving_io_destructor);
}
@@ -65,12 +66,12 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, s.cl);
struct moving_io, cl);
if (error)
io->s.error = error;
io->op.error = error;
bch_bbio_endio(io->s.c, bio, error, "reading data to move");
bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
static void moving_init(struct moving_io *io)
@@ -84,32 +85,30 @@ static void moving_init(struct moving_io *io)
bio->bi_size = KEY_SIZE(&io->w->key) << 9;
bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
PAGE_SECTORS);
bio->bi_private = &io->s.cl;
bio->bi_private = &io->cl;
bio->bi_io_vec = bio->bi_inline_vecs;
bch_bio_map(bio, NULL);
}
static void write_moving(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct moving_io *io = container_of(s, struct moving_io, s);
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct data_insert_op *op = &io->op;
if (!s->error) {
if (!op->error) {
moving_init(io);
io->bio.bio.bi_sector = KEY_START(&io->w->key);
s->op.lock = -1;
s->write_prio = 1;
s->cache_bio = &io->bio.bio;
op->write_prio = 1;
op->bio = &io->bio.bio;
s->writeback = KEY_DIRTY(&io->w->key);
s->csum = KEY_CSUM(&io->w->key);
op->writeback = KEY_DIRTY(&io->w->key);
op->csum = KEY_CSUM(&io->w->key);
bkey_copy(&s->replace_key, &io->w->key);
s->replace = true;
bkey_copy(&op->replace_key, &io->w->key);
op->replace = true;
closure_init(&s->btree, cl);
bch_data_insert(&s->btree);
closure_call(&op->cl, bch_data_insert, NULL, cl);
}
continue_at(cl, write_moving_finish, system_wq);
@@ -117,11 +116,10 @@ static void write_moving(struct closure *cl)
static void read_moving_submit(struct closure *cl)
{
struct search *s = container_of(cl, struct search, cl);
struct moving_io *io = container_of(s, struct moving_io, s);
struct moving_io *io = container_of(cl, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, s->c, &io->w->key, 0);
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
continue_at(cl, write_moving, system_wq);
}
@@ -151,8 +149,8 @@ static void read_moving(struct cache_set *c)
w->private = io;
io->w = w;
io->s.inode = KEY_INODE(&w->key);
io->s.c = c;
io->op.inode = KEY_INODE(&w->key);
io->op.c = c;
moving_init(io);
bio = &io->bio.bio;
@@ -166,7 +164,7 @@ static void read_moving(struct cache_set *c)
trace_bcache_gc_copy(&w->key);
down(&c->moving_in_flight);
closure_call(&io->s.cl, read_moving_submit, NULL, &cl);
closure_call(&io->cl, read_moving_submit, NULL, &cl);
}
if (0) {
This diff is collapsed.
@@ -3,46 +3,25 @@
#include <linux/cgroup.h>
struct search {
/* Stack frame for bio_complete */
struct data_insert_op {
struct closure cl;
struct closure btree;
struct bcache_device *d;
struct cache_set *c;
struct task_struct *task;
struct bbio bio;
struct bio *orig_bio;
struct bio *cache_miss;
/* Bio to be inserted into the cache */
struct bio *cache_bio;
unsigned cache_bio_sectors;
struct bio *bio;
unsigned inode;
uint16_t write_prio;
short error;
unsigned recoverable:1;
unsigned unaligned_bvec:1;
unsigned write:1;
unsigned writeback:1;
unsigned csum:1;
unsigned bypass:1;
unsigned writeback:1;
unsigned flush_journal:1;
unsigned csum:1;
unsigned insert_data_done:1;
unsigned replace:1;
unsigned insert_collision:1;
uint16_t write_prio;
/* IO error returned to s->bio */
short error;
unsigned long start_time;
unsigned replace_collision:1;
struct btree_op op;
unsigned insert_data_done:1;
/* Anything past this point won't get zeroed in search_alloc() */
struct keylist insert_keys;
@@ -7,7 +7,6 @@
#include "bcache.h"
#include "stats.h"
#include "btree.h"
#include "request.h"
#include "sysfs.h"
/*
@@ -196,35 +195,36 @@ static void mark_cache_stats(struct cache_stat_collector *stats,
atomic_inc(&stats->cache_bypass_misses);
}
void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass)
{
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&s->c->accounting.collector, hit, bypass);
mark_cache_stats(&c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
}
void bch_mark_cache_readahead(struct search *s)
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads);
atomic_inc(&s->c->accounting.collector.cache_readaheads);
atomic_inc(&c->accounting.collector.cache_readaheads);
}
void bch_mark_cache_miss_collision(struct search *s)
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions);
atomic_inc(&s->c->accounting.collector.cache_miss_collisions);
atomic_inc(&c->accounting.collector.cache_miss_collisions);
}
void bch_mark_sectors_bypassed(struct search *s, int sectors)
void bch_mark_sectors_bypassed(struct cache_set *c, struct cached_dev *dc,
int sectors)
{
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
atomic_add(sectors, &s->c->accounting.collector.sectors_bypassed);
atomic_add(sectors, &c->accounting.collector.sectors_bypassed);
}
void bch_cache_accounting_init(struct cache_accounting *acc,
@@ -38,7 +38,9 @@ struct cache_accounting {
struct cache_stats day;
};
struct search;
struct cache_set;
struct cached_dev;
struct bcache_device;
void bch_cache_accounting_init(struct cache_accounting *acc,
struct closure *parent);
@@ -50,9 +52,10 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc);
void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass);
void bch_mark_cache_readahead(struct search *s);
void bch_mark_cache_miss_collision(struct search *s);
void bch_mark_sectors_bypassed(struct search *s, int sectors);
void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *,
bool, bool);
void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *);
void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *);
void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int);
#endif /* _BCACHE_STATS_H_ */
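Under the new prototypes above, callers pass the cache_set and the device (or cached_dev) explicitly instead of a struct search. A hypothetical call site, assuming a struct cache_set *c, a struct cached_dev *dc and the request's bio are in scope (the real request.c callers are in the collapsed diff):

	/* account a bypassed request against both the backing device and the cache set */
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));

	/* readahead accounting now takes the bcache_device embedded in the cached_dev */
	bch_mark_cache_readahead(c, &dc->disk);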
#include "bcache.h"
#include "btree.h"
#include "request.h"
#include <linux/blktrace_api.h>
#include <linux/module.h>
@@ -6,11 +6,9 @@
#include <linux/tracepoint.h>
struct search;
DECLARE_EVENT_CLASS(bcache_request,
TP_PROTO(struct search *s, struct bio *bio),
TP_ARGS(s, bio),
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio),
TP_STRUCT__entry(
__field(dev_t, dev )
@@ -24,8 +22,8 @@ DECLARE_EVENT_CLASS(bcache_request,
TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev;
__entry->orig_major = s->d->disk->major;
__entry->orig_minor = s->d->disk->first_minor;
__entry->orig_major = d->disk->major;
__entry->orig_minor = d->disk->first_minor;
__entry->sector = bio->bi_sector;
__entry->orig_sector = bio->bi_sector - 16;
__entry->nr_sector = bio->bi_size >> 9;
@@ -79,13 +77,13 @@ DECLARE_EVENT_CLASS(btree_node,
/* request.c */
DEFINE_EVENT(bcache_request, bcache_request_start,
TP_PROTO(struct search *s, struct bio *bio),
TP_ARGS(s, bio)
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio)
);
DEFINE_EVENT(bcache_request, bcache_request_end,
TP_PROTO(struct search *s, struct bio *bio),
TP_ARGS(s, bio)
TP_PROTO(struct bcache_device *d, struct bio *bio),
TP_ARGS(d, bio)
);
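With the event class reworked to take a struct bcache_device *, the generated tracepoints are now called with the device rather than a struct search. A minimal illustrative caller (the function name here is made up; the real call sites are in the collapsed request.c diff):

	static void example_trace_request(struct bcache_device *d, struct bio *bio)
	{
		/* DEFINE_EVENT above generates trace_bcache_request_start()/_end()
		 * with the (struct bcache_device *, struct bio *) prototype */
		trace_bcache_request_start(d, bio);
		trace_bcache_request_end(d, bio);
	}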
DECLARE_EVENT_CLASS(bcache_bio,