Commit 3be11dba authored by Coly Li, committed by Jens Axboe

bcache: fix code comments style

This patch fixes three comment style issues reported by checkpatch.pl:
- comment lines are not aligned
- block comments put text on the opening "/*" line instead of starting the text on the next line
- block comments do not place the trailing "*/" on its own line
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Shenghui Wang <shhuiw@foxmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3069211b
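
For reference, the three warnings above all concern the kernel's preferred block-comment layout. Below is a minimal C sketch of that layout; the function names and comment text are made up purely for illustration and are not part of this patch:

#include <stdio.h>

/* A single-line comment may open and close on the same line. */

/*
 * A multi-line comment opens with the comment marker alone on its first
 * line, prefixes every following line with an aligned " * ", and places
 * the closing marker alone on the last line.
 */
static int example_comment_style(void)	/* hypothetical name, for illustration only */
{
	return 0;
}

int main(void)
{
	printf("%d\n", example_comment_style());
	return 0;
}

The hunks below apply exactly this layout to existing bcache comments.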
@@ -402,7 +402,8 @@ static unsigned int inorder_prev(unsigned int j, unsigned int size)
 	return j;
 }
 
-/* I have no idea why this code works... and I'm the one who wrote it
+/*
+ * I have no idea why this code works... and I'm the one who wrote it
  *
  * However, I do know what it does:
  * Given a binary tree constructed in an array (i.e. how you normally implement
@@ -795,7 +796,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
 	if (!t->size)
 		return;
 
-	/* k is the key we just inserted; we need to find the entry in the
+	/*
+	 * k is the key we just inserted; we need to find the entry in the
 	 * lookup table for the first key that is strictly greater than k:
 	 * it's either k's cacheline or the next one
 	 */
@@ -803,7 +805,8 @@ static void bch_bset_fix_lookup_table(struct btree_keys *b,
 	       table_to_bkey(t, j) <= k)
 		j++;
 
-	/* Adjust all the lookup table entries, and find a new key for any that
+	/*
+	 * Adjust all the lookup table entries, and find a new key for any that
 	 * have gotten too big
 	 */
 	for (; j < t->size; j++) {
...
@@ -465,8 +465,8 @@ static struct uuid_entry *uuid_find_empty(struct cache_set *c)
  * Bucket priorities/gens:
  *
  * For each bucket, we store on disk its
- *    8 bit gen
- *   16 bit priority
+ *   8 bit gen
+ *  16 bit priority
  *
  * See alloc.c for an explanation of the gen. The priority is used to implement
  * lru (and in the future other) cache replacement policies; for most purposes
@@ -934,8 +934,10 @@ void bch_cached_dev_run(struct cached_dev *dc)
 
 	add_disk(d->disk);
 	bd_link_disk_holder(dc->bdev, dc->disk.disk);
-	/* won't show up in the uevent file, use udevadm monitor -e instead
-	 * only class / kset properties are persistent */
+	/*
+	 * won't show up in the uevent file, use udevadm monitor -e instead
+	 * only class / kset properties are persistent
+	 */
 	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
 	kfree(env[1]);
 	kfree(env[2]);
@@ -1104,8 +1106,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 		}
 	}
 
-	/* Deadlocks since we're called via sysfs...
-	sysfs_remove_file(&dc->kobj, &sysfs_attach);
+	/*
+	 * Deadlocks since we're called via sysfs...
+	 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
 	 */
 
 	if (bch_is_zero(u->uuid, 16)) {
@@ -1468,9 +1471,10 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
 
 	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
 		pr_info("CACHE_SET_IO_DISABLE already set");
 
-	/* XXX: we can be called from atomic context
-	acquire_console_sem();
-	*/
+	/*
+	 * XXX: we can be called from atomic context
+	 * acquire_console_sem();
+	 */
 
 	pr_err("bcache: error on %pU: ", c->sb.set_uuid);
...
@@ -468,7 +468,8 @@ static void read_dirty(struct cached_dev *dc)
 
 		down(&dc->in_flight);
 
-		/* We've acquired a semaphore for the maximum
+		/*
+		 * We've acquired a semaphore for the maximum
 		 * simultaneous number of writebacks; from here
 		 * everything happens asynchronously.
 		 */
...