Commit 6f10f7d1 authored by Coly Li, committed by Jens Axboe

bcache: style fix to replace 'unsigned' by 'unsigned int'

This patch fixes the warnings reported by checkpatch.pl by replacing bare
'unsigned' with 'unsigned int'.
Signed-off-by: Coly Li <colyli@suse.de>
Reviewed-by: Shenghui Wang <shhuiw@foxmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b86d865c
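For readers skimming the diff below: 'unsigned' and 'unsigned int' name the same C type, so the substitution is purely cosmetic and changes no behaviour. checkpatch.pl flags bare 'unsigned' with a warning along the lines of "Prefer 'unsigned int' to bare use of 'unsigned'" (exact wording may vary by kernel version), and the whole patch is that one-word fix applied throughout the driver, for example:

-unsigned i;
+unsigned int i;

The only hunks that add lines (bch_alloc_sectors(), bch_submit_bbio()) do so because the longer spelling would overflow the line-length limit, so their parameter lists are re-wrapped one per line.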
@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{
struct cache *ca;
struct bucket *b;
-unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
-unsigned i;
+unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
+unsigned int i;
int r;
atomic_sub(sectors, &c->rescale);
@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
#define bucket_prio(b) \
({ \
-unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
+unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
\
(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
})
@@ -301,7 +301,7 @@ do { \
static int bch_allocator_push(struct cache *ca, long bucket)
{
-unsigned i;
+unsigned int i;
/* Prios/gens are actually the most important reserve */
if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
@@ -385,7 +385,7 @@ static int bch_allocator_thread(void *arg)
/* Allocation */
-long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
+long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{
DEFINE_WAIT(w);
struct bucket *b;
@@ -421,7 +421,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
if (expensive_debug_checks(ca->set)) {
size_t iter;
long i;
-unsigned j;
+unsigned int j;
for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
@@ -470,14 +470,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
-unsigned i;
+unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
__bch_bucket_free(PTR_CACHE(c, k, i),
PTR_BUCKET(c, k, i));
}
-int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait)
{
int i;
@@ -510,7 +510,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
return -1;
}
-int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
+int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait)
{
int ret;
@@ -524,8 +524,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct open_bucket {
struct list_head list;
-unsigned last_write_point;
-unsigned sectors_free;
+unsigned int last_write_point;
+unsigned int sectors_free;
BKEY_PADDED(key);
};
@@ -556,7 +556,7 @@ struct open_bucket {
*/
static struct open_bucket *pick_data_bucket(struct cache_set *c,
const struct bkey *search,
-unsigned write_point,
+unsigned int write_point,
struct bkey *alloc)
{
struct open_bucket *ret, *ret_task = NULL;
@@ -595,12 +595,16 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
*
* If s->writeback is true, will not fail.
*/
-bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
-unsigned write_point, unsigned write_prio, bool wait)
+bool bch_alloc_sectors(struct cache_set *c,
+struct bkey *k,
+unsigned int sectors,
+unsigned int write_point,
+unsigned int write_prio,
+bool wait)
{
struct open_bucket *b;
BKEY_PADDED(key) alloc;
-unsigned i;
+unsigned int i;
/*
* We might have to allocate a new bucket, which we can't do with a
@@ -613,7 +617,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
spin_lock(&c->data_bucket_lock);
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
-unsigned watermark = write_prio
+unsigned int watermark = write_prio
? RESERVE_MOVINGGC
: RESERVE_NONE;
...
@@ -252,7 +252,7 @@ struct bcache_device {
struct kobject kobj;
struct cache_set *c;
-unsigned id;
+unsigned int id;
#define BCACHEDEVNAME_SIZE 12
char name[BCACHEDEVNAME_SIZE];
@@ -264,18 +264,18 @@ struct bcache_device {
#define BCACHE_DEV_UNLINK_DONE 2
#define BCACHE_DEV_WB_RUNNING 3
#define BCACHE_DEV_RATE_DW_RUNNING 4
-unsigned nr_stripes;
-unsigned stripe_size;
+unsigned int nr_stripes;
+unsigned int stripe_size;
atomic_t *stripe_sectors_dirty;
unsigned long *full_dirty_stripes;
struct bio_set bio_split;
-unsigned data_csum:1;
+unsigned int data_csum:1;
int (*cache_miss)(struct btree *, struct search *,
-struct bio *, unsigned);
-int (*ioctl) (struct bcache_device *, fmode_t, unsigned, unsigned long);
+struct bio *, unsigned int);
+int (*ioctl) (struct bcache_device *, fmode_t, unsigned int, unsigned long);
};
struct io {
@@ -284,7 +284,7 @@ struct io {
struct list_head lru;
unsigned long jiffies;
-unsigned sequential;
+unsigned int sequential;
sector_t last;
};
@@ -358,18 +358,18 @@ struct cached_dev {
struct cache_accounting accounting;
/* The rest of this all shows up in sysfs */
-unsigned sequential_cutoff;
-unsigned readahead;
-unsigned io_disable:1;
-unsigned verify:1;
-unsigned bypass_torture_test:1;
-unsigned partial_stripes_expensive:1;
-unsigned writeback_metadata:1;
-unsigned writeback_running:1;
+unsigned int sequential_cutoff;
+unsigned int readahead;
+unsigned int io_disable:1;
+unsigned int verify:1;
+unsigned int bypass_torture_test:1;
+unsigned int partial_stripes_expensive:1;
+unsigned int writeback_metadata:1;
+unsigned int writeback_running:1;
unsigned char writeback_percent;
-unsigned writeback_delay;
+unsigned int writeback_delay;
uint64_t writeback_rate_target;
int64_t writeback_rate_proportional;
@@ -377,16 +377,16 @@ struct cached_dev {
int64_t writeback_rate_integral_scaled;
int32_t writeback_rate_change;
-unsigned writeback_rate_update_seconds;
-unsigned writeback_rate_i_term_inverse;
-unsigned writeback_rate_p_term_inverse;
-unsigned writeback_rate_minimum;
+unsigned int writeback_rate_update_seconds;
+unsigned int writeback_rate_i_term_inverse;
+unsigned int writeback_rate_p_term_inverse;
+unsigned int writeback_rate_minimum;
enum stop_on_failure stop_when_cache_set_failed;
#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64
atomic_t io_errors;
-unsigned error_limit;
-unsigned offline_seconds;
+unsigned int error_limit;
+unsigned int offline_seconds;
char backing_dev_name[BDEVNAME_SIZE];
};
@@ -447,7 +447,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu
*/
-unsigned invalidate_needs_gc;
+unsigned int invalidate_needs_gc;
bool discard; /* Get rid of? */
@@ -472,7 +472,7 @@ struct gc_stat {
size_t nkeys;
uint64_t data; /* sectors */
-unsigned in_use; /* percent */
+unsigned int in_use; /* percent */
};
/*
@@ -518,7 +518,7 @@ struct cache_set {
int caches_loaded;
struct bcache_device **devices;
-unsigned devices_max_used;
+unsigned int devices_max_used;
atomic_t attached_dev_nr;
struct list_head cached_devs;
uint64_t cached_dev_sectors;
@@ -548,7 +548,7 @@ struct cache_set {
* Default number of pages for a new btree node - may be less than a
* full bucket
*/
-unsigned btree_pages;
+unsigned int btree_pages;
/*
* Lists of struct btrees; lru is the list for structs that have memory
@@ -571,7 +571,7 @@ struct cache_set {
struct list_head btree_cache_freed;
/* Number of elements in btree_cache + btree_cache_freeable lists */
-unsigned btree_cache_used;
+unsigned int btree_cache_used;
/*
* If we need to allocate memory for a new btree node and that
@@ -649,7 +649,7 @@ struct cache_set {
struct mutex verify_lock;
#endif
-unsigned nr_uuids;
+unsigned int nr_uuids;
struct uuid_entry *uuids;
BKEY_PADDED(uuid_bucket);
struct closure uuid_write;
@@ -670,12 +670,12 @@ struct cache_set {
struct journal journal;
#define CONGESTED_MAX 1024
-unsigned congested_last_us;
+unsigned int congested_last_us;
atomic_t congested;
/* The rest of this all shows up in sysfs */
-unsigned congested_read_threshold_us;
-unsigned congested_write_threshold_us;
+unsigned int congested_read_threshold_us;
+unsigned int congested_write_threshold_us;
struct time_stats btree_gc_time;
struct time_stats btree_split_time;
@@ -694,16 +694,16 @@ struct cache_set {
ON_ERROR_PANIC,
} on_error;
#define DEFAULT_IO_ERROR_LIMIT 8
-unsigned error_limit;
-unsigned error_decay;
+unsigned int error_limit;
+unsigned int error_decay;
unsigned short journal_delay_ms;
bool expensive_debug_checks;
-unsigned verify:1;
-unsigned key_merging_disabled:1;
-unsigned gc_always_rewrite:1;
-unsigned shrinker_disabled:1;
-unsigned copy_gc_enabled:1;
+unsigned int verify:1;
+unsigned int key_merging_disabled:1;
+unsigned int gc_always_rewrite:1;
+unsigned int shrinker_disabled:1;
+unsigned int copy_gc_enabled:1;
#define BUCKET_HASH_BITS 12
struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
@@ -712,7 +712,7 @@ struct cache_set {
};
struct bbio {
-unsigned submit_time_us;
+unsigned int submit_time_us;
union {
struct bkey key;
uint64_t _pad[3];
@@ -729,10 +729,10 @@ struct bbio {
#define btree_bytes(c) ((c)->btree_pages * PAGE_SIZE)
#define btree_blocks(b) \
-((unsigned) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
+((unsigned int) (KEY_SIZE(&b->key) >> (b)->c->block_bits))
#define btree_default_blocks(c) \
-((unsigned) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
+((unsigned int) ((PAGE_SECTORS * (c)->btree_pages) >> (c)->block_bits))
#define bucket_pages(c) ((c)->sb.bucket_size / PAGE_SECTORS)
#define bucket_bytes(c) ((c)->sb.bucket_size << 9)
@@ -761,21 +761,21 @@ static inline sector_t bucket_remainder(struct cache_set *c, sector_t s)
static inline struct cache *PTR_CACHE(struct cache_set *c,
const struct bkey *k,
-unsigned ptr)
+unsigned int ptr)
{
return c->cache[PTR_DEV(k, ptr)];
}
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
const struct bkey *k,
-unsigned ptr)
+unsigned int ptr)
{
return sector_to_bucket(c, PTR_OFFSET(k, ptr));
}
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
const struct bkey *k,
-unsigned ptr)
+unsigned int ptr)
{
return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
}
@@ -787,13 +787,13 @@ static inline uint8_t gen_after(uint8_t a, uint8_t b)
}
static inline uint8_t ptr_stale(struct cache_set *c, const struct bkey *k,
-unsigned i)
+unsigned int i)
{
return gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i));
}
static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
-unsigned i)
+unsigned int i)
{
return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
}
@@ -888,7 +888,7 @@ static inline uint8_t bucket_gc_gen(struct bucket *b)
static inline void wake_up_allocators(struct cache_set *c)
{
struct cache *ca;
-unsigned i;
+unsigned int i;
for_each_cache(ca, c, i)
wake_up_process(ca->alloc_thread);
@@ -933,7 +933,8 @@ void bch_bbio_free(struct bio *, struct cache_set *);
struct bio *bch_bbio_alloc(struct cache_set *);
void __bch_submit_bbio(struct bio *, struct cache_set *);
-void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
+void bch_submit_bbio(struct bio *, struct cache_set *,
+struct bkey *, unsigned int);
uint8_t bch_inc_gen(struct cache *, struct bucket *);
void bch_rescale_priorities(struct cache_set *, int);
@@ -944,13 +945,13 @@ void __bch_invalidate_one_bucket(struct cache *, struct bucket *);
void __bch_bucket_free(struct cache *, struct bucket *);
void bch_bucket_free(struct cache_set *, struct bkey *);
-long bch_bucket_alloc(struct cache *, unsigned, bool);
-int __bch_bucket_alloc_set(struct cache_set *, unsigned,
+long bch_bucket_alloc(struct cache *, unsigned int, bool);
+int __bch_bucket_alloc_set(struct cache_set *, unsigned int,
struct bkey *, int, bool);
-int bch_bucket_alloc_set(struct cache_set *, unsigned,
+int bch_bucket_alloc_set(struct cache_set *, unsigned int,
struct bkey *, int, bool);
-bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned,
-unsigned, unsigned, bool);
+bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned int,
+unsigned int, unsigned int, bool);
bool bch_cached_dev_error(struct cached_dev *dc);
__printf(2, 3)
...
@@ -163,10 +163,10 @@ struct bset_tree {
*/
/* size of the binary tree and prev array */
-unsigned size;
+unsigned int size;
/* function of size - precalculated for to_inorder() */
-unsigned extra;
+unsigned int extra;
/* copy of the last key in the set */
struct bkey end;
@@ -211,7 +211,7 @@ struct btree_keys {
const struct btree_keys_ops *ops;
uint8_t page_order;
uint8_t nsets;
-unsigned last_set_unwritten:1;
+unsigned int last_set_unwritten:1;
bool *expensive_debug_checks;
/*
@@ -239,12 +239,12 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
}
-static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i)
+static inline unsigned int bset_byte_offset(struct btree_keys *b, struct bset *i)
{
return ((size_t) i) - ((size_t) b->set->data);
}
-static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i)
+static inline unsigned int bset_sector_offset(struct btree_keys *b, struct bset *i)
{
return bset_byte_offset(b, i) >> 9;
}
@@ -273,7 +273,7 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
}
static inline struct bset *bset_next_set(struct btree_keys *b,
-unsigned block_bytes)
+unsigned int block_bytes)
{
struct bset *i = bset_tree_last(b)->data;
@@ -281,7 +281,7 @@ static inline struct bset *bset_next_set(struct btree_keys *b,
}
void bch_btree_keys_free(struct btree_keys *);
-int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t);
+int bch_btree_keys_alloc(struct btree_keys *, unsigned int, gfp_t);
void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *,
bool *);
@@ -290,7 +290,7 @@ void bch_bset_build_written_tree(struct btree_keys *);
void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *);
void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
-unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+unsigned int bch_btree_insert_key(struct btree_keys *, struct bkey *,
struct bkey *);
enum {
@@ -349,20 +349,20 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
struct bset_sort_state {
mempool_t pool;
-unsigned page_order;
-unsigned crit_factor;
+unsigned int page_order;
+unsigned int crit_factor;
struct time_stats time;
};
void bch_bset_sort_state_free(struct bset_sort_state *);
-int bch_bset_sort_state_init(struct bset_sort_state *, unsigned);
+int bch_bset_sort_state_init(struct bset_sort_state *, unsigned int);
void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *);
void bch_btree_sort_into(struct btree_keys *, struct btree_keys *,
struct bset_sort_state *);
void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *,
struct bset_sort_state *);
-void bch_btree_sort_partial(struct btree_keys *, unsigned,
+void bch_btree_sort_partial(struct btree_keys *, unsigned int,
struct bset_sort_state *);
static inline void bch_btree_sort(struct btree_keys *b,
@@ -383,7 +383,7 @@ void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *);
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
-static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx)
+static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{
return bkey_idx(i->start, idx);
}
@@ -402,7 +402,7 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
}
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *,
-unsigned);
+unsigned int);
bool __bch_cut_front(const struct bkey *, struct bkey *);
bool __bch_cut_back(const struct bkey *, struct bkey *);
@@ -524,7 +524,7 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
struct bkey *bch_keylist_pop(struct keylist *);
void bch_keylist_pop_front(struct keylist *);
-int __bch_keylist_realloc(struct keylist *, unsigned);
+int __bch_keylist_realloc(struct keylist *, unsigned int);
/* Debug stuff */
@@ -532,7 +532,7 @@ int __bch_keylist_realloc(struct keylist *, unsigned);
int __bch_count_data(struct btree_keys *);
void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int);
void bch_dump_bucket(struct btree_keys *);
#else
@@ -541,7 +541,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3)
__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {}
-void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
+void bch_dump_bset(struct btree_keys *, struct bset *, unsigned int);
#endif
...
@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)
void bkey_put(struct cache_set *c, struct bkey *k)
{
-unsigned i;
+unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i))
@@ -479,7 +479,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
void bch_btree_node_write(struct btree *b, struct closure *parent)
{
-unsigned nsets = b->keys.nsets;
+unsigned int nsets = b->keys.nsets;
lockdep_assert_held(&b->lock);
@@ -581,7 +581,7 @@ static void mca_bucket_free(struct btree *b)
list_move(&b->list, &b->c->btree_cache_freeable);
}
-static unsigned btree_order(struct bkey *k)
+static unsigned int btree_order(struct bkey *k)
{
return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}
@@ -589,7 +589,7 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
if (!bch_btree_keys_alloc(&b->keys,
-max_t(unsigned,
+max_t(unsigned int,
ilog2(b->c->btree_pages),
btree_order(k)),
gfp)) {
@@ -620,7 +620,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b;
}
-static int mca_reap(struct btree *b, unsigned min_order, bool flush)
+static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
struct closure cl;
@@ -786,7 +786,7 @@ void bch_btree_cache_free(struct cache_set *c)
int bch_btree_cache_alloc(struct cache_set *c)
{
-unsigned i;
+unsigned int i;
for (i = 0; i < mca_reserve(c); i++)
if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
@@ -1136,7 +1136,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
-unsigned i;
+unsigned int i;
mutex_lock(&b->c->bucket_lock);
@@ -1157,7 +1157,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
struct cache_set *c = b->c;
struct cache *ca;
-unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
+unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
mutex_lock(&c->bucket_lock);
@@ -1181,7 +1181,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
struct bkey *k)
{
uint8_t stale = 0;
-unsigned i;
+unsigned int i;
struct bucket *g;
/*
@@ -1219,7 +1219,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
/* guard against overflow */
-SET_GC_SECTORS_USED(g, min_t(unsigned,
+SET_GC_SECTORS_USED(g, min_t(unsigned int,
GC_SECTORS_USED(g) + KEY_SIZE(k),
MAX_GC_SECTORS_USED));
@@ -1233,7 +1233,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
-unsigned i;
+unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
@@ -1259,7 +1259,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
uint8_t stale = 0;
-unsigned keys = 0, good_keys = 0;
+unsigned int keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter iter;
struct bset_tree *t;
@@ -1302,7 +1302,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
struct gc_merge_info {
struct btree *b;
-unsigned keys;
+unsigned int keys;
};
static int bch_btree_insert_node(struct btree *, struct btree_op *,
@@ -1311,7 +1311,7 @@ static int bch_btree_insert_node(struct btree *, struct btree_op *,
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
struct gc_stat *gc, struct gc_merge_info *r)
{
-unsigned i, nodes = 0, keys = 0, blocks;
+unsigned int i, nodes = 0, keys = 0, blocks;
struct btree *new_nodes[GC_MERGE_NODES];
struct keylist keylist;
struct closure cl;
@@ -1511,11 +1511,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return -EINTR;
}
-static unsigned btree_gc_count_keys(struct btree *b)
+static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
struct btree_iter iter;
-unsigned ret = 0;
+unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);
@@ -1678,7 +1678,7 @@ static void btree_gc_start(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
-unsigned i;
+unsigned int i;
if (!c->gc_mark_valid)
return;
@@ -1704,7 +1704,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
{
struct bucket *b;
struct cache *ca;
-unsigned i;
+unsigned int i;
mutex_lock(&c->bucket_lock);
@@ -1722,7 +1722,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
struct bcache_device *d = c->devices[i];
struct cached_dev *dc;
struct keybuf_key *w, *n;
-unsigned j;
+unsigned int j;
if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
continue;
@@ -1814,7 +1814,7 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c)
{
struct cache *ca;
-unsigned i;
+unsigned int i;
for_each_cache(ca, c, i)
if (ca->invalidate_needs_gc)
@@ -1905,7 +1905,7 @@ void bch_initial_gc_finish(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
-unsigned i;
+unsigned int i;
bch_btree_gc_finish(c);
@@ -1945,7 +1945,7 @@ void bch_initial_gc_finish(struct cache_set *c)
static bool btree_insert_key(struct btree *b, struct bkey *k,
struct bkey *replace_key)
{
-unsigned status;
+unsigned int status;
BUG_ON(bkey_cmp(k, &b->key) > 0);
@@ -2044,7 +2044,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
if (split) {
-unsigned keys = 0;
+unsigned int keys = 0;
trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
@@ -2300,7 +2300,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
void bch_btree_set_root(struct btree *b)
{
-unsigned i;
+unsigned int i;
struct closure cl;
closure_init_stack(&cl);
@@ -2412,7 +2412,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
struct refill {
struct btree_op op;
-unsigned nr_found;
+unsigned int nr_found;
struct keybuf *buf;
struct bkey *end;
keybuf_pred_fn *pred;
...
@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
return bset_tree_last(&b->keys)->data;
}
-static inline unsigned bset_block_offset(struct btree *b, struct bset *i)
+static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{
return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
}
@@ -213,7 +213,7 @@ struct btree_op {
/* Btree level at which we start taking write locks */
short lock;
-unsigned insert_collision:1;
+unsigned int insert_collision:1;
};
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
...
@@ -159,7 +159,7 @@ struct closure {
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
-unsigned magic;
+unsigned int magic;
struct list_head all;
unsigned long ip;
unsigned long waiting_on;
...
@@ -69,7 +69,7 @@ void bch_btree_verify(struct btree *b)
sorted->start,
(void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) {
struct bset *i;
-unsigned j;
+unsigned int j;
console_lock();
@@ -80,7 +80,7 @@ void bch_btree_verify(struct btree *b)
bch_dump_bset(&v->keys, sorted, 0);
for_each_written_bset(b, ondisk, i) {
-unsigned block = ((void *) i - (void *) ondisk) /
+unsigned int block = ((void *) i - (void *) ondisk) /
block_bytes(b->c);
printk(KERN_ERR "*** on disk block %u:\n", block);
@@ -176,7 +176,7 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
while (size) {
struct keybuf_key *w;
-unsigned bytes = min(i->bytes, size);
+unsigned int bytes = min(i->bytes, size);
int err = copy_to_user(buf, i->buf, bytes);
if (err)
...
@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{
-unsigned i;
+unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{
-unsigned i;
+unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
@@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{
-unsigned i = 0;
+unsigned int i = 0;
char *out = buf, *end = buf + size;
#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
@@ -126,7 +126,7 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{
struct btree *b = container_of(keys, struct btree, keys);
-unsigned j;
+unsigned int j;
char buf[80];
bch_extent_to_text(buf, sizeof(buf), k);
@@ -171,7 +171,7 @@ static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{
-unsigned i;
+unsigned int i;
char buf[80];
struct bucket *g;
@@ -204,7 +204,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
-unsigned i;
+unsigned int i;
if (!bkey_cmp(k, &ZERO_KEY) ||
!KEY_PTRS(k) ||
@@ -327,7 +327,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
struct cache_set *c = container_of(b, struct btree, keys)->c;
uint64_t old_offset;
-unsigned old_size, sectors_found = 0;
+unsigned int old_size, sectors_found = 0;
BUG_ON(!KEY_OFFSET(insert));
BUG_ON(!KEY_SIZE(insert));
@@ -363,7 +363,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
* k might have been split since we inserted/found the
* key we're replacing
*/
-unsigned i;
+unsigned int i;
uint64_t offset = KEY_START(k) -
KEY_START(replace_key);
@@ -502,7 +502,7 @@ static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
}
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
-unsigned ptr)
+unsigned int ptr)
{
struct bucket *g = PTR_BUCKET(b->c, k, ptr);
char buf[80];
@@ -534,7 +534,7 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{
struct btree *b = container_of(bk, struct btree, keys);
-unsigned i, stale;
+unsigned int i, stale;
if (!KEY_PTRS(k) ||
bch_extent_invalid(bk, k))
@@ -577,7 +577,7 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r)
{
struct btree *b = container_of(bk, struct btree, keys);
-unsigned i;
+unsigned int i;
if (key_merging_disabled(b->c))
return false;
...
@@ -42,7 +42,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
}
void bch_submit_bbio(struct bio *bio, struct cache_set *c,
-struct bkey *k, unsigned ptr)
+struct bkey *k, unsigned int ptr)
{
struct bbio *b = container_of(bio, struct bbio, bio);
bch_bkey_copy_single_ptr(&b->key, k, ptr);
@@ -52,7 +52,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{
-unsigned errors;
+unsigned int errors;
WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
@@ -75,12 +75,12 @@ void bch_count_io_errors(struct cache *ca,
*/
if (ca->set->error_decay) {
-unsigned count = atomic_inc_return(&ca->io_count);
+unsigned int count = atomic_inc_return(&ca->io_count);
while (count > ca->set->error_decay) {
-unsigned errors;
-unsigned old = count;
-unsigned new = count - ca->set->error_decay;
+unsigned int errors;
+unsigned int old = count;
+unsigned int new = count - ca->set->error_decay;
/*
* First we subtract refresh from count; each time we
@@ -104,7 +104,7 @@ void bch_count_io_errors(struct cache *ca,
}
if (error) {
-unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
+unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
&ca->io_errors);
errors >>= IO_ERROR_SHIFT;
@@ -126,12 +126,12 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
struct cache *ca = PTR_CACHE(c, &b->key, 0);
int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
-unsigned threshold = op_is_write(bio_op(bio))
+unsigned int threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us
: c->congested_read_threshold_us;
if (threshold) {
-unsigned t = local_clock_us();
+unsigned int t = local_clock_us();
int us = t - b->submit_time_us;
int congested = atomic_read(&c->congested);
...
@@ -32,7 +32,7 @@ static void journal_read_endio(struct bio *bio)
}
static int journal_read_bucket(struct cache *ca, struct list_head *list,
-unsigned bucket_index)
+unsigned int bucket_index)
{
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio;
@@ -40,7 +40,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data;
struct closure cl;
-unsigned len, left, offset = 0;
+unsigned int len, left, offset = 0;
int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
@@ -50,7 +50,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset;
-len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
+len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset;
@@ -154,12 +154,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
})
struct cache *ca;
-unsigned iter;
+unsigned int iter;
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
-unsigned i, l, r, m;
+unsigned int i, l, r, m;
uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
@@ -304,7 +304,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
k < bset_bkey_last(&i->j);
k = bkey_next(k))
if (!__bch_extent_invalid(c, k)) {
-unsigned j;
+unsigned int j;
for (j = 0; j < KEY_PTRS(k); j++)
if (ptr_available(c, k, j))
@@ -492,7 +492,7 @@ static void journal_reclaim(struct cache_set *c)
struct bkey *k = &c->journal.key;
struct cache *ca;
uint64_t last_seq;
-unsigned iter, n = 0;
+unsigned int iter, n = 0;
atomic_t p __maybe_unused;
atomic_long_inc(&c->reclaim);
@@ -526,7 +526,7 @@ static void journal_reclaim(struct cache_set *c)
for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal;
-unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
/* No space available on this device */
if (next == ja->discard_idx)
@@ -609,7 +609,7 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;
-unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
c->sb.block_size;
struct bio *bio;
@@ -705,7 +705,7 @@ static void journal_try_write(struct cache_set *c)
}
static struct journal_write *journal_wait_for_write(struct cache_set *c,
-unsigned nkeys)
+unsigned int nkeys)
__acquires(&c->journal.lock)
{
size_t sectors;
...
@@ -110,7 +110,7 @@ struct journal {
struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */
-unsigned blocks_free;
+unsigned int blocks_free;
uint64_t seq;
DECLARE_FIFO(atomic_t, pin);
@@ -131,13 +131,13 @@ struct journal_device {
uint64_t seq[SB_JOURNAL_BUCKETS];
/* Journal bucket we're currently writing to */
-unsigned cur_idx;
+unsigned int cur_idx;
/* Last journal bucket that still contains an open journal entry */
-unsigned last_idx;
+unsigned int last_idx;
/* Next journal bucket to be discarded */
-unsigned discard_idx;
+unsigned int discard_idx;
#define DISCARD_READY 0
#define DISCARD_IN_FLIGHT 1
...
@@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
struct cache_set *c = container_of(buf, struct cache_set,
moving_gc_keys);
-unsigned i;
+unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) &&
@@ -186,7 +186,7 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}
-static unsigned bucket_heap_top(struct cache *ca)
+static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;
return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
@@ -196,7 +196,7 @@ void bch_moving_gc(struct cache_set *c)
{
struct cache *ca;
struct bucket *b;
-unsigned i;
+unsigned int i;
if (!c->copy_gc_enabled)
return;
@@ -204,8 +204,8 @@ void bch_moving_gc(struct cache_set *c)
mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i) {
-unsigned sectors_to_move = 0;
-unsigned reserve_sectors = ca->sb.bucket_size *
+unsigned int sectors_to_move = 0;
+unsigned int reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0;
...
@@ -27,7 +27,7 @@ struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *);
-static unsigned cache_mode(struct cached_dev *dc)
+static unsigned int cache_mode(struct cached_dev *dc)
{
return BDEV_CACHE_MODE(&dc->sb);
}
@@ -98,7 +98,7 @@ static void bch_data_insert_keys(struct closure *cl)
closure_return(cl);
}
-static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
+static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
struct cache_set *c)
{
size_t oldsize = bch_keylist_nkeys(l);
@@ -125,7 +125,7 @@ static void bch_data_invalidate(struct closure *cl)
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) {
-unsigned sectors = min(bio_sectors(bio),
+unsigned int sectors = min(bio_sectors(bio),
1U << (KEY_SIZE_BITS - 1));
if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
@@ -211,7 +211,7 @@ static void bch_data_insert_start(struct closure *cl)
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do {
-unsigned i;
+unsigned int i;
struct bkey *k;
struct bio_set *split = &op->c->bio_split;
@@ -328,7 +328,7 @@ void bch_data_insert(struct closure *cl)
/* Congested? */
-unsigned bch_get_congested(struct cache_set *c)
+unsigned int bch_get_congested(struct cache_set *c)
{
int i;
long rand;
@@ -372,8 +372,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
struct cache_set *c = dc->disk.c;
-unsigned mode = cache_mode(dc);
-unsigned sectors, congested = bch_get_congested(c);
+unsigned int mode = cache_mode(dc);
+unsigned int sectors, congested = bch_get_congested(c);
struct task_struct *task = current;
struct io *i;
@@ -469,11 +469,11 @@ struct search {
struct bio *cache_miss;
struct bcache_device *d;
-unsigned insert_bio_sectors;
-unsigned recoverable:1;
-unsigned write:1;
-unsigned read_dirty_data:1;
-unsigned cache_missed:1;
+unsigned int insert_bio_sectors;
+unsigned int recoverable:1;
+unsigned int write:1;
+unsigned int read_dirty_data:1;
+unsigned int cache_missed:1;
unsigned long start_time;
@@ -514,15 +514,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct search *s = container_of(op, struct search, op);
struct bio *n, *bio = &s->bio.bio;
struct bkey *bio_key;
-unsigned ptr;
+unsigned int ptr;
if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode ||
KEY_START(k) > bio->bi_iter.bi_sector) { KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio); unsigned int bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode unsigned int sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX, ? min_t(uint64_t, INT_MAX,
KEY_START(k) - bio->bi_iter.bi_sector) KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX; : INT_MAX;
...@@ -856,10 +856,10 @@ static void cached_dev_read_done_bh(struct closure *cl) ...@@ -856,10 +856,10 @@ static void cached_dev_read_done_bh(struct closure *cl)
} }
static int cached_dev_cache_miss(struct btree *b, struct search *s, static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors) struct bio *bio, unsigned int sectors)
{ {
int ret = MAP_CONTINUE; int ret = MAP_CONTINUE;
unsigned reada = 0; unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio; struct bio *miss, *cache_bio;
...@@ -1226,7 +1226,7 @@ static int cached_dev_congested(void *data, int bits) ...@@ -1226,7 +1226,7 @@ static int cached_dev_congested(void *data, int bits)
return 1; return 1;
if (cached_dev_get(dc)) { if (cached_dev_get(dc)) {
unsigned i; unsigned int i;
struct cache *ca; struct cache *ca;
for_each_cache(ca, d->c, i) { for_each_cache(ca, d->c, i) {
...@@ -1253,9 +1253,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc) ...@@ -1253,9 +1253,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
/* Flash backed devices */ /* Flash backed devices */
static int flash_dev_cache_miss(struct btree *b, struct search *s, static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors) struct bio *bio, unsigned int sectors)
{ {
unsigned bytes = min(sectors, bio_sectors(bio)) << 9; unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
swap(bio->bi_iter.bi_size, bytes); swap(bio->bi_iter.bi_size, bytes);
zero_fill_bio(bio); zero_fill_bio(bio);
...@@ -1338,7 +1338,7 @@ static int flash_dev_congested(void *data, int bits) ...@@ -1338,7 +1338,7 @@ static int flash_dev_congested(void *data, int bits)
struct bcache_device *d = data; struct bcache_device *d = data;
struct request_queue *q; struct request_queue *q;
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
int ret = 0; int ret = 0;
for_each_cache(ca, d->c, i) { for_each_cache(ca, d->c, i) {
......
...@@ -8,7 +8,7 @@ struct data_insert_op { ...@@ -8,7 +8,7 @@ struct data_insert_op {
struct bio *bio; struct bio *bio;
struct workqueue_struct *wq; struct workqueue_struct *wq;
unsigned inode; unsigned int inode;
uint16_t write_point; uint16_t write_point;
uint16_t write_prio; uint16_t write_prio;
blk_status_t status; blk_status_t status;
...@@ -17,15 +17,15 @@ struct data_insert_op { ...@@ -17,15 +17,15 @@ struct data_insert_op {
uint16_t flags; uint16_t flags;
struct { struct {
unsigned bypass:1; unsigned int bypass:1;
unsigned writeback:1; unsigned int writeback:1;
unsigned flush_journal:1; unsigned int flush_journal:1;
unsigned csum:1; unsigned int csum:1;
unsigned replace:1; unsigned int replace:1;
unsigned replace_collision:1; unsigned int replace_collision:1;
unsigned insert_data_done:1; unsigned int insert_data_done:1;
}; };
}; };
...@@ -33,7 +33,7 @@ struct data_insert_op { ...@@ -33,7 +33,7 @@ struct data_insert_op {
BKEY_PADDED(replace_key); BKEY_PADDED(replace_key);
}; };
unsigned bch_get_congested(struct cache_set *); unsigned int bch_get_congested(struct cache_set *);
void bch_data_insert(struct closure *cl); void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc); void bch_cached_dev_request_init(struct cached_dev *dc);
......
...@@ -33,11 +33,11 @@ ...@@ -33,11 +33,11 @@
* stored left shifted by 16, and scaled back in the sysfs show() function. * stored left shifted by 16, and scaled back in the sysfs show() function.
*/ */
static const unsigned DAY_RESCALE = 288; static const unsigned int DAY_RESCALE = 288;
static const unsigned HOUR_RESCALE = 12; static const unsigned int HOUR_RESCALE = 12;
static const unsigned FIVE_MINUTE_RESCALE = 1; static const unsigned int FIVE_MINUTE_RESCALE = 1;
static const unsigned accounting_delay = (HZ * 300) / 22; static const unsigned int accounting_delay = (HZ * 300) / 22;
static const unsigned accounting_weight = 32; static const unsigned int accounting_weight = 32;
/* sysfs reading/writing */ /* sysfs reading/writing */
...@@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t) ...@@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t)
struct cache_accounting *acc = from_timer(acc, t, timer); struct cache_accounting *acc = from_timer(acc, t, timer);
#define move_stat(name) do { \ #define move_stat(name) do { \
unsigned t = atomic_xchg(&acc->collector.name, 0); \ unsigned int t = atomic_xchg(&acc->collector.name, 0); \
t <<= 16; \ t <<= 16; \
acc->five_minute.name += t; \ acc->five_minute.name += t; \
acc->hour.name += t; \ acc->hour.name += t; \
......
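The stats hunks above touch the collector's move_stat() macro; the driver's own comment notes that the counters are stored left-shifted by 16 and scaled back down in the sysfs show() path. A minimal standalone sketch of that fixed-point pattern, for illustration only (demo_move_stat and demo_show are made-up names, not part of bcache):

#include <stdio.h>

/* Illustration of the 16-bit fixed-point scheme described in the comment
 * above: raw event counts are shifted left by 16 when accumulated, and the
 * reader shifts them back down before display. */
#define FP_SHIFT 16

static unsigned long acc_five_minute;

static void demo_move_stat(unsigned int raw)
{
	unsigned long t = (unsigned long)raw << FP_SHIFT;

	acc_five_minute += t;
}

static unsigned long demo_show(void)
{
	return acc_five_minute >> FP_SHIFT;	/* scale back for display */
}

int main(void)
{
	demo_move_stat(42);
	printf("%lu\n", demo_show());	/* prints 42 */
	return 0;
}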
...@@ -23,7 +23,7 @@ struct cache_stats { ...@@ -23,7 +23,7 @@ struct cache_stats {
unsigned long cache_miss_collisions; unsigned long cache_miss_collisions;
unsigned long sectors_bypassed; unsigned long sectors_bypassed;
unsigned rescale; unsigned int rescale;
}; };
struct cache_accounting { struct cache_accounting {
......
...@@ -61,7 +61,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, ...@@ -61,7 +61,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
const char *err; const char *err;
struct cache_sb *s; struct cache_sb *s;
struct buffer_head *bh = __bread(bdev, 1, SB_SIZE); struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
unsigned i; unsigned int i;
if (!bh) if (!bh)
return "IO error"; return "IO error";
...@@ -202,7 +202,7 @@ static void write_bdev_super_endio(struct bio *bio) ...@@ -202,7 +202,7 @@ static void write_bdev_super_endio(struct bio *bio)
static void __write_super(struct cache_sb *sb, struct bio *bio) static void __write_super(struct cache_sb *sb, struct bio *bio)
{ {
struct cache_sb *out = page_address(bio_first_page_all(bio)); struct cache_sb *out = page_address(bio_first_page_all(bio));
unsigned i; unsigned int i;
bio->bi_iter.bi_sector = SB_SECTOR; bio->bi_iter.bi_sector = SB_SECTOR;
bio->bi_iter.bi_size = SB_SIZE; bio->bi_iter.bi_size = SB_SIZE;
...@@ -282,7 +282,7 @@ void bcache_write_super(struct cache_set *c) ...@@ -282,7 +282,7 @@ void bcache_write_super(struct cache_set *c)
{ {
struct closure *cl = &c->sb_write; struct closure *cl = &c->sb_write;
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
down(&c->sb_write_mutex); down(&c->sb_write_mutex);
closure_init(cl, &c->cl); closure_init(cl, &c->cl);
...@@ -334,7 +334,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags, ...@@ -334,7 +334,7 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
{ {
struct closure *cl = &c->uuid_write; struct closure *cl = &c->uuid_write;
struct uuid_entry *u; struct uuid_entry *u;
unsigned i; unsigned int i;
char buf[80]; char buf[80];
BUG_ON(!parent); BUG_ON(!parent);
...@@ -587,7 +587,7 @@ static void prio_read(struct cache *ca, uint64_t bucket) ...@@ -587,7 +587,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
struct prio_set *p = ca->disk_buckets; struct prio_set *p = ca->disk_buckets;
struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d; struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
struct bucket *b; struct bucket *b;
unsigned bucket_nr = 0; unsigned int bucket_nr = 0;
for (b = ca->buckets; for (b = ca->buckets;
b < ca->buckets + ca->sb.nbuckets; b < ca->buckets + ca->sb.nbuckets;
...@@ -662,7 +662,7 @@ static void bcache_device_unlink(struct bcache_device *d) ...@@ -662,7 +662,7 @@ static void bcache_device_unlink(struct bcache_device *d)
lockdep_assert_held(&bch_register_lock); lockdep_assert_held(&bch_register_lock);
if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
unsigned i; unsigned int i;
struct cache *ca; struct cache *ca;
sysfs_remove_link(&d->c->kobj, d->name); sysfs_remove_link(&d->c->kobj, d->name);
...@@ -676,7 +676,7 @@ static void bcache_device_unlink(struct bcache_device *d) ...@@ -676,7 +676,7 @@ static void bcache_device_unlink(struct bcache_device *d)
static void bcache_device_link(struct bcache_device *d, struct cache_set *c, static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
const char *name) const char *name)
{ {
unsigned i; unsigned int i;
struct cache *ca; struct cache *ca;
for_each_cache(ca, d->c, i) for_each_cache(ca, d->c, i)
...@@ -715,7 +715,7 @@ static void bcache_device_detach(struct bcache_device *d) ...@@ -715,7 +715,7 @@ static void bcache_device_detach(struct bcache_device *d)
} }
static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
unsigned id) unsigned int id)
{ {
d->id = id; d->id = id;
d->c = c; d->c = c;
...@@ -762,7 +762,7 @@ static void bcache_device_free(struct bcache_device *d) ...@@ -762,7 +762,7 @@ static void bcache_device_free(struct bcache_device *d)
closure_debug_destroy(&d->cl); closure_debug_destroy(&d->cl);
} }
static int bcache_device_init(struct bcache_device *d, unsigned block_size, static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
sector_t sectors) sector_t sectors)
{ {
struct request_queue *q; struct request_queue *q;
...@@ -778,7 +778,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, ...@@ -778,7 +778,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (!d->nr_stripes || d->nr_stripes > max_stripes) { if (!d->nr_stripes || d->nr_stripes > max_stripes) {
pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)", pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
(unsigned)d->nr_stripes); (unsigned int)d->nr_stripes);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1212,7 +1212,7 @@ static void cached_dev_flush(struct closure *cl) ...@@ -1212,7 +1212,7 @@ static void cached_dev_flush(struct closure *cl)
continue_at(cl, cached_dev_free, system_wq); continue_at(cl, cached_dev_free, system_wq);
} }
static int cached_dev_init(struct cached_dev *dc, unsigned block_size) static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{ {
int ret; int ret;
struct io *io; struct io *io;
...@@ -1489,7 +1489,7 @@ static void cache_set_free(struct closure *cl) ...@@ -1489,7 +1489,7 @@ static void cache_set_free(struct closure *cl)
{ {
struct cache_set *c = container_of(cl, struct cache_set, cl); struct cache_set *c = container_of(cl, struct cache_set, cl);
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
if (!IS_ERR_OR_NULL(c->debug)) if (!IS_ERR_OR_NULL(c->debug))
debugfs_remove(c->debug); debugfs_remove(c->debug);
...@@ -1532,7 +1532,7 @@ static void cache_set_flush(struct closure *cl) ...@@ -1532,7 +1532,7 @@ static void cache_set_flush(struct closure *cl)
struct cache_set *c = container_of(cl, struct cache_set, caching); struct cache_set *c = container_of(cl, struct cache_set, caching);
struct cache *ca; struct cache *ca;
struct btree *b; struct btree *b;
unsigned i; unsigned int i;
bch_cache_accounting_destroy(&c->accounting); bch_cache_accounting_destroy(&c->accounting);
...@@ -1762,7 +1762,7 @@ static void run_cache_set(struct cache_set *c) ...@@ -1762,7 +1762,7 @@ static void run_cache_set(struct cache_set *c)
struct cached_dev *dc, *t; struct cached_dev *dc, *t;
struct cache *ca; struct cache *ca;
struct closure cl; struct closure cl;
unsigned i; unsigned int i;
closure_init_stack(&cl); closure_init_stack(&cl);
...@@ -1853,7 +1853,7 @@ static void run_cache_set(struct cache_set *c) ...@@ -1853,7 +1853,7 @@ static void run_cache_set(struct cache_set *c)
pr_notice("invalidating existing data"); pr_notice("invalidating existing data");
for_each_cache(ca, c, i) { for_each_cache(ca, c, i) {
unsigned j; unsigned int j;
ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7, ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2, SB_JOURNAL_BUCKETS); 2, SB_JOURNAL_BUCKETS);
...@@ -1998,7 +1998,7 @@ static const char *register_cache_set(struct cache *ca) ...@@ -1998,7 +1998,7 @@ static const char *register_cache_set(struct cache *ca)
void bch_cache_release(struct kobject *kobj) void bch_cache_release(struct kobject *kobj)
{ {
struct cache *ca = container_of(kobj, struct cache, kobj); struct cache *ca = container_of(kobj, struct cache, kobj);
unsigned i; unsigned int i;
if (ca->set) { if (ca->set) {
BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
...@@ -2150,7 +2150,7 @@ static bool bch_is_open_backing(struct block_device *bdev) { ...@@ -2150,7 +2150,7 @@ static bool bch_is_open_backing(struct block_device *bdev) {
static bool bch_is_open_cache(struct block_device *bdev) { static bool bch_is_open_cache(struct block_device *bdev) {
struct cache_set *c, *tc; struct cache_set *c, *tc;
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
for_each_cache(ca, c, i) for_each_cache(ca, c, i)
......
...@@ -307,7 +307,7 @@ STORE(__cached_dev) ...@@ -307,7 +307,7 @@ STORE(__cached_dev)
if (v < 0) if (v < 0)
return v; return v;
if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) { if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
SET_BDEV_CACHE_MODE(&dc->sb, v); SET_BDEV_CACHE_MODE(&dc->sb, v);
bch_write_bdev_super(dc, NULL); bch_write_bdev_super(dc, NULL);
} }
...@@ -533,9 +533,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf) ...@@ -533,9 +533,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
op.stats.floats, op.stats.failed); op.stats.floats, op.stats.failed);
} }
static unsigned bch_root_usage(struct cache_set *c) static unsigned int bch_root_usage(struct cache_set *c)
{ {
unsigned bytes = 0; unsigned int bytes = 0;
struct bkey *k; struct bkey *k;
struct btree *b; struct btree *b;
struct btree_iter iter; struct btree_iter iter;
...@@ -570,9 +570,9 @@ static size_t bch_cache_size(struct cache_set *c) ...@@ -570,9 +570,9 @@ static size_t bch_cache_size(struct cache_set *c)
return ret; return ret;
} }
static unsigned bch_cache_max_chain(struct cache_set *c) static unsigned int bch_cache_max_chain(struct cache_set *c)
{ {
unsigned ret = 0; unsigned int ret = 0;
struct hlist_head *h; struct hlist_head *h;
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
...@@ -580,7 +580,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c) ...@@ -580,7 +580,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
for (h = c->bucket_hash; for (h = c->bucket_hash;
h < c->bucket_hash + (1 << BUCKET_HASH_BITS); h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
h++) { h++) {
unsigned i = 0; unsigned int i = 0;
struct hlist_node *p; struct hlist_node *p;
hlist_for_each(p, h) hlist_for_each(p, h)
...@@ -593,13 +593,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c) ...@@ -593,13 +593,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
return ret; return ret;
} }
static unsigned bch_btree_used(struct cache_set *c) static unsigned int bch_btree_used(struct cache_set *c)
{ {
return div64_u64(c->gc_stats.key_bytes * 100, return div64_u64(c->gc_stats.key_bytes * 100,
(c->gc_stats.nodes ?: 1) * btree_bytes(c)); (c->gc_stats.nodes ?: 1) * btree_bytes(c));
} }
static unsigned bch_average_key_size(struct cache_set *c) static unsigned int bch_average_key_size(struct cache_set *c)
{ {
return c->gc_stats.nkeys return c->gc_stats.nkeys
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
...@@ -996,7 +996,7 @@ STORE(__bch_cache) ...@@ -996,7 +996,7 @@ STORE(__bch_cache)
if (v < 0) if (v < 0)
return v; return v;
if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) { if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
mutex_lock(&ca->set->bucket_lock); mutex_lock(&ca->set->bucket_lock);
SET_CACHE_REPLACEMENT(&ca->sb, v); SET_CACHE_REPLACEMENT(&ca->sb, v);
mutex_unlock(&ca->set->bucket_lock); mutex_unlock(&ca->set->bucket_lock);
......
...@@ -347,7 +347,7 @@ static inline int bch_strtoul_h(const char *cp, long *res) ...@@ -347,7 +347,7 @@ static inline int bch_strtoul_h(const char *cp, long *res)
snprintf(buf, size, \ snprintf(buf, size, \
__builtin_types_compatible_p(typeof(var), int) \ __builtin_types_compatible_p(typeof(var), int) \
? "%i\n" : \ ? "%i\n" : \
__builtin_types_compatible_p(typeof(var), unsigned) \ __builtin_types_compatible_p(typeof(var), unsigned int) \
? "%u\n" : \ ? "%u\n" : \
__builtin_types_compatible_p(typeof(var), long) \ __builtin_types_compatible_p(typeof(var), long) \
? "%li\n" : \ ? "%li\n" : \
...@@ -379,7 +379,7 @@ struct time_stats { ...@@ -379,7 +379,7 @@ struct time_stats {
void bch_time_stats_update(struct time_stats *stats, uint64_t time); void bch_time_stats_update(struct time_stats *stats, uint64_t time);
static inline unsigned local_clock_us(void) static inline unsigned int local_clock_us(void)
{ {
return local_clock() >> 10; return local_clock() >> 10;
} }
...@@ -543,9 +543,10 @@ dup: \ ...@@ -543,9 +543,10 @@ dup: \
container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member) container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
/* Does linear interpolation between powers of two */ /* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) static inline unsigned int fract_exp_two(unsigned int x,
unsigned int fract_bits)
{ {
unsigned fract = x & ~(~0 << fract_bits); unsigned int fract = x & ~(~0 << fract_bits);
x >>= fract_bits; x >>= fract_bits;
x = 1 << x; x = 1 << x;
......
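fract_exp_two() above is documented as doing linear interpolation between powers of two; the visible part of the hunk splits the argument into an integer part (shifted down by fract_bits) and a fractional part before computing 1 << x. The rest of the function is not shown here, so the following is only a standalone sketch of the same idea, assuming the fractional part is blended linearly toward the next power of two (approx_exp_two is a made-up name, not the driver's):

#include <stdio.h>

/* Illustrative only: blend linearly between 2^x and 2^(x+1), treating the
 * low fract_bits of the input as the fractional part. */
static unsigned int approx_exp_two(unsigned int x, unsigned int fract_bits)
{
	unsigned int fract = x & ~(~0U << fract_bits);
	unsigned int pow;

	x >>= fract_bits;
	pow = 1U << x;

	/* add fract/2^fract_bits of the gap to the next power of two */
	return pow + ((pow * fract) >> fract_bits);
}

int main(void)
{
	/* 2.5 in 3-bit fixed point -> between 2^2=4 and 2^3=8 -> 6 */
	printf("%u\n", approx_exp_two((2u << 3) | 4u, 3));
	return 0;
}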
...@@ -215,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work) ...@@ -215,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work)
smp_mb(); smp_mb();
} }
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) static unsigned int writeback_delay(struct cached_dev *dc,
unsigned int sectors)
{ {
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent) !dc->writeback_percent)
...@@ -263,7 +264,7 @@ static void write_dirty_finish(struct closure *cl) ...@@ -263,7 +264,7 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */ /* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) { if (KEY_DIRTY(&w->key)) {
int ret; int ret;
unsigned i; unsigned int i;
struct keylist keys; struct keylist keys;
bch_keylist_init(&keys); bch_keylist_init(&keys);
...@@ -377,7 +378,7 @@ static void read_dirty_submit(struct closure *cl) ...@@ -377,7 +378,7 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc) static void read_dirty(struct cached_dev *dc)
{ {
unsigned delay = 0; unsigned int delay = 0;
struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w; struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
size_t size; size_t size;
int nk, i; int nk, i;
...@@ -498,11 +499,11 @@ static void read_dirty(struct cached_dev *dc) ...@@ -498,11 +499,11 @@ static void read_dirty(struct cached_dev *dc)
/* Scan for dirty data */ /* Scan for dirty data */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors) uint64_t offset, int nr_sectors)
{ {
struct bcache_device *d = c->devices[inode]; struct bcache_device *d = c->devices[inode];
unsigned stripe_offset, stripe, sectors_dirty; unsigned int stripe_offset, stripe, sectors_dirty;
if (!d) if (!d)
return; return;
...@@ -514,7 +515,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, ...@@ -514,7 +515,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
stripe_offset = offset & (d->stripe_size - 1); stripe_offset = offset & (d->stripe_size - 1);
while (nr_sectors) { while (nr_sectors) {
int s = min_t(unsigned, abs(nr_sectors), int s = min_t(unsigned int, abs(nr_sectors),
d->stripe_size - stripe_offset); d->stripe_size - stripe_offset);
if (nr_sectors < 0) if (nr_sectors < 0)
...@@ -548,7 +549,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) ...@@ -548,7 +549,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
static void refill_full_stripes(struct cached_dev *dc) static void refill_full_stripes(struct cached_dev *dc)
{ {
struct keybuf *buf = &dc->writeback_keys; struct keybuf *buf = &dc->writeback_keys;
unsigned start_stripe, stripe, next_stripe; unsigned int start_stripe, stripe, next_stripe;
bool wrapped = false; bool wrapped = false;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
...@@ -688,7 +689,7 @@ static int bch_writeback_thread(void *arg) ...@@ -688,7 +689,7 @@ static int bch_writeback_thread(void *arg)
read_dirty(dc); read_dirty(dc);
if (searched_full_index) { if (searched_full_index) {
unsigned delay = dc->writeback_delay * HZ; unsigned int delay = dc->writeback_delay * HZ;
while (delay && while (delay &&
!kthread_should_stop() && !kthread_should_stop() &&
...@@ -712,7 +713,7 @@ static int bch_writeback_thread(void *arg) ...@@ -712,7 +713,7 @@ static int bch_writeback_thread(void *arg)
struct sectors_dirty_init { struct sectors_dirty_init {
struct btree_op op; struct btree_op op;
unsigned inode; unsigned int inode;
size_t count; size_t count;
struct bkey start; struct bkey start;
}; };
......
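The bcache_dev_sectors_dirty_add() hunk earlier in this file walks a sector range stripe by stripe: offset & (stripe_size - 1) gives the offset inside the first stripe, and each step is clamped to the stripe boundary before moving on. A small user-space sketch of that loop shape, with a hypothetical callback standing in for the per-stripe atomic update and a fixed power-of-two stripe size (none of these names are bcache's):

#include <stdio.h>
#include <stdlib.h>

#define STRIPE_SIZE 8u	/* must be a power of two for the & mask to work */

/* Split [offset, offset + nr_sectors) into per-stripe chunks and hand each
 * chunk to fn(); a negative nr_sectors yields negative chunk sizes, as in
 * the driver's dirty-sector accounting. */
static void for_each_stripe_chunk(unsigned long long offset, int nr_sectors,
				  void (*fn)(unsigned int stripe, int sectors))
{
	unsigned int stripe = offset / STRIPE_SIZE;
	unsigned int stripe_offset = offset & (STRIPE_SIZE - 1);

	while (nr_sectors) {
		int s = abs(nr_sectors) < (int)(STRIPE_SIZE - stripe_offset)
			? abs(nr_sectors)
			: (int)(STRIPE_SIZE - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		fn(stripe, s);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static void print_chunk(unsigned int stripe, int sectors)
{
	printf("stripe %u: %+d sectors\n", stripe, sectors);
}

int main(void)
{
	for_each_stripe_chunk(5, 10, print_chunk);	/* stripe 0: +3, stripe 1: +7 */
	return 0;
}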
...@@ -28,7 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) ...@@ -28,7 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret; return ret;
} }
static inline unsigned offset_to_stripe(struct bcache_device *d, static inline unsigned int offset_to_stripe(struct bcache_device *d,
uint64_t offset) uint64_t offset)
{ {
do_div(offset, d->stripe_size); do_div(offset, d->stripe_size);
...@@ -37,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d, ...@@ -37,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d,
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset, uint64_t offset,
unsigned nr_sectors) unsigned int nr_sectors)
{ {
unsigned stripe = offset_to_stripe(&dc->disk, offset); unsigned int stripe = offset_to_stripe(&dc->disk, offset);
while (1) { while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
...@@ -54,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, ...@@ -54,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
} }
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned cache_mode, bool would_skip) unsigned int cache_mode, bool would_skip)
{ {
unsigned in_use = dc->disk.c->gc_stats.in_use; unsigned int in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK || if (cache_mode != CACHE_MODE_WRITEBACK ||
test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
...@@ -96,7 +96,7 @@ static inline void bch_writeback_add(struct cached_dev *dc) ...@@ -96,7 +96,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
} }
} }
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned int, uint64_t, int);
void bch_sectors_dirty_init(struct bcache_device *); void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *); void bch_cached_dev_writeback_init(struct cached_dev *);
......
...@@ -30,10 +30,10 @@ struct bkey { ...@@ -30,10 +30,10 @@ struct bkey {
BITMASK(name, struct bkey, field, offset, size) BITMASK(name, struct bkey, field, offset, size)
#define PTR_FIELD(name, offset, size) \ #define PTR_FIELD(name, offset, size) \
static inline __u64 name(const struct bkey *k, unsigned i) \ static inline __u64 name(const struct bkey *k, unsigned int i) \
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \ { return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
\ \
static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \ static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
{ \ { \
k->ptr[i] &= ~(~(~0ULL << size) << offset); \ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
...@@ -120,7 +120,7 @@ static inline struct bkey *bkey_next(const struct bkey *k) ...@@ -120,7 +120,7 @@ static inline struct bkey *bkey_next(const struct bkey *k)
return (struct bkey *) (d + bkey_u64s(k)); return (struct bkey *) (d + bkey_u64s(k));
} }
static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys) static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
{ {
__u64 *d = (void *) k; __u64 *d = (void *) k;
return (struct bkey *) (d + nr_keys); return (struct bkey *) (d + nr_keys);
......
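PTR_FIELD() above generates shift-and-mask accessors over the 64-bit pointer words of a bkey. A standalone illustration of the same pattern, expanded by hand for one hypothetical 16-bit field at bit offset 20 (struct demo_key and the DEMO_FIELD names are not part of bcache):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bkey: a single 64-bit pointer word. */
struct demo_key {
	uint64_t ptr[1];
};

/* Same shift/mask pattern as the PTR_FIELD() generator, spelled out for a
 * field occupying bits [20, 36). */
static inline uint64_t DEMO_FIELD(const struct demo_key *k, unsigned int i)
{
	return (k->ptr[i] >> 20) & ~(~0ULL << 16);
}

static inline void SET_DEMO_FIELD(struct demo_key *k, unsigned int i, uint64_t v)
{
	k->ptr[i] &= ~(~(~0ULL << 16) << 20);		/* clear the field */
	k->ptr[i] |= (v & ~(~0ULL << 16)) << 20;	/* store the new value */
}

int main(void)
{
	struct demo_key k = { { 0 } };

	SET_DEMO_FIELD(&k, 0, 0xBEEF);
	printf("0x%llx\n", (unsigned long long)DEMO_FIELD(&k, 0));	/* 0xbeef */
	return 0;
}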