Commit 5bed49ad authored by Linus Torvalds

Merge tag 'for-4.19/post-20180822' of git://git.kernel.dk/linux-block

Pull more block updates from Jens Axboe:

 - Set of bcache fixes and changes (Coly)

 - The flush warn fix (me)

 - Small series of BFQ fixes (Paolo)

 - wbt hang fix (Ming)

 - blktrace fix (Steven)

 - blk-mq hardware queue count update fix (Jianchao)

 - Various little fixes

* tag 'for-4.19/post-20180822' of git://git.kernel.dk/linux-block: (31 commits)
  block/DAC960.c: make some arrays static const, shrinks object size
  blk-mq: sync the update nr_hw_queues with blk_mq_queue_tag_busy_iter
  blk-mq: init hctx sched after update ctx and hctx mapping
  block: remove duplicate initialization
  tracing/blktrace: Fix to allow setting same value
  pktcdvd: fix setting of 'ret' error return for a few cases
  block: change return type to bool
  block, bfq: return nbytes and not zero from struct cftype .write() method
  block, bfq: improve code of bfq_bfqq_charge_time
  block, bfq: reduce write overcharge
  block, bfq: always update the budget of an entity when needed
  block, bfq: readd missing reset of parent-entity service
  blk-wbt: fix IO hang in wbt_wait()
  block: don't warn for flush on read-only device
  bcache: add the missing comments for smp_mb()/smp_wmb()
  bcache: remove unnecessary space before ioctl function pointer arguments
  bcache: add missing SPDX header
  bcache: move open brace at end of function definitions to next line
  bcache: add static const prefix to char * array declarations
  bcache: fix code comments style
  ...
parents fe6f0ed0 1e7da865
...@@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of, ...@@ -913,7 +913,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
if (ret) if (ret)
return ret; return ret;
return bfq_io_set_weight_legacy(of_css(of), NULL, weight); ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
return ret ?: nbytes;
} }
#ifdef CONFIG_DEBUG_BLK_CGROUP #ifdef CONFIG_DEBUG_BLK_CGROUP
......
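For context, a minimal, hypothetical sketch (not the bfq code itself) of the kernfs/cftype convention this fix restores: a .write() handler reports success by returning the number of bytes it consumed, or a negative errno on failure, never zero.

static ssize_t example_write(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	/* example_apply_setting() is a hypothetical helper for illustration */
	int ret = example_apply_setting(buf);

	/* negative errno on failure, number of consumed bytes on success */
	return ret ?: nbytes;
}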
...@@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194; ...@@ -187,11 +187,25 @@ static const int bfq_stats_min_budgets = 194;
static const int bfq_default_max_budget = 16 * 1024; static const int bfq_default_max_budget = 16 * 1024;
/* /*
* Async to sync throughput distribution is controlled as follows: * When a sync request is dispatched, the queue that contains that
* when an async request is served, the entity is charged the number * request, and all the ancestor entities of that queue, are charged
 * of sectors of the request, multiplied by the factor below * with the number of sectors of the request. In contrast, if the
 * request is async, then the queue and its ancestor entities are
 * charged with the number of sectors of the request, multiplied by
 * the factor below. This throttles the bandwidth for async I/O
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
 *
 * The current value of this parameter is the result of tuning with
 * several hardware and software configurations. We tried to find the
 * lowest value for which writes do not cause noticeable problems for
 * reads. In fact, the lower this parameter, the more stable the I/O
 * control, in the following respect. The lower this parameter is, the
 * less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
*/ */
static const int bfq_async_charge_factor = 10; static const int bfq_async_charge_factor = 3;
/* Default timeout values, in jiffies, approximating CFQ defaults. */ /* Default timeout values, in jiffies, approximating CFQ defaults. */
const int bfq_timeout = HZ / 8; const int bfq_timeout = HZ / 8;
...@@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq, ...@@ -853,16 +867,7 @@ static unsigned long bfq_serv_to_charge(struct request *rq,
if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1) if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1)
return blk_rq_sectors(rq); return blk_rq_sectors(rq);
/*
* If there are no weight-raised queues, then amplify service
* by just the async charge factor; otherwise amplify service
* by twice the async charge factor, to further reduce latency
* for weight-raised queues.
*/
if (bfqq->bfqd->wr_busy_queues == 0)
return blk_rq_sectors(rq) * bfq_async_charge_factor; return blk_rq_sectors(rq) * bfq_async_charge_factor;
return blk_rq_sectors(rq) * 2 * bfq_async_charge_factor;
} }
/** /**
...@@ -3298,6 +3303,27 @@ void bfq_bfqq_expire(struct bfq_data *bfqd, ...@@ -3298,6 +3303,27 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
*/ */
} else } else
entity->service = 0; entity->service = 0;
/*
* Reset the received-service counter for every parent entity.
* Differently from what happens with bfqq->entity.service,
* the resetting of this counter never needs to be postponed
* for parent entities. In fact, in case bfqq may have a
* chance to go on being served using the last, partially
* consumed budget, bfqq->entity.service needs to be kept,
* because if bfqq then actually goes on being served using
* the same budget, the last value of bfqq->entity.service is
* needed to properly decrement bfqq->entity.budget by the
* portion already consumed. In contrast, it is not necessary
* to keep entity->service for parent entities too, because
* the bubble up of the new value of bfqq->entity.budget will
* make sure that the budgets of parent entities are correct,
* even in case bfqq and thus parent entities go on receiving
* service with the same budget.
*/
entity = entity->parent;
for_each_entity(entity)
entity->service = 0;
} }
/* /*
......
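A rough illustration (assumed request size, not from the patch) of the simplified charging in bfq_serv_to_charge() above: sync requests and weight-raised queues are charged their real size, while async requests are overcharged by a constant factor, now 3 instead of the previous 10 (or 20 when weight-raised queues were busy).

/* Sketch only: mirrors the shape of bfq_serv_to_charge() after the change. */
static unsigned long example_serv_to_charge(unsigned long sectors, bool sync)
{
	const int async_charge_factor = 3;	/* same value as bfq_async_charge_factor */

	/* e.g. an 8-sector async write is charged 8 * 3 = 24 sectors of service */
	return sync ? sectors : sectors * async_charge_factor;
}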
...@@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd, ...@@ -130,10 +130,14 @@ static bool bfq_update_next_in_service(struct bfq_sched_data *sd,
if (!change_without_lookup) /* lookup needed */ if (!change_without_lookup) /* lookup needed */
next_in_service = bfq_lookup_next_entity(sd, expiration); next_in_service = bfq_lookup_next_entity(sd, expiration);
if (next_in_service) if (next_in_service) {
parent_sched_may_change = !sd->next_in_service || bool new_budget_triggers_change =
bfq_update_parent_budget(next_in_service); bfq_update_parent_budget(next_in_service);
parent_sched_may_change = !sd->next_in_service ||
new_budget_triggers_change;
}
sd->next_in_service = next_in_service; sd->next_in_service = next_in_service;
if (!next_in_service) if (!next_in_service)
...@@ -877,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq, ...@@ -877,15 +881,11 @@ void bfq_bfqq_charge_time(struct bfq_data *bfqd, struct bfq_queue *bfqq,
unsigned long time_ms) unsigned long time_ms)
{ {
struct bfq_entity *entity = &bfqq->entity; struct bfq_entity *entity = &bfqq->entity;
int tot_serv_to_charge = entity->service; unsigned long timeout_ms = jiffies_to_msecs(bfq_timeout);
unsigned int timeout_ms = jiffies_to_msecs(bfq_timeout); unsigned long bounded_time_ms = min(time_ms, timeout_ms);
int serv_to_charge_for_time =
if (time_ms > 0 && time_ms < timeout_ms) (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms;
tot_serv_to_charge = int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service);
(bfqd->bfq_max_budget * time_ms) / timeout_ms;
if (tot_serv_to_charge < entity->service)
tot_serv_to_charge = entity->service;
/* Increase budget to avoid inconsistencies */ /* Increase budget to avoid inconsistencies */
if (tot_serv_to_charge > entity->budget) if (tot_serv_to_charge > entity->budget)
......
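Read linearly, the rewritten bfq_bfqq_charge_time() computes the charge as in the sketch below; the numbers in the comment are only an assumed example (bfq_default_max_budget = 16 * 1024 and HZ = 1000, so bfq_timeout is 125 ms).

/* Sketch of the computation, not the kernel function itself. */
static int example_serv_to_charge_for_time(int max_budget, int service,
					   unsigned long time_ms,
					   unsigned long timeout_ms)
{
	unsigned long bounded_time_ms = min(time_ms, timeout_ms);
	int serv_for_time = (max_budget * bounded_time_ms) / timeout_ms;

	/* e.g. max_budget = 16384, time_ms = 50, timeout_ms = 125 -> 6553 */
	return max(serv_for_time, service);
}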
...@@ -1036,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, ...@@ -1036,7 +1036,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
laptop_mode_timer_fn, 0); laptop_mode_timer_fn, 0);
timer_setup(&q->timeout, blk_rq_timed_out_timer, 0); timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL); INIT_WORK(&q->timeout_work, NULL);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list); INIT_LIST_HEAD(&q->timeout_list);
INIT_LIST_HEAD(&q->icq_list); INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP #ifdef CONFIG_BLK_CGROUP
...@@ -2162,7 +2161,9 @@ static inline bool should_fail_request(struct hd_struct *part, ...@@ -2162,7 +2161,9 @@ static inline bool should_fail_request(struct hd_struct *part,
static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part) static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
{ {
if (part->policy && op_is_write(bio_op(bio))) { const int op = bio_op(bio);
if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
WARN_ONCE(1, WARN_ONCE(1,
......
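In isolation, the predicate introduced above behaves like this sketch: a flush is classified as a write-type operation, but it carries no data and is harmless on a read-only partition, so it no longer trips the WARN_ONCE.

/* Sketch of the check, not the kernel helper itself. */
static bool example_warn_on_ro(struct bio *bio, bool part_read_only)
{
	const int op = bio_op(bio);

	/* warn only for real writes to a read-only partition, never for flushes */
	return part_read_only && op_is_write(op) && !op_is_flush(op);
}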
...@@ -462,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q) ...@@ -462,50 +462,6 @@ static void blk_mq_sched_tags_teardown(struct request_queue *q)
blk_mq_sched_free_tags(set, hctx, i); blk_mq_sched_free_tags(set, hctx, i);
} }
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct elevator_queue *e = q->elevator;
int ret;
if (!e)
return 0;
ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
if (ret)
return ret;
if (e->type->ops.mq.init_hctx) {
ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
if (ret) {
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
return ret;
}
}
blk_mq_debugfs_register_sched_hctx(q, hctx);
return 0;
}
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx)
{
struct elevator_queue *e = q->elevator;
if (!e)
return;
blk_mq_debugfs_unregister_sched_hctx(hctx);
if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
e->type->ops.mq.exit_hctx(hctx, hctx_idx);
hctx->sched_data = NULL;
}
blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{ {
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
......
...@@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx); ...@@ -28,11 +28,6 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx);
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
unsigned int hctx_idx);
static inline bool static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{ {
......
...@@ -320,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, ...@@ -320,6 +320,18 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
struct blk_mq_hw_ctx *hctx; struct blk_mq_hw_ctx *hctx;
int i; int i;
/*
 * __blk_mq_update_nr_hw_queues() will update nr_hw_queues and
 * queue_hw_ctx after freezing the queue, so we can use q_usage_counter
 * to avoid racing with it. __blk_mq_update_nr_hw_queues() uses
 * synchronize_rcu() to ensure all of the users go out of the critical
 * section below and see the zeroed q_usage_counter.
*/
rcu_read_lock();
if (percpu_ref_is_zero(&q->q_usage_counter)) {
rcu_read_unlock();
return;
}
queue_for_each_hw_ctx(q, hctx, i) { queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags; struct blk_mq_tags *tags = hctx->tags;
...@@ -335,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, ...@@ -335,7 +347,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true); bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false); bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
} }
rcu_read_unlock();
} }
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth, static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
......
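The synchronization the new comment describes can be summarized, sketch only, as two cooperating sides:

/*
 * Sketch of the two sides of the race this closes:
 *
 * reader (blk_mq_queue_tag_busy_iter):
 *	rcu_read_lock();
 *	if (percpu_ref_is_zero(&q->q_usage_counter))
 *		bail out;			// queue is frozen
 *	iterate queue_hw_ctx[] and tags;	// safe, counter is non-zero
 *	rcu_read_unlock();
 *
 * updater (__blk_mq_update_nr_hw_queues):
 *	blk_mq_freeze_queue(q);			// q_usage_counter reaches zero
 *	synchronize_rcu();			// flush readers already inside
 *	update nr_hw_queues and queue_hw_ctx;	// no reader sees stale pointers
 *	blk_mq_unfreeze_queue(q);
 */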
...@@ -2145,8 +2145,6 @@ static void blk_mq_exit_hctx(struct request_queue *q, ...@@ -2145,8 +2145,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
if (set->ops->exit_request) if (set->ops->exit_request)
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx); set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
if (set->ops->exit_hctx) if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx); set->ops->exit_hctx(hctx, hctx_idx);
...@@ -2214,12 +2212,9 @@ static int blk_mq_init_hctx(struct request_queue *q, ...@@ -2214,12 +2212,9 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap; goto free_bitmap;
if (blk_mq_sched_init_hctx(q, hctx, hctx_idx))
goto exit_hctx;
hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size); hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
if (!hctx->fq) if (!hctx->fq)
goto sched_exit_hctx; goto exit_hctx;
if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node)) if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, node))
goto free_fq; goto free_fq;
...@@ -2233,8 +2228,6 @@ static int blk_mq_init_hctx(struct request_queue *q, ...@@ -2233,8 +2228,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
free_fq: free_fq:
kfree(hctx->fq); kfree(hctx->fq);
sched_exit_hctx:
blk_mq_sched_exit_hctx(q, hctx, hctx_idx);
exit_hctx: exit_hctx:
if (set->ops->exit_hctx) if (set->ops->exit_hctx)
set->ops->exit_hctx(hctx, hctx_idx); set->ops->exit_hctx(hctx, hctx_idx);
...@@ -2896,10 +2889,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) ...@@ -2896,10 +2889,81 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
return ret; return ret;
} }
/*
* request_queue and elevator_type pair.
* It is just used by __blk_mq_update_nr_hw_queues to cache
* the elevator_type associated with a request_queue.
*/
struct blk_mq_qe_pair {
struct list_head node;
struct request_queue *q;
struct elevator_type *type;
};
/*
* Cache the elevator_type in qe pair list and switch the
* io scheduler to 'none'
*/
static bool blk_mq_elv_switch_none(struct list_head *head,
struct request_queue *q)
{
struct blk_mq_qe_pair *qe;
if (!q->elevator)
return true;
qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
if (!qe)
return false;
INIT_LIST_HEAD(&qe->node);
qe->q = q;
qe->type = q->elevator->type;
list_add(&qe->node, head);
mutex_lock(&q->sysfs_lock);
/*
 * After elevator_switch_mq, the previous elevator_queue will be
 * released by elevator_release, and the reference to the io scheduler
 * module taken by elevator_get will be put. So we need to take a
 * reference to the io scheduler module here to prevent it from being
 * removed.
*/
__module_get(qe->type->elevator_owner);
elevator_switch_mq(q, NULL);
mutex_unlock(&q->sysfs_lock);
return true;
}
static void blk_mq_elv_switch_back(struct list_head *head,
struct request_queue *q)
{
struct blk_mq_qe_pair *qe;
struct elevator_type *t = NULL;
list_for_each_entry(qe, head, node)
if (qe->q == q) {
t = qe->type;
break;
}
if (!t)
return;
list_del(&qe->node);
kfree(qe);
mutex_lock(&q->sysfs_lock);
elevator_switch_mq(q, t);
mutex_unlock(&q->sysfs_lock);
}
static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
int nr_hw_queues) int nr_hw_queues)
{ {
struct request_queue *q; struct request_queue *q;
LIST_HEAD(head);
lockdep_assert_held(&set->tag_list_lock); lockdep_assert_held(&set->tag_list_lock);
...@@ -2910,6 +2974,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, ...@@ -2910,6 +2974,18 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
list_for_each_entry(q, &set->tag_list, tag_set_list) list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_freeze_queue(q); blk_mq_freeze_queue(q);
/*
* Sync with blk_mq_queue_tag_busy_iter.
*/
synchronize_rcu();
/*
* Switch IO scheduler to 'none', cleaning up the data associated
* with the previous scheduler. We will switch back once we are done
* updating the new sw to hw queue mappings.
*/
list_for_each_entry(q, &set->tag_list, tag_set_list)
if (!blk_mq_elv_switch_none(&head, q))
goto switch_back;
set->nr_hw_queues = nr_hw_queues; set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set); blk_mq_update_queue_map(set);
...@@ -2918,6 +2994,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, ...@@ -2918,6 +2994,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_queue_reinit(q); blk_mq_queue_reinit(q);
} }
switch_back:
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_elv_switch_back(&head, q);
list_for_each_entry(q, &set->tag_list, tag_set_list) list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q); blk_mq_unfreeze_queue(q);
} }
......
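Put together, and ignoring error handling, the updated path reads roughly as follows; this is an outline, not the literal function, which also skips queues that have no elevator.

/* Outline of __blk_mq_update_nr_hw_queues() after this series. */
static void example_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr)
{
	struct request_queue *q;
	LIST_HEAD(head);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_freeze_queue(q);			/* stop new I/O */
	synchronize_rcu();				/* sync with tag iterators */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_none(&head, q);	/* remember and drop elevator */

	set->nr_hw_queues = nr;
	blk_mq_update_queue_map(set);
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_queue_reinit(q);

	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_elv_switch_back(&head, q);	/* restore saved elevator */
	list_for_each_entry(q, &set->tag_list, tag_set_list)
		blk_mq_unfreeze_queue(q);
}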
...@@ -576,12 +576,8 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock) ...@@ -576,12 +576,8 @@ static void wbt_wait(struct rq_qos *rqos, struct bio *bio, spinlock_t *lock)
struct rq_wb *rwb = RQWB(rqos); struct rq_wb *rwb = RQWB(rqos);
enum wbt_flags flags; enum wbt_flags flags;
if (!rwb_enabled(rwb))
return;
flags = bio_to_wbt_flags(rwb, bio); flags = bio_to_wbt_flags(rwb, bio);
if (!(flags & WBT_TRACKED)) {
if (!wbt_should_throttle(rwb, bio)) {
if (flags & WBT_READ) if (flags & WBT_READ)
wb_timestamp(rwb, &rwb->last_issue); wb_timestamp(rwb, &rwb->last_issue);
return; return;
......
...@@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq ...@@ -234,6 +234,8 @@ static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq
int elevator_init(struct request_queue *); int elevator_init(struct request_queue *);
int elevator_init_mq(struct request_queue *q); int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
void elevator_exit(struct request_queue *, struct elevator_queue *); void elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q); int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q); void elv_unregister_queue(struct request_queue *q);
...@@ -297,7 +299,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int); ...@@ -297,7 +299,7 @@ extern int blk_update_nr_requests(struct request_queue *, unsigned int);
* b) the queue had IO stats enabled when this request was started, and * b) the queue had IO stats enabled when this request was started, and
* c) it's a file system request * c) it's a file system request
*/ */
static inline int blk_do_io_stat(struct request *rq) static inline bool blk_do_io_stat(struct request *rq)
{ {
return rq->rq_disk && return rq->rq_disk &&
(rq->rq_flags & RQF_IO_STAT) && (rq->rq_flags & RQF_IO_STAT) &&
......
...@@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e) ...@@ -933,16 +933,13 @@ void elv_unregister(struct elevator_type *e)
} }
EXPORT_SYMBOL_GPL(elv_unregister); EXPORT_SYMBOL_GPL(elv_unregister);
static int elevator_switch_mq(struct request_queue *q, int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e) struct elevator_type *new_e)
{ {
int ret; int ret;
lockdep_assert_held(&q->sysfs_lock); lockdep_assert_held(&q->sysfs_lock);
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
if (q->elevator) { if (q->elevator) {
if (q->elevator->registered) if (q->elevator->registered)
elv_unregister_queue(q); elv_unregister_queue(q);
...@@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q, ...@@ -968,8 +965,6 @@ static int elevator_switch_mq(struct request_queue *q,
blk_add_trace_msg(q, "elv switch: none"); blk_add_trace_msg(q, "elv switch: none");
out: out:
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return ret; return ret;
} }
...@@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) ...@@ -1021,8 +1016,17 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
lockdep_assert_held(&q->sysfs_lock); lockdep_assert_held(&q->sysfs_lock);
if (q->mq_ops) if (q->mq_ops) {
return elevator_switch_mq(q, new_e); blk_mq_freeze_queue(q);
blk_mq_quiesce_queue(q);
err = elevator_switch_mq(q, new_e);
blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
return err;
}
/* /*
* Turn on BYPASS and drain all requests w/ elevator private data. * Turn on BYPASS and drain all requests w/ elevator private data.
......
...@@ -2428,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T ...@@ -2428,16 +2428,20 @@ static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
{ {
DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo = DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
Controller->V2.LogicalDeviceInformation[LogicalDriveNumber]; Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
unsigned char *ReadCacheStatus[] = { "Read Cache Disabled", static const unsigned char *ReadCacheStatus[] = {
"Read Cache Disabled",
"Read Cache Enabled", "Read Cache Enabled",
"Read Ahead Enabled", "Read Ahead Enabled",
"Intelligent Read Ahead Enabled", "Intelligent Read Ahead Enabled",
"-", "-", "-", "-" }; "-", "-", "-", "-"
unsigned char *WriteCacheStatus[] = { "Write Cache Disabled", };
static const unsigned char *WriteCacheStatus[] = {
"Write Cache Disabled",
"Logical Device Read Only", "Logical Device Read Only",
"Write Cache Enabled", "Write Cache Enabled",
"Intelligent Write Cache Enabled", "Intelligent Write Cache Enabled",
"-", "-", "-", "-" }; "-", "-", "-", "-"
};
unsigned char *GeometryTranslation; unsigned char *GeometryTranslation;
if (LogicalDeviceInfo == NULL) continue; if (LogicalDeviceInfo == NULL) continue;
switch (LogicalDeviceInfo->DriveGeometry) switch (LogicalDeviceInfo->DriveGeometry)
...@@ -4339,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command) ...@@ -4339,14 +4343,16 @@ static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command) static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
{ {
DAC960_Controller_T *Controller = Command->Controller; DAC960_Controller_T *Controller = Command->Controller;
unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR", static const unsigned char *SenseErrors[] = {
"NO SENSE", "RECOVERED ERROR",
"NOT READY", "MEDIUM ERROR", "NOT READY", "MEDIUM ERROR",
"HARDWARE ERROR", "ILLEGAL REQUEST", "HARDWARE ERROR", "ILLEGAL REQUEST",
"UNIT ATTENTION", "DATA PROTECT", "UNIT ATTENTION", "DATA PROTECT",
"BLANK CHECK", "VENDOR-SPECIFIC", "BLANK CHECK", "VENDOR-SPECIFIC",
"COPY ABORTED", "ABORTED COMMAND", "COPY ABORTED", "ABORTED COMMAND",
"EQUAL", "VOLUME OVERFLOW", "EQUAL", "VOLUME OVERFLOW",
"MISCOMPARE", "RESERVED" }; "MISCOMPARE", "RESERVED"
};
unsigned char *CommandName = "UNKNOWN"; unsigned char *CommandName = "UNKNOWN";
switch (Command->CommandType) switch (Command->CommandType)
{ {
......
...@@ -2740,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) ...@@ -2740,6 +2740,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
pd->write_congestion_on = write_congestion_on; pd->write_congestion_on = write_congestion_on;
pd->write_congestion_off = write_congestion_off; pd->write_congestion_off = write_congestion_off;
ret = -ENOMEM;
disk = alloc_disk(1); disk = alloc_disk(1);
if (!disk) if (!disk)
goto out_mem; goto out_mem;
......
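The one-line pktcdvd change follows the usual goto-error pattern: 'ret' must already hold the right errno before jumping to the common exit label. A minimal, hypothetical sketch of the pattern:

static int example_setup(void)
{
	int ret;
	struct gendisk *disk;

	ret = -ENOMEM;			/* set before the first failure point */
	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;		/* returns -ENOMEM, not a stale value */

	/* ... further setup that may override ret ... */
	return 0;

out_mem:
	return ret;
}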
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
config BCACHE config BCACHE
tristate "Block device as cache" tristate "Block device as cache"
select CRC64 select CRC64
---help--- help
Allows a block device to be used as cache for other devices; uses Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs. a btree for indexing and the layout is optimized for SSDs.
...@@ -11,7 +11,7 @@ config BCACHE ...@@ -11,7 +11,7 @@ config BCACHE
config BCACHE_DEBUG config BCACHE_DEBUG
bool "Bcache debugging" bool "Bcache debugging"
depends on BCACHE depends on BCACHE
---help--- help
Don't select this option unless you're a developer Don't select this option unless you're a developer
Enables extra debugging tools, allows expensive runtime checks to be Enables extra debugging tools, allows expensive runtime checks to be
...@@ -21,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG ...@@ -21,7 +21,7 @@ config BCACHE_CLOSURES_DEBUG
bool "Debug closures" bool "Debug closures"
depends on BCACHE depends on BCACHE
select DEBUG_FS select DEBUG_FS
---help--- help
Keeps all active closures in a linked list and provides a debugfs Keeps all active closures in a linked list and provides a debugfs
interface to list them, which makes it possible to see asynchronous interface to list them, which makes it possible to see asynchronous
operations that get stuck. operations that get stuck.
...@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors) ...@@ -87,8 +87,8 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
{ {
struct cache *ca; struct cache *ca;
struct bucket *b; struct bucket *b;
unsigned next = c->nbuckets * c->sb.bucket_size / 1024; unsigned int next = c->nbuckets * c->sb.bucket_size / 1024;
unsigned i; unsigned int i;
int r; int r;
atomic_sub(sectors, &c->rescale); atomic_sub(sectors, &c->rescale);
...@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) ...@@ -169,7 +169,7 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
#define bucket_prio(b) \ #define bucket_prio(b) \
({ \ ({ \
unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \ unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
\ \
(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \ (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
}) })
...@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca) ...@@ -244,6 +244,7 @@ static void invalidate_buckets_random(struct cache *ca)
while (!fifo_full(&ca->free_inc)) { while (!fifo_full(&ca->free_inc)) {
size_t n; size_t n;
get_random_bytes(&n, sizeof(n)); get_random_bytes(&n, sizeof(n));
n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
...@@ -301,7 +302,7 @@ do { \ ...@@ -301,7 +302,7 @@ do { \
static int bch_allocator_push(struct cache *ca, long bucket) static int bch_allocator_push(struct cache *ca, long bucket)
{ {
unsigned i; unsigned int i;
/* Prios/gens are actually the most important reserve */ /* Prios/gens are actually the most important reserve */
if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
...@@ -385,7 +386,7 @@ static int bch_allocator_thread(void *arg) ...@@ -385,7 +386,7 @@ static int bch_allocator_thread(void *arg)
/* Allocation */ /* Allocation */
long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
{ {
DEFINE_WAIT(w); DEFINE_WAIT(w);
struct bucket *b; struct bucket *b;
...@@ -421,7 +422,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait) ...@@ -421,7 +422,7 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
if (expensive_debug_checks(ca->set)) { if (expensive_debug_checks(ca->set)) {
size_t iter; size_t iter;
long i; long i;
unsigned j; unsigned int j;
for (iter = 0; iter < prio_buckets(ca) * 2; iter++) for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
...@@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b) ...@@ -470,14 +471,14 @@ void __bch_bucket_free(struct cache *ca, struct bucket *b)
void bch_bucket_free(struct cache_set *c, struct bkey *k) void bch_bucket_free(struct cache_set *c, struct bkey *k)
{ {
unsigned i; unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
__bch_bucket_free(PTR_CACHE(c, k, i), __bch_bucket_free(PTR_CACHE(c, k, i),
PTR_BUCKET(c, k, i)); PTR_BUCKET(c, k, i));
} }
int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait) struct bkey *k, int n, bool wait)
{ {
int i; int i;
...@@ -510,10 +511,11 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, ...@@ -510,10 +511,11 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
return -1; return -1;
} }
int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, int bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
struct bkey *k, int n, bool wait) struct bkey *k, int n, bool wait)
{ {
int ret; int ret;
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
ret = __bch_bucket_alloc_set(c, reserve, k, n, wait); ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
mutex_unlock(&c->bucket_lock); mutex_unlock(&c->bucket_lock);
...@@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve, ...@@ -524,8 +526,8 @@ int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
struct open_bucket { struct open_bucket {
struct list_head list; struct list_head list;
unsigned last_write_point; unsigned int last_write_point;
unsigned sectors_free; unsigned int sectors_free;
BKEY_PADDED(key); BKEY_PADDED(key);
}; };
...@@ -556,7 +558,7 @@ struct open_bucket { ...@@ -556,7 +558,7 @@ struct open_bucket {
*/ */
static struct open_bucket *pick_data_bucket(struct cache_set *c, static struct open_bucket *pick_data_bucket(struct cache_set *c,
const struct bkey *search, const struct bkey *search,
unsigned write_point, unsigned int write_point,
struct bkey *alloc) struct bkey *alloc)
{ {
struct open_bucket *ret, *ret_task = NULL; struct open_bucket *ret, *ret_task = NULL;
...@@ -595,12 +597,16 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c, ...@@ -595,12 +597,16 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
* *
* If s->writeback is true, will not fail. * If s->writeback is true, will not fail.
*/ */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, bool bch_alloc_sectors(struct cache_set *c,
unsigned write_point, unsigned write_prio, bool wait) struct bkey *k,
unsigned int sectors,
unsigned int write_point,
unsigned int write_prio,
bool wait)
{ {
struct open_bucket *b; struct open_bucket *b;
BKEY_PADDED(key) alloc; BKEY_PADDED(key) alloc;
unsigned i; unsigned int i;
/* /*
* We might have to allocate a new bucket, which we can't do with a * We might have to allocate a new bucket, which we can't do with a
...@@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors, ...@@ -613,7 +619,7 @@ bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
spin_lock(&c->data_bucket_lock); spin_lock(&c->data_bucket_lock);
while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) { while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
unsigned watermark = write_prio unsigned int watermark = write_prio
? RESERVE_MOVINGGC ? RESERVE_MOVINGGC
: RESERVE_NONE; : RESERVE_NONE;
...@@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c) ...@@ -702,6 +708,7 @@ int bch_open_buckets_alloc(struct cache_set *c)
for (i = 0; i < MAX_OPEN_BUCKETS; i++) { for (i = 0; i < MAX_OPEN_BUCKETS; i++) {
struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL); struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);
if (!b) if (!b)
return -ENOMEM; return -ENOMEM;
......
...@@ -163,10 +163,10 @@ struct bset_tree { ...@@ -163,10 +163,10 @@ struct bset_tree {
*/ */
/* size of the binary tree and prev array */ /* size of the binary tree and prev array */
unsigned size; unsigned int size;
/* function of size - precalculated for to_inorder() */ /* function of size - precalculated for to_inorder() */
unsigned extra; unsigned int extra;
/* copy of the last key in the set */ /* copy of the last key in the set */
struct bkey end; struct bkey end;
...@@ -187,18 +187,25 @@ struct bset_tree { ...@@ -187,18 +187,25 @@ struct bset_tree {
}; };
struct btree_keys_ops { struct btree_keys_ops {
bool (*sort_cmp)(struct btree_iter_set, bool (*sort_cmp)(struct btree_iter_set l,
struct btree_iter_set); struct btree_iter_set r);
struct bkey *(*sort_fixup)(struct btree_iter *, struct bkey *); struct bkey *(*sort_fixup)(struct btree_iter *iter,
bool (*insert_fixup)(struct btree_keys *, struct bkey *, struct bkey *tmp);
struct btree_iter *, struct bkey *); bool (*insert_fixup)(struct btree_keys *b,
bool (*key_invalid)(struct btree_keys *, struct bkey *insert,
const struct bkey *); struct btree_iter *iter,
bool (*key_bad)(struct btree_keys *, const struct bkey *); struct bkey *replace_key);
bool (*key_merge)(struct btree_keys *, bool (*key_invalid)(struct btree_keys *bk,
struct bkey *, struct bkey *); const struct bkey *k);
void (*key_to_text)(char *, size_t, const struct bkey *); bool (*key_bad)(struct btree_keys *bk,
void (*key_dump)(struct btree_keys *, const struct bkey *); const struct bkey *k);
bool (*key_merge)(struct btree_keys *bk,
struct bkey *l, struct bkey *r);
void (*key_to_text)(char *buf,
size_t size,
const struct bkey *k);
void (*key_dump)(struct btree_keys *keys,
const struct bkey *k);
/* /*
* Only used for deciding whether to use START_KEY(k) or just the key * Only used for deciding whether to use START_KEY(k) or just the key
...@@ -211,7 +218,7 @@ struct btree_keys { ...@@ -211,7 +218,7 @@ struct btree_keys {
const struct btree_keys_ops *ops; const struct btree_keys_ops *ops;
uint8_t page_order; uint8_t page_order;
uint8_t nsets; uint8_t nsets;
unsigned last_set_unwritten:1; unsigned int last_set_unwritten:1;
bool *expensive_debug_checks; bool *expensive_debug_checks;
/* /*
...@@ -239,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k) ...@@ -239,12 +246,14 @@ static inline bool bkey_written(struct btree_keys *b, struct bkey *k)
return !b->last_set_unwritten || k < b->set[b->nsets].data->start; return !b->last_set_unwritten || k < b->set[b->nsets].data->start;
} }
static inline unsigned bset_byte_offset(struct btree_keys *b, struct bset *i) static inline unsigned int bset_byte_offset(struct btree_keys *b,
struct bset *i)
{ {
return ((size_t) i) - ((size_t) b->set->data); return ((size_t) i) - ((size_t) b->set->data);
} }
static inline unsigned bset_sector_offset(struct btree_keys *b, struct bset *i) static inline unsigned int bset_sector_offset(struct btree_keys *b,
struct bset *i)
{ {
return bset_byte_offset(b, i) >> 9; return bset_byte_offset(b, i) >> 9;
} }
...@@ -273,25 +282,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b) ...@@ -273,25 +282,27 @@ static inline size_t bch_btree_keys_u64s_remaining(struct btree_keys *b)
} }
static inline struct bset *bset_next_set(struct btree_keys *b, static inline struct bset *bset_next_set(struct btree_keys *b,
unsigned block_bytes) unsigned int block_bytes)
{ {
struct bset *i = bset_tree_last(b)->data; struct bset *i = bset_tree_last(b)->data;
return ((void *) i) + roundup(set_bytes(i), block_bytes); return ((void *) i) + roundup(set_bytes(i), block_bytes);
} }
void bch_btree_keys_free(struct btree_keys *); void bch_btree_keys_free(struct btree_keys *b);
int bch_btree_keys_alloc(struct btree_keys *, unsigned, gfp_t); int bch_btree_keys_alloc(struct btree_keys *b, unsigned int page_order,
void bch_btree_keys_init(struct btree_keys *, const struct btree_keys_ops *, gfp_t gfp);
bool *); void bch_btree_keys_init(struct btree_keys *b, const struct btree_keys_ops *ops,
bool *expensive_debug_checks);
void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
void bch_bset_build_written_tree(struct btree_keys *); void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic);
void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *); void bch_bset_build_written_tree(struct btree_keys *b);
bool bch_bkey_try_merge(struct btree_keys *, struct bkey *, struct bkey *); void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k);
void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *); bool bch_bkey_try_merge(struct btree_keys *b, struct bkey *l, struct bkey *r);
unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *, void bch_bset_insert(struct btree_keys *b, struct bkey *where,
struct bkey *); struct bkey *insert);
unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bkey *replace_key);
enum { enum {
BTREE_INSERT_STATUS_NO_INSERT = 0, BTREE_INSERT_STATUS_NO_INSERT = 0,
...@@ -313,18 +324,21 @@ struct btree_iter { ...@@ -313,18 +324,21 @@ struct btree_iter {
} data[MAX_BSETS]; } data[MAX_BSETS];
}; };
typedef bool (*ptr_filter_fn)(struct btree_keys *, const struct bkey *); typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
struct bkey *bch_btree_iter_next(struct btree_iter *); struct bkey *bch_btree_iter_next(struct btree_iter *iter);
struct bkey *bch_btree_iter_next_filter(struct btree_iter *, struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
struct btree_keys *, ptr_filter_fn); struct btree_keys *b,
ptr_filter_fn fn);
void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *); void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *, struct bkey *end);
struct bkey *); struct bkey *bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search);
struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *, struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *); const struct bkey *search);
/* /*
* Returns the first key that is strictly greater than search * Returns the first key that is strictly greater than search
...@@ -349,21 +363,23 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b, ...@@ -349,21 +363,23 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
struct bset_sort_state { struct bset_sort_state {
mempool_t pool; mempool_t pool;
unsigned page_order; unsigned int page_order;
unsigned crit_factor; unsigned int crit_factor;
struct time_stats time; struct time_stats time;
}; };
void bch_bset_sort_state_free(struct bset_sort_state *); void bch_bset_sort_state_free(struct bset_sort_state *state);
int bch_bset_sort_state_init(struct bset_sort_state *, unsigned); int bch_bset_sort_state_init(struct bset_sort_state *state,
void bch_btree_sort_lazy(struct btree_keys *, struct bset_sort_state *); unsigned int page_order);
void bch_btree_sort_into(struct btree_keys *, struct btree_keys *, void bch_btree_sort_lazy(struct btree_keys *b, struct bset_sort_state *state);
struct bset_sort_state *); void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
void bch_btree_sort_and_fix_extents(struct btree_keys *, struct btree_iter *, struct bset_sort_state *state);
struct bset_sort_state *); void bch_btree_sort_and_fix_extents(struct btree_keys *b,
void bch_btree_sort_partial(struct btree_keys *, unsigned, struct btree_iter *iter,
struct bset_sort_state *); struct bset_sort_state *state);
void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state);
static inline void bch_btree_sort(struct btree_keys *b, static inline void bch_btree_sort(struct btree_keys *b,
struct bset_sort_state *state) struct bset_sort_state *state)
...@@ -377,13 +393,13 @@ struct bset_stats { ...@@ -377,13 +393,13 @@ struct bset_stats {
size_t floats, failed; size_t floats, failed;
}; };
void bch_btree_keys_stats(struct btree_keys *, struct bset_stats *); void bch_btree_keys_stats(struct btree_keys *b, struct bset_stats *state);
/* Bkey utility code */ /* Bkey utility code */
#define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys) #define bset_bkey_last(i) bkey_idx((struct bkey *) (i)->d, (i)->keys)
static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned idx) static inline struct bkey *bset_bkey_idx(struct bset *i, unsigned int idx)
{ {
return bkey_idx(i->start, idx); return bkey_idx(i->start, idx);
} }
...@@ -401,10 +417,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l, ...@@ -401,10 +417,10 @@ static __always_inline int64_t bkey_cmp(const struct bkey *l,
: (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r); : (int64_t) KEY_OFFSET(l) - (int64_t) KEY_OFFSET(r);
} }
void bch_bkey_copy_single_ptr(struct bkey *, const struct bkey *, void bch_bkey_copy_single_ptr(struct bkey *dest, const struct bkey *src,
unsigned); unsigned int i);
bool __bch_cut_front(const struct bkey *, struct bkey *); bool __bch_cut_front(const struct bkey *where, struct bkey *k);
bool __bch_cut_back(const struct bkey *, struct bkey *); bool __bch_cut_back(const struct bkey *where, struct bkey *k);
static inline bool bch_cut_front(const struct bkey *where, struct bkey *k) static inline bool bch_cut_front(const struct bkey *where, struct bkey *k)
{ {
...@@ -522,18 +538,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l) ...@@ -522,18 +538,20 @@ static inline size_t bch_keylist_bytes(struct keylist *l)
return bch_keylist_nkeys(l) * sizeof(uint64_t); return bch_keylist_nkeys(l) * sizeof(uint64_t);
} }
struct bkey *bch_keylist_pop(struct keylist *); struct bkey *bch_keylist_pop(struct keylist *l);
void bch_keylist_pop_front(struct keylist *); void bch_keylist_pop_front(struct keylist *l);
int __bch_keylist_realloc(struct keylist *, unsigned); int __bch_keylist_realloc(struct keylist *l, unsigned int u64s);
/* Debug stuff */ /* Debug stuff */
#ifdef CONFIG_BCACHE_DEBUG #ifdef CONFIG_BCACHE_DEBUG
int __bch_count_data(struct btree_keys *); int __bch_count_data(struct btree_keys *b);
void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...); void __printf(2, 3) __bch_check_keys(struct btree_keys *b,
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); const char *fmt,
void bch_dump_bucket(struct btree_keys *); ...);
void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
void bch_dump_bucket(struct btree_keys *b);
#else #else
...@@ -541,7 +559,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; } ...@@ -541,7 +559,7 @@ static inline int __bch_count_data(struct btree_keys *b) { return -1; }
static inline void __printf(2, 3) static inline void __printf(2, 3)
__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {} __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
static inline void bch_dump_bucket(struct btree_keys *b) {} static inline void bch_dump_bucket(struct btree_keys *b) {}
void bch_dump_bset(struct btree_keys *, struct bset *, unsigned); void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned int set);
#endif #endif
......
...@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b) ...@@ -183,7 +183,7 @@ static void bch_btree_init_next(struct btree *b)
void bkey_put(struct cache_set *c, struct bkey *k) void bkey_put(struct cache_set *c, struct bkey *k)
{ {
unsigned i; unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) if (ptr_available(c, k, i))
...@@ -287,6 +287,7 @@ void bch_btree_node_read_done(struct btree *b) ...@@ -287,6 +287,7 @@ void bch_btree_node_read_done(struct btree *b)
static void btree_node_read_endio(struct bio *bio) static void btree_node_read_endio(struct bio *bio)
{ {
struct closure *cl = bio->bi_private; struct closure *cl = bio->bi_private;
closure_put(cl); closure_put(cl);
} }
...@@ -435,7 +436,10 @@ static void do_btree_node_write(struct btree *b) ...@@ -435,7 +436,10 @@ static void do_btree_node_write(struct btree *b)
continue_at(cl, btree_node_write_done, NULL); continue_at(cl, btree_node_write_done, NULL);
} else { } else {
/* No problem for multipage bvec since the bio is just allocated */ /*
* No problem for multipage bvec since the bio is
* just allocated
*/
b->bio->bi_vcnt = 0; b->bio->bi_vcnt = 0;
bch_bio_map(b->bio, i); bch_bio_map(b->bio, i);
...@@ -479,7 +483,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent) ...@@ -479,7 +483,7 @@ void __bch_btree_node_write(struct btree *b, struct closure *parent)
void bch_btree_node_write(struct btree *b, struct closure *parent) void bch_btree_node_write(struct btree *b, struct closure *parent)
{ {
unsigned nsets = b->keys.nsets; unsigned int nsets = b->keys.nsets;
lockdep_assert_held(&b->lock); lockdep_assert_held(&b->lock);
...@@ -581,7 +585,7 @@ static void mca_bucket_free(struct btree *b) ...@@ -581,7 +585,7 @@ static void mca_bucket_free(struct btree *b)
list_move(&b->list, &b->c->btree_cache_freeable); list_move(&b->list, &b->c->btree_cache_freeable);
} }
static unsigned btree_order(struct bkey *k) static unsigned int btree_order(struct bkey *k)
{ {
return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1); return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
} }
...@@ -589,7 +593,7 @@ static unsigned btree_order(struct bkey *k) ...@@ -589,7 +593,7 @@ static unsigned btree_order(struct bkey *k)
static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp) static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{ {
if (!bch_btree_keys_alloc(&b->keys, if (!bch_btree_keys_alloc(&b->keys,
max_t(unsigned, max_t(unsigned int,
ilog2(b->c->btree_pages), ilog2(b->c->btree_pages),
btree_order(k)), btree_order(k)),
gfp)) { gfp)) {
...@@ -604,6 +608,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, ...@@ -604,6 +608,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
struct bkey *k, gfp_t gfp) struct bkey *k, gfp_t gfp)
{ {
struct btree *b = kzalloc(sizeof(struct btree), gfp); struct btree *b = kzalloc(sizeof(struct btree), gfp);
if (!b) if (!b)
return NULL; return NULL;
...@@ -620,7 +625,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c, ...@@ -620,7 +625,7 @@ static struct btree *mca_bucket_alloc(struct cache_set *c,
return b; return b;
} }
static int mca_reap(struct btree *b, unsigned min_order, bool flush) static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{ {
struct closure cl; struct closure cl;
...@@ -746,6 +751,7 @@ void bch_btree_cache_free(struct cache_set *c) ...@@ -746,6 +751,7 @@ void bch_btree_cache_free(struct cache_set *c)
{ {
struct btree *b; struct btree *b;
struct closure cl; struct closure cl;
closure_init_stack(&cl); closure_init_stack(&cl);
if (c->shrink.list.next) if (c->shrink.list.next)
...@@ -786,7 +792,7 @@ void bch_btree_cache_free(struct cache_set *c) ...@@ -786,7 +792,7 @@ void bch_btree_cache_free(struct cache_set *c)
int bch_btree_cache_alloc(struct cache_set *c) int bch_btree_cache_alloc(struct cache_set *c)
{ {
unsigned i; unsigned int i;
for (i = 0; i < mca_reserve(c); i++) for (i = 0; i < mca_reserve(c); i++)
if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
...@@ -1124,6 +1130,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b, ...@@ -1124,6 +1130,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
struct btree_op *op) struct btree_op *op)
{ {
struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
if (!IS_ERR_OR_NULL(n)) { if (!IS_ERR_OR_NULL(n)) {
mutex_lock(&n->write_lock); mutex_lock(&n->write_lock);
bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
...@@ -1136,7 +1143,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b, ...@@ -1136,7 +1143,7 @@ static struct btree *btree_node_alloc_replacement(struct btree *b,
static void make_btree_freeing_key(struct btree *b, struct bkey *k) static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{ {
unsigned i; unsigned int i;
mutex_lock(&b->c->bucket_lock); mutex_lock(&b->c->bucket_lock);
...@@ -1157,7 +1164,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op) ...@@ -1157,7 +1164,7 @@ static int btree_check_reserve(struct btree *b, struct btree_op *op)
{ {
struct cache_set *c = b->c; struct cache_set *c = b->c;
struct cache *ca; struct cache *ca;
unsigned i, reserve = (c->root->level - b->level) * 2 + 1; unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
...@@ -1181,7 +1188,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, ...@@ -1181,7 +1188,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
struct bkey *k) struct bkey *k)
{ {
uint8_t stale = 0; uint8_t stale = 0;
unsigned i; unsigned int i;
struct bucket *g; struct bucket *g;
/* /*
...@@ -1219,7 +1226,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, ...@@ -1219,7 +1226,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
SET_GC_MARK(g, GC_MARK_RECLAIMABLE); SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
/* guard against overflow */ /* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned, SET_GC_SECTORS_USED(g, min_t(unsigned int,
GC_SECTORS_USED(g) + KEY_SIZE(k), GC_SECTORS_USED(g) + KEY_SIZE(k),
MAX_GC_SECTORS_USED)); MAX_GC_SECTORS_USED));
...@@ -1233,7 +1240,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, ...@@ -1233,7 +1240,7 @@ static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{ {
unsigned i; unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) && if (ptr_available(c, k, i) &&
...@@ -1259,7 +1266,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats) ...@@ -1259,7 +1266,7 @@ void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{ {
uint8_t stale = 0; uint8_t stale = 0;
unsigned keys = 0, good_keys = 0; unsigned int keys = 0, good_keys = 0;
struct bkey *k; struct bkey *k;
struct btree_iter iter; struct btree_iter iter;
struct bset_tree *t; struct bset_tree *t;
...@@ -1302,16 +1309,18 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc) ...@@ -1302,16 +1309,18 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
struct gc_merge_info { struct gc_merge_info {
struct btree *b; struct btree *b;
unsigned keys; unsigned int keys;
}; };
static int bch_btree_insert_node(struct btree *, struct btree_op *, static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
struct keylist *, atomic_t *, struct bkey *); struct keylist *insert_keys,
atomic_t *journal_ref,
struct bkey *replace_key);
static int btree_gc_coalesce(struct btree *b, struct btree_op *op, static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
struct gc_stat *gc, struct gc_merge_info *r) struct gc_stat *gc, struct gc_merge_info *r)
{ {
unsigned i, nodes = 0, keys = 0, blocks; unsigned int i, nodes = 0, keys = 0, blocks;
struct btree *new_nodes[GC_MERGE_NODES]; struct btree *new_nodes[GC_MERGE_NODES];
struct keylist keylist; struct keylist keylist;
struct closure cl; struct closure cl;
...@@ -1511,11 +1520,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op, ...@@ -1511,11 +1520,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
return -EINTR; return -EINTR;
} }
static unsigned btree_gc_count_keys(struct btree *b) static unsigned int btree_gc_count_keys(struct btree *b)
{ {
struct bkey *k; struct bkey *k;
struct btree_iter iter; struct btree_iter iter;
unsigned ret = 0; unsigned int ret = 0;
for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad) for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k); ret += bkey_u64s(k);
...@@ -1678,7 +1687,7 @@ static void btree_gc_start(struct cache_set *c) ...@@ -1678,7 +1687,7 @@ static void btree_gc_start(struct cache_set *c)
{ {
struct cache *ca; struct cache *ca;
struct bucket *b; struct bucket *b;
unsigned i; unsigned int i;
if (!c->gc_mark_valid) if (!c->gc_mark_valid)
return; return;
...@@ -1704,7 +1713,7 @@ static void bch_btree_gc_finish(struct cache_set *c) ...@@ -1704,7 +1713,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
{ {
struct bucket *b; struct bucket *b;
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
...@@ -1722,7 +1731,7 @@ static void bch_btree_gc_finish(struct cache_set *c) ...@@ -1722,7 +1731,7 @@ static void bch_btree_gc_finish(struct cache_set *c)
struct bcache_device *d = c->devices[i]; struct bcache_device *d = c->devices[i];
struct cached_dev *dc; struct cached_dev *dc;
struct keybuf_key *w, *n; struct keybuf_key *w, *n;
unsigned j; unsigned int j;
if (!d || UUID_FLASH_ONLY(&c->uuids[i])) if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
continue; continue;
...@@ -1814,7 +1823,7 @@ static void bch_btree_gc(struct cache_set *c) ...@@ -1814,7 +1823,7 @@ static void bch_btree_gc(struct cache_set *c)
static bool gc_should_run(struct cache_set *c) static bool gc_should_run(struct cache_set *c)
{ {
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
for_each_cache(ca, c, i) for_each_cache(ca, c, i)
if (ca->invalidate_needs_gc) if (ca->invalidate_needs_gc)
...@@ -1905,7 +1914,7 @@ void bch_initial_gc_finish(struct cache_set *c) ...@@ -1905,7 +1914,7 @@ void bch_initial_gc_finish(struct cache_set *c)
{ {
struct cache *ca; struct cache *ca;
struct bucket *b; struct bucket *b;
unsigned i; unsigned int i;
bch_btree_gc_finish(c); bch_btree_gc_finish(c);
...@@ -1945,7 +1954,7 @@ void bch_initial_gc_finish(struct cache_set *c) ...@@ -1945,7 +1954,7 @@ void bch_initial_gc_finish(struct cache_set *c)
static bool btree_insert_key(struct btree *b, struct bkey *k, static bool btree_insert_key(struct btree *b, struct bkey *k,
struct bkey *replace_key) struct bkey *replace_key)
{ {
unsigned status; unsigned int status;
BUG_ON(bkey_cmp(k, &b->key) > 0); BUG_ON(bkey_cmp(k, &b->key) > 0);
...@@ -2044,7 +2053,7 @@ static int btree_split(struct btree *b, struct btree_op *op, ...@@ -2044,7 +2053,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5; block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
if (split) { if (split) {
unsigned keys = 0; unsigned int keys = 0;
trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys); trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
...@@ -2300,7 +2309,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys, ...@@ -2300,7 +2309,7 @@ int bch_btree_insert(struct cache_set *c, struct keylist *keys,
void bch_btree_set_root(struct btree *b) void bch_btree_set_root(struct btree *b)
{ {
unsigned i; unsigned int i;
struct closure cl; struct closure cl;
closure_init_stack(&cl); closure_init_stack(&cl);
...@@ -2412,7 +2421,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l, ...@@ -2412,7 +2421,7 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
struct refill { struct refill {
struct btree_op op; struct btree_op op;
unsigned nr_found; unsigned int nr_found;
struct keybuf *buf; struct keybuf *buf;
struct bkey *end; struct bkey *end;
keybuf_pred_fn *pred; keybuf_pred_fn *pred;
...@@ -2488,6 +2497,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, ...@@ -2488,6 +2497,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
if (!RB_EMPTY_ROOT(&buf->keys)) { if (!RB_EMPTY_ROOT(&buf->keys)) {
struct keybuf_key *w; struct keybuf_key *w;
w = RB_FIRST(&buf->keys, struct keybuf_key, node); w = RB_FIRST(&buf->keys, struct keybuf_key, node);
buf->start = START_KEY(&w->key); buf->start = START_KEY(&w->key);
...@@ -2519,6 +2529,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, ...@@ -2519,6 +2529,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
{ {
bool ret = false; bool ret = false;
struct keybuf_key *p, *w, s; struct keybuf_key *p, *w, s;
s.key = *start; s.key = *start;
if (bkey_cmp(end, &buf->start) <= 0 || if (bkey_cmp(end, &buf->start) <= 0 ||
...@@ -2545,6 +2556,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start, ...@@ -2545,6 +2556,7 @@ bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
struct keybuf_key *bch_keybuf_next(struct keybuf *buf) struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{ {
struct keybuf_key *w; struct keybuf_key *w;
spin_lock(&buf->lock); spin_lock(&buf->lock);
w = RB_FIRST(&buf->keys, struct keybuf_key, node); w = RB_FIRST(&buf->keys, struct keybuf_key, node);
......
...@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b) ...@@ -184,7 +184,7 @@ static inline struct bset *btree_bset_last(struct btree *b)
return bset_tree_last(&b->keys)->data; return bset_tree_last(&b->keys)->data;
} }
static inline unsigned bset_block_offset(struct btree *b, struct bset *i) static inline unsigned int bset_block_offset(struct btree *b, struct bset *i)
{ {
return bset_sector_offset(&b->keys, i) >> b->c->block_bits; return bset_sector_offset(&b->keys, i) >> b->c->block_bits;
} }
...@@ -213,7 +213,7 @@ struct btree_op { ...@@ -213,7 +213,7 @@ struct btree_op {
/* Btree level at which we start taking write locks */ /* Btree level at which we start taking write locks */
short lock; short lock;
unsigned insert_collision:1; unsigned int insert_collision:1;
}; };
static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level) static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
...@@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b) ...@@ -238,26 +238,28 @@ static inline void rw_unlock(bool w, struct btree *b)
(w ? up_write : up_read)(&b->lock); (w ? up_write : up_read)(&b->lock);
} }
void bch_btree_node_read_done(struct btree *); void bch_btree_node_read_done(struct btree *b);
void __bch_btree_node_write(struct btree *, struct closure *); void __bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_node_write(struct btree *, struct closure *); void bch_btree_node_write(struct btree *b, struct closure *parent);
void bch_btree_set_root(struct btree *); void bch_btree_set_root(struct btree *b);
struct btree *__bch_btree_node_alloc(struct cache_set *, struct btree_op *, struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
int, bool, struct btree *); int level, bool wait,
struct btree *bch_btree_node_get(struct cache_set *, struct btree_op *, struct btree *parent);
struct bkey *, int, bool, struct btree *); struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
struct bkey *k, int level, bool write,
int bch_btree_insert_check_key(struct btree *, struct btree_op *, struct btree *parent);
struct bkey *);
int bch_btree_insert(struct cache_set *, struct keylist *, int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
atomic_t *, struct bkey *); struct bkey *check_key);
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
int bch_gc_thread_start(struct cache_set *); atomic_t *journal_ref, struct bkey *replace_key);
void bch_initial_gc_finish(struct cache_set *);
void bch_moving_gc(struct cache_set *); int bch_gc_thread_start(struct cache_set *c);
int bch_btree_check(struct cache_set *); void bch_initial_gc_finish(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *, int, struct bkey *); void bch_moving_gc(struct cache_set *c);
int bch_btree_check(struct cache_set *c);
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k);
static inline void wake_up_gc(struct cache_set *c) static inline void wake_up_gc(struct cache_set *c)
{ {
...@@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c) ...@@ -272,9 +274,9 @@ static inline void wake_up_gc(struct cache_set *c)
#define MAP_END_KEY 1 #define MAP_END_KEY 1
typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *); typedef int (btree_map_nodes_fn)(struct btree_op *b_op, struct btree *b);
int __bch_btree_map_nodes(struct btree_op *, struct cache_set *, int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *, btree_map_nodes_fn *, int); struct bkey *from, btree_map_nodes_fn *fn, int flags);
static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn) struct bkey *from, btree_map_nodes_fn *fn)
...@@ -290,21 +292,23 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op, ...@@ -290,21 +292,23 @@ static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES); return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
} }
typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *, typedef int (btree_map_keys_fn)(struct btree_op *op, struct btree *b,
struct bkey *); struct bkey *k);
int bch_btree_map_keys(struct btree_op *, struct cache_set *, int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
struct bkey *, btree_map_keys_fn *, int); struct bkey *from, btree_map_keys_fn *fn, int flags);
typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *); typedef bool (keybuf_pred_fn)(struct keybuf *buf, struct bkey *k);
void bch_keybuf_init(struct keybuf *); void bch_keybuf_init(struct keybuf *buf);
void bch_refill_keybuf(struct cache_set *, struct keybuf *, void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
struct bkey *, keybuf_pred_fn *); struct bkey *end, keybuf_pred_fn *pred);
bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *, bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
struct bkey *); struct bkey *end);
void bch_keybuf_del(struct keybuf *, struct keybuf_key *); void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w);
struct keybuf_key *bch_keybuf_next(struct keybuf *); struct keybuf_key *bch_keybuf_next(struct keybuf *buf);
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *, struct keybuf *, struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
struct bkey *, keybuf_pred_fn *); struct keybuf *buf,
struct bkey *end,
keybuf_pred_fn *pred);
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats); void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats);
#endif #endif
// SPDX-License-Identifier: GPL-2.0
/* /*
* Asynchronous refcounty things * Asynchronous refcounty things
* *
...@@ -162,12 +163,13 @@ static struct dentry *closure_debug; ...@@ -162,12 +163,13 @@ static struct dentry *closure_debug;
static int debug_seq_show(struct seq_file *f, void *data) static int debug_seq_show(struct seq_file *f, void *data)
{ {
struct closure *cl; struct closure *cl;
spin_lock_irq(&closure_list_lock); spin_lock_irq(&closure_list_lock);
list_for_each_entry(cl, &closure_list, all) { list_for_each_entry(cl, &closure_list, all) {
int r = atomic_read(&cl->remaining); int r = atomic_read(&cl->remaining);
seq_printf(f, "%p: %pF -> %pf p %p r %i ", seq_printf(f, "%p: %pS -> %pS p %p r %i ",
cl, (void *) cl->ip, cl->fn, cl->parent, cl, (void *) cl->ip, cl->fn, cl->parent,
r & CLOSURE_REMAINING_MASK); r & CLOSURE_REMAINING_MASK);
...@@ -177,7 +179,7 @@ static int debug_seq_show(struct seq_file *f, void *data) ...@@ -177,7 +179,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
r & CLOSURE_RUNNING ? "R" : ""); r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING) if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n", seq_printf(f, " W %pS\n",
(void *) cl->waiting_on); (void *) cl->waiting_on);
seq_printf(f, "\n"); seq_printf(f, "\n");
......
...@@ -159,7 +159,7 @@ struct closure { ...@@ -159,7 +159,7 @@ struct closure {
#define CLOSURE_MAGIC_DEAD 0xc054dead #define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e #define CLOSURE_MAGIC_ALIVE 0xc054a11e
unsigned magic; unsigned int magic;
struct list_head all; struct list_head all;
unsigned long ip; unsigned long ip;
unsigned long waiting_on; unsigned long waiting_on;
...@@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl) ...@@ -289,10 +289,12 @@ static inline void closure_init_stack(struct closure *cl)
} }
/** /**
* closure_wake_up - wake up all closures on a wait list. * closure_wake_up - wake up all closures on a wait list,
* with memory barrier
*/ */
static inline void closure_wake_up(struct closure_waitlist *list) static inline void closure_wake_up(struct closure_waitlist *list)
{ {
/* Memory barrier for the wait list */
smp_mb(); smp_mb();
__closure_wake_up(list); __closure_wake_up(list);
} }
......
...@@ -67,34 +67,35 @@ void bch_btree_verify(struct btree *b) ...@@ -67,34 +67,35 @@ void bch_btree_verify(struct btree *b)
if (inmemory->keys != sorted->keys || if (inmemory->keys != sorted->keys ||
memcmp(inmemory->start, memcmp(inmemory->start,
sorted->start, sorted->start,
(void *) bset_bkey_last(inmemory) - (void *) inmemory->start)) { (void *) bset_bkey_last(inmemory) -
(void *) inmemory->start)) {
struct bset *i; struct bset *i;
unsigned j; unsigned int j;
console_lock(); console_lock();
printk(KERN_ERR "*** in memory:\n"); pr_err("*** in memory:\n");
bch_dump_bset(&b->keys, inmemory, 0); bch_dump_bset(&b->keys, inmemory, 0);
printk(KERN_ERR "*** read back in:\n"); pr_err("*** read back in:\n");
bch_dump_bset(&v->keys, sorted, 0); bch_dump_bset(&v->keys, sorted, 0);
for_each_written_bset(b, ondisk, i) { for_each_written_bset(b, ondisk, i) {
unsigned block = ((void *) i - (void *) ondisk) / unsigned int block = ((void *) i - (void *) ondisk) /
block_bytes(b->c); block_bytes(b->c);
printk(KERN_ERR "*** on disk block %u:\n", block); pr_err("*** on disk block %u:\n", block);
bch_dump_bset(&b->keys, i, block); bch_dump_bset(&b->keys, i, block);
} }
printk(KERN_ERR "*** block %zu not written\n", pr_err("*** block %zu not written\n",
((void *) i - (void *) ondisk) / block_bytes(b->c)); ((void *) i - (void *) ondisk) / block_bytes(b->c));
for (j = 0; j < inmemory->keys; j++) for (j = 0; j < inmemory->keys; j++)
if (inmemory->d[j] != sorted->d[j]) if (inmemory->d[j] != sorted->d[j])
break; break;
printk(KERN_ERR "b->written %u\n", b->written); pr_err("b->written %u\n", b->written);
console_unlock(); console_unlock();
panic("verify failed at %u\n", j); panic("verify failed at %u\n", j);
...@@ -176,9 +177,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf, ...@@ -176,9 +177,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
while (size) { while (size) {
struct keybuf_key *w; struct keybuf_key *w;
unsigned bytes = min(i->bytes, size); unsigned int bytes = min(i->bytes, size);
int err = copy_to_user(buf, i->buf, bytes); int err = copy_to_user(buf, i->buf, bytes);
if (err) if (err)
return err; return err;
...@@ -237,8 +238,8 @@ void bch_debug_init_cache_set(struct cache_set *c) ...@@ -237,8 +238,8 @@ void bch_debug_init_cache_set(struct cache_set *c)
{ {
if (!IS_ERR_OR_NULL(bcache_debug)) { if (!IS_ERR_OR_NULL(bcache_debug)) {
char name[50]; char name[50];
snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
snprintf(name, 50, "bcache-%pU", c->sb.set_uuid);
c->debug = debugfs_create_file(name, 0400, bcache_debug, c, c->debug = debugfs_create_file(name, 0400, bcache_debug, c,
&cache_set_debug_ops); &cache_set_debug_ops);
} }
......
...@@ -8,8 +8,8 @@ struct cache_set; ...@@ -8,8 +8,8 @@ struct cache_set;
#ifdef CONFIG_BCACHE_DEBUG #ifdef CONFIG_BCACHE_DEBUG
void bch_btree_verify(struct btree *); void bch_btree_verify(struct btree *b);
void bch_data_verify(struct cached_dev *, struct bio *); void bch_data_verify(struct cached_dev *dc, struct bio *bio);
#define expensive_debug_checks(c) ((c)->expensive_debug_checks) #define expensive_debug_checks(c) ((c)->expensive_debug_checks)
#define key_merging_disabled(c) ((c)->key_merging_disabled) #define key_merging_disabled(c) ((c)->key_merging_disabled)
...@@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {} ...@@ -27,7 +27,7 @@ static inline void bch_data_verify(struct cached_dev *dc, struct bio *bio) {}
#endif #endif
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
void bch_debug_init_cache_set(struct cache_set *); void bch_debug_init_cache_set(struct cache_set *c);
#else #else
static inline void bch_debug_init_cache_set(struct cache_set *c) {} static inline void bch_debug_init_cache_set(struct cache_set *c) {}
#endif #endif
......
...@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l, ...@@ -46,7 +46,7 @@ static bool bch_key_sort_cmp(struct btree_iter_set l,
static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
{ {
unsigned i; unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) { if (ptr_available(c, k, i)) {
...@@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k) ...@@ -67,7 +67,7 @@ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
{ {
unsigned i; unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) { if (ptr_available(c, k, i)) {
...@@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k) ...@@ -96,7 +96,7 @@ static const char *bch_ptr_status(struct cache_set *c, const struct bkey *k)
void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
{ {
unsigned i = 0; unsigned int i = 0;
char *out = buf, *end = buf + size; char *out = buf, *end = buf + size;
#define p(...) (out += scnprintf(out, end - out, __VA_ARGS__)) #define p(...) (out += scnprintf(out, end - out, __VA_ARGS__))
...@@ -126,22 +126,22 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k) ...@@ -126,22 +126,22 @@ void bch_extent_to_text(char *buf, size_t size, const struct bkey *k)
static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k) static void bch_bkey_dump(struct btree_keys *keys, const struct bkey *k)
{ {
struct btree *b = container_of(keys, struct btree, keys); struct btree *b = container_of(keys, struct btree, keys);
unsigned j; unsigned int j;
char buf[80]; char buf[80];
bch_extent_to_text(buf, sizeof(buf), k); bch_extent_to_text(buf, sizeof(buf), k);
printk(" %s", buf); pr_err(" %s", buf);
for (j = 0; j < KEY_PTRS(k); j++) { for (j = 0; j < KEY_PTRS(k); j++) {
size_t n = PTR_BUCKET_NR(b->c, k, j); size_t n = PTR_BUCKET_NR(b->c, k, j);
printk(" bucket %zu", n);
pr_err(" bucket %zu", n);
if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets) if (n >= b->c->sb.first_bucket && n < b->c->sb.nbuckets)
printk(" prio %i", pr_err(" prio %i",
PTR_BUCKET(b->c, k, j)->prio); PTR_BUCKET(b->c, k, j)->prio);
} }
printk(" %s\n", bch_ptr_status(b->c, k)); pr_err(" %s\n", bch_ptr_status(b->c, k));
} }
/* Btree ptrs */ /* Btree ptrs */
...@@ -166,12 +166,13 @@ bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k) ...@@ -166,12 +166,13 @@ bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k)
static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k) static bool bch_btree_ptr_invalid(struct btree_keys *bk, const struct bkey *k)
{ {
struct btree *b = container_of(bk, struct btree, keys); struct btree *b = container_of(bk, struct btree, keys);
return __bch_btree_ptr_invalid(b->c, k); return __bch_btree_ptr_invalid(b->c, k);
} }
static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
{ {
unsigned i; unsigned int i;
char buf[80]; char buf[80];
struct bucket *g; struct bucket *g;
...@@ -204,7 +205,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k) ...@@ -204,7 +205,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k) static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
{ {
struct btree *b = container_of(bk, struct btree, keys); struct btree *b = container_of(bk, struct btree, keys);
unsigned i; unsigned int i;
if (!bkey_cmp(k, &ZERO_KEY) || if (!bkey_cmp(k, &ZERO_KEY) ||
!KEY_PTRS(k) || !KEY_PTRS(k) ||
...@@ -327,13 +328,14 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, ...@@ -327,13 +328,14 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
struct cache_set *c = container_of(b, struct btree, keys)->c; struct cache_set *c = container_of(b, struct btree, keys)->c;
uint64_t old_offset; uint64_t old_offset;
unsigned old_size, sectors_found = 0; unsigned int old_size, sectors_found = 0;
BUG_ON(!KEY_OFFSET(insert)); BUG_ON(!KEY_OFFSET(insert));
BUG_ON(!KEY_SIZE(insert)); BUG_ON(!KEY_SIZE(insert));
while (1) { while (1) {
struct bkey *k = bch_btree_iter_next(iter); struct bkey *k = bch_btree_iter_next(iter);
if (!k) if (!k)
break; break;
...@@ -363,7 +365,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b, ...@@ -363,7 +365,7 @@ static bool bch_extent_insert_fixup(struct btree_keys *b,
* k might have been split since we inserted/found the * k might have been split since we inserted/found the
* key we're replacing * key we're replacing
*/ */
unsigned i; unsigned int i;
uint64_t offset = KEY_START(k) - uint64_t offset = KEY_START(k) -
KEY_START(replace_key); KEY_START(replace_key);
...@@ -498,11 +500,12 @@ bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k) ...@@ -498,11 +500,12 @@ bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k)
static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k) static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
{ {
struct btree *b = container_of(bk, struct btree, keys); struct btree *b = container_of(bk, struct btree, keys);
return __bch_extent_invalid(b->c, k); return __bch_extent_invalid(b->c, k);
} }
static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
unsigned ptr) unsigned int ptr)
{ {
struct bucket *g = PTR_BUCKET(b->c, k, ptr); struct bucket *g = PTR_BUCKET(b->c, k, ptr);
char buf[80]; char buf[80];
...@@ -534,7 +537,7 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k, ...@@ -534,7 +537,7 @@ static bool bch_extent_bad_expensive(struct btree *b, const struct bkey *k,
static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k) static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
{ {
struct btree *b = container_of(bk, struct btree, keys); struct btree *b = container_of(bk, struct btree, keys);
unsigned i, stale; unsigned int i, stale;
if (!KEY_PTRS(k) || if (!KEY_PTRS(k) ||
bch_extent_invalid(bk, k)) bch_extent_invalid(bk, k))
...@@ -574,10 +577,12 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r) ...@@ -574,10 +577,12 @@ static uint64_t merge_chksums(struct bkey *l, struct bkey *r)
~((uint64_t)1 << 63); ~((uint64_t)1 << 63);
} }
static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey *r) static bool bch_extent_merge(struct btree_keys *bk,
struct bkey *l,
struct bkey *r)
{ {
struct btree *b = container_of(bk, struct btree, keys); struct btree *b = container_of(bk, struct btree, keys);
unsigned i; unsigned int i;
if (key_merging_disabled(b->c)) if (key_merging_disabled(b->c))
return false; return false;
......
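Illustrative aside (not part of the commit): bch_extent_to_text() above builds its output through the local p() macro, which appends to a fixed buffer while tracking a running end pointer. A userspace sketch of that pattern follows; it uses snprintf() as a stand-in for the kernel's scnprintf(), which is only safe here because the output fits, and the key fields printed are made up.

#include <stdio.h>

int main(void)
{
        char buf[64];
        char *out = buf, *end = buf + sizeof(buf);

        /* scnprintf() returns the bytes actually written; snprintf() is a
         * stand-in and is only safe while the output fits, as it does here. */
#define p(...) (out += snprintf(out, end - out, __VA_ARGS__))
        p("%llu:%llu len %llu", 5ULL, 1024ULL, 8ULL);
        p(" gen %i", 3);
#undef p

        printf("%s\n", buf);    /* 5:1024 len 8 gen 3 */
        return 0;
}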
...@@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops; ...@@ -8,8 +8,8 @@ extern const struct btree_keys_ops bch_extent_keys_ops;
struct bkey; struct bkey;
struct cache_set; struct cache_set;
void bch_extent_to_text(char *, size_t, const struct bkey *); void bch_extent_to_text(char *buf, size_t size, const struct bkey *k);
bool __bch_btree_ptr_invalid(struct cache_set *, const struct bkey *); bool __bch_btree_ptr_invalid(struct cache_set *c, const struct bkey *k);
bool __bch_extent_invalid(struct cache_set *, const struct bkey *); bool __bch_extent_invalid(struct cache_set *c, const struct bkey *k);
#endif /* _BCACHE_EXTENTS_H */ #endif /* _BCACHE_EXTENTS_H */
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
void bch_bbio_free(struct bio *bio, struct cache_set *c) void bch_bbio_free(struct bio *bio, struct cache_set *c)
{ {
struct bbio *b = container_of(bio, struct bbio, bio); struct bbio *b = container_of(bio, struct bbio, bio);
mempool_free(b, &c->bio_meta); mempool_free(b, &c->bio_meta);
} }
...@@ -42,9 +43,10 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) ...@@ -42,9 +43,10 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
} }
void bch_submit_bbio(struct bio *bio, struct cache_set *c, void bch_submit_bbio(struct bio *bio, struct cache_set *c,
struct bkey *k, unsigned ptr) struct bkey *k, unsigned int ptr)
{ {
struct bbio *b = container_of(bio, struct bbio, bio); struct bbio *b = container_of(bio, struct bbio, bio);
bch_bkey_copy_single_ptr(&b->key, k, ptr); bch_bkey_copy_single_ptr(&b->key, k, ptr);
__bch_submit_bbio(bio, c); __bch_submit_bbio(bio, c);
} }
...@@ -52,7 +54,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, ...@@ -52,7 +54,7 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c,
/* IO errors */ /* IO errors */
void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
{ {
unsigned errors; unsigned int errors;
WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
...@@ -75,16 +77,16 @@ void bch_count_io_errors(struct cache *ca, ...@@ -75,16 +77,16 @@ void bch_count_io_errors(struct cache *ca,
*/ */
if (ca->set->error_decay) { if (ca->set->error_decay) {
unsigned count = atomic_inc_return(&ca->io_count); unsigned int count = atomic_inc_return(&ca->io_count);
while (count > ca->set->error_decay) { while (count > ca->set->error_decay) {
unsigned errors; unsigned int errors;
unsigned old = count; unsigned int old = count;
unsigned new = count - ca->set->error_decay; unsigned int new = count - ca->set->error_decay;
/* /*
* First we subtract refresh from count; each time we * First we subtract refresh from count; each time we
* succesfully do so, we rescale the errors once: * successfully do so, we rescale the errors once:
*/ */
count = atomic_cmpxchg(&ca->io_count, old, new); count = atomic_cmpxchg(&ca->io_count, old, new);
...@@ -104,7 +106,7 @@ void bch_count_io_errors(struct cache *ca, ...@@ -104,7 +106,7 @@ void bch_count_io_errors(struct cache *ca,
} }
if (error) { if (error) {
unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT, unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
&ca->io_errors); &ca->io_errors);
errors >>= IO_ERROR_SHIFT; errors >>= IO_ERROR_SHIFT;
...@@ -126,18 +128,18 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio, ...@@ -126,18 +128,18 @@ void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
struct cache *ca = PTR_CACHE(c, &b->key, 0); struct cache *ca = PTR_CACHE(c, &b->key, 0);
int is_read = (bio_data_dir(bio) == READ ? 1 : 0); int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned threshold = op_is_write(bio_op(bio)) unsigned int threshold = op_is_write(bio_op(bio))
? c->congested_write_threshold_us ? c->congested_write_threshold_us
: c->congested_read_threshold_us; : c->congested_read_threshold_us;
if (threshold) { if (threshold) {
unsigned t = local_clock_us(); unsigned int t = local_clock_us();
int us = t - b->submit_time_us; int us = t - b->submit_time_us;
int congested = atomic_read(&c->congested); int congested = atomic_read(&c->congested);
if (us > (int) threshold) { if (us > (int) threshold) {
int ms = us / 1024; int ms = us / 1024;
c->congested_last_us = t; c->congested_last_us = t;
ms = min(ms, CONGESTED_MAX + congested); ms = min(ms, CONGESTED_MAX + congested);
......
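Illustrative aside (not part of the commit): the io.c hunk above keeps the comment describing the error-decay scheme in bch_count_io_errors(). A minimal userspace sketch of that scheme, using C11 stdatomic.h in place of the kernel atomics and a made-up ERROR_DECAY threshold: whichever thread wins the compare-and-swap subtracts the threshold from the shared I/O counter and rescales the accumulated errors once.

#include <stdatomic.h>
#include <stdio.h>

#define ERROR_DECAY 128                 /* hypothetical decay threshold */

static atomic_uint io_count;
static atomic_uint io_errors;

static void count_io(int error)
{
        unsigned int count = atomic_fetch_add(&io_count, 1) + 1;

        while (count > ERROR_DECAY) {
                unsigned int old = count;
                unsigned int new = count - ERROR_DECAY;

                /* Only the winner of the cmpxchg rescales the errors. */
                if (atomic_compare_exchange_weak(&io_count, &old, new))
                        atomic_fetch_sub(&io_errors,
                                         atomic_load(&io_errors) >> 1);

                count = atomic_load(&io_count);
        }

        if (error)
                atomic_fetch_add(&io_errors, 1);
}

int main(void)
{
        for (int i = 0; i < 1000; i++)
                count_io(i % 50 == 0);          /* ~2% error rate */

        printf("decayed error count: %u\n", atomic_load(&io_errors));
        return 0;
}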
...@@ -28,11 +28,12 @@ ...@@ -28,11 +28,12 @@
static void journal_read_endio(struct bio *bio) static void journal_read_endio(struct bio *bio)
{ {
struct closure *cl = bio->bi_private; struct closure *cl = bio->bi_private;
closure_put(cl); closure_put(cl);
} }
static int journal_read_bucket(struct cache *ca, struct list_head *list, static int journal_read_bucket(struct cache *ca, struct list_head *list,
unsigned bucket_index) unsigned int bucket_index)
{ {
struct journal_device *ja = &ca->journal; struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio; struct bio *bio = &ja->bio;
...@@ -40,7 +41,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, ...@@ -40,7 +41,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
struct journal_replay *i; struct journal_replay *i;
struct jset *j, *data = ca->set->journal.w[0].data; struct jset *j, *data = ca->set->journal.w[0].data;
struct closure cl; struct closure cl;
unsigned len, left, offset = 0; unsigned int len, left, offset = 0;
int ret = 0; int ret = 0;
sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]); sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
...@@ -50,7 +51,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list, ...@@ -50,7 +51,7 @@ static int journal_read_bucket(struct cache *ca, struct list_head *list,
while (offset < ca->sb.bucket_size) { while (offset < ca->sb.bucket_size) {
reread: left = ca->sb.bucket_size - offset; reread: left = ca->sb.bucket_size - offset;
len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS); len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
bio_reset(bio); bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset; bio->bi_iter.bi_sector = bucket + offset;
...@@ -154,12 +155,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) ...@@ -154,12 +155,12 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
}) })
struct cache *ca; struct cache *ca;
unsigned iter; unsigned int iter;
for_each_cache(ca, c, iter) { for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal; struct journal_device *ja = &ca->journal;
DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS); DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
unsigned i, l, r, m; unsigned int i, l, r, m;
uint64_t seq; uint64_t seq;
bitmap_zero(bitmap, SB_JOURNAL_BUCKETS); bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
...@@ -192,7 +193,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) ...@@ -192,7 +193,8 @@ int bch_journal_read(struct cache_set *c, struct list_head *list)
for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets); for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
l < ca->sb.njournal_buckets; l < ca->sb.njournal_buckets;
l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1)) l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
l + 1))
if (read_bucket(l)) if (read_bucket(l))
goto bsearch; goto bsearch;
...@@ -304,7 +306,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list) ...@@ -304,7 +306,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
k < bset_bkey_last(&i->j); k < bset_bkey_last(&i->j);
k = bkey_next(k)) k = bkey_next(k))
if (!__bch_extent_invalid(c, k)) { if (!__bch_extent_invalid(c, k)) {
unsigned j; unsigned int j;
for (j = 0; j < KEY_PTRS(k); j++) for (j = 0; j < KEY_PTRS(k); j++)
if (ptr_available(c, k, j)) if (ptr_available(c, k, j))
...@@ -492,7 +494,7 @@ static void journal_reclaim(struct cache_set *c) ...@@ -492,7 +494,7 @@ static void journal_reclaim(struct cache_set *c)
struct bkey *k = &c->journal.key; struct bkey *k = &c->journal.key;
struct cache *ca; struct cache *ca;
uint64_t last_seq; uint64_t last_seq;
unsigned iter, n = 0; unsigned int iter, n = 0;
atomic_t p __maybe_unused; atomic_t p __maybe_unused;
atomic_long_inc(&c->reclaim); atomic_long_inc(&c->reclaim);
...@@ -526,7 +528,7 @@ static void journal_reclaim(struct cache_set *c) ...@@ -526,7 +528,7 @@ static void journal_reclaim(struct cache_set *c)
for_each_cache(ca, c, iter) { for_each_cache(ca, c, iter) {
struct journal_device *ja = &ca->journal; struct journal_device *ja = &ca->journal;
unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets; unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
/* No space available on this device */ /* No space available on this device */
if (next == ja->discard_idx) if (next == ja->discard_idx)
...@@ -580,7 +582,7 @@ static void journal_write_endio(struct bio *bio) ...@@ -580,7 +582,7 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io); closure_put(&w->c->journal.io);
} }
static void journal_write(struct closure *); static void journal_write(struct closure *cl);
static void journal_write_done(struct closure *cl) static void journal_write_done(struct closure *cl)
{ {
...@@ -609,11 +611,12 @@ static void journal_write_unlocked(struct closure *cl) ...@@ -609,11 +611,12 @@ static void journal_write_unlocked(struct closure *cl)
struct cache *ca; struct cache *ca;
struct journal_write *w = c->journal.cur; struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key; struct bkey *k = &c->journal.key;
unsigned i, sectors = set_blocks(w->data, block_bytes(c)) * unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
c->sb.block_size; c->sb.block_size;
struct bio *bio; struct bio *bio;
struct bio_list list; struct bio_list list;
bio_list_init(&list); bio_list_init(&list);
if (!w->need_write) { if (!w->need_write) {
...@@ -705,7 +708,7 @@ static void journal_try_write(struct cache_set *c) ...@@ -705,7 +708,7 @@ static void journal_try_write(struct cache_set *c)
} }
static struct journal_write *journal_wait_for_write(struct cache_set *c, static struct journal_write *journal_wait_for_write(struct cache_set *c,
unsigned nkeys) unsigned int nkeys)
__acquires(&c->journal.lock) __acquires(&c->journal.lock)
{ {
size_t sectors; size_t sectors;
......
...@@ -110,7 +110,7 @@ struct journal { ...@@ -110,7 +110,7 @@ struct journal {
struct delayed_work work; struct delayed_work work;
/* Number of blocks free in the bucket(s) we're currently writing to */ /* Number of blocks free in the bucket(s) we're currently writing to */
unsigned blocks_free; unsigned int blocks_free;
uint64_t seq; uint64_t seq;
DECLARE_FIFO(atomic_t, pin); DECLARE_FIFO(atomic_t, pin);
...@@ -131,13 +131,13 @@ struct journal_device { ...@@ -131,13 +131,13 @@ struct journal_device {
uint64_t seq[SB_JOURNAL_BUCKETS]; uint64_t seq[SB_JOURNAL_BUCKETS];
/* Journal bucket we're currently writing to */ /* Journal bucket we're currently writing to */
unsigned cur_idx; unsigned int cur_idx;
/* Last journal bucket that still contains an open journal entry */ /* Last journal bucket that still contains an open journal entry */
unsigned last_idx; unsigned int last_idx;
/* Next journal bucket to be discarded */ /* Next journal bucket to be discarded */
unsigned discard_idx; unsigned int discard_idx;
#define DISCARD_READY 0 #define DISCARD_READY 0
#define DISCARD_IN_FLIGHT 1 #define DISCARD_IN_FLIGHT 1
...@@ -167,14 +167,16 @@ struct cache_set; ...@@ -167,14 +167,16 @@ struct cache_set;
struct btree_op; struct btree_op;
struct keylist; struct keylist;
atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *); atomic_t *bch_journal(struct cache_set *c,
void bch_journal_next(struct journal *); struct keylist *keys,
void bch_journal_mark(struct cache_set *, struct list_head *); struct closure *parent);
void bch_journal_meta(struct cache_set *, struct closure *); void bch_journal_next(struct journal *j);
int bch_journal_read(struct cache_set *, struct list_head *); void bch_journal_mark(struct cache_set *c, struct list_head *list);
int bch_journal_replay(struct cache_set *, struct list_head *); void bch_journal_meta(struct cache_set *c, struct closure *cl);
int bch_journal_read(struct cache_set *c, struct list_head *list);
void bch_journal_free(struct cache_set *); int bch_journal_replay(struct cache_set *c, struct list_head *list);
int bch_journal_alloc(struct cache_set *);
void bch_journal_free(struct cache_set *c);
int bch_journal_alloc(struct cache_set *c);
#endif /* _BCACHE_JOURNAL_H */ #endif /* _BCACHE_JOURNAL_H */
...@@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) ...@@ -23,7 +23,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
{ {
struct cache_set *c = container_of(buf, struct cache_set, struct cache_set *c = container_of(buf, struct cache_set,
moving_gc_keys); moving_gc_keys);
unsigned i; unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++) for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i) && if (ptr_available(c, k, i) &&
...@@ -38,6 +38,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k) ...@@ -38,6 +38,7 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
static void moving_io_destructor(struct closure *cl) static void moving_io_destructor(struct closure *cl)
{ {
struct moving_io *io = container_of(cl, struct moving_io, cl); struct moving_io *io = container_of(cl, struct moving_io, cl);
kfree(io); kfree(io);
} }
...@@ -186,9 +187,10 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r) ...@@ -186,9 +187,10 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
return GC_SECTORS_USED(l) < GC_SECTORS_USED(r); return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
} }
static unsigned bucket_heap_top(struct cache *ca) static unsigned int bucket_heap_top(struct cache *ca)
{ {
struct bucket *b; struct bucket *b;
return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0; return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
} }
...@@ -196,7 +198,7 @@ void bch_moving_gc(struct cache_set *c) ...@@ -196,7 +198,7 @@ void bch_moving_gc(struct cache_set *c)
{ {
struct cache *ca; struct cache *ca;
struct bucket *b; struct bucket *b;
unsigned i; unsigned int i;
if (!c->copy_gc_enabled) if (!c->copy_gc_enabled)
return; return;
...@@ -204,8 +206,8 @@ void bch_moving_gc(struct cache_set *c) ...@@ -204,8 +206,8 @@ void bch_moving_gc(struct cache_set *c)
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
for_each_cache(ca, c, i) { for_each_cache(ca, c, i) {
unsigned sectors_to_move = 0; unsigned int sectors_to_move = 0;
unsigned reserve_sectors = ca->sb.bucket_size * unsigned int reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]); fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca->heap.used = 0; ca->heap.used = 0;
......
...@@ -25,9 +25,9 @@ ...@@ -25,9 +25,9 @@
struct kmem_cache *bch_search_cache; struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *); static void bch_data_insert_start(struct closure *cl);
static unsigned cache_mode(struct cached_dev *dc) static unsigned int cache_mode(struct cached_dev *dc)
{ {
return BDEV_CACHE_MODE(&dc->sb); return BDEV_CACHE_MODE(&dc->sb);
} }
...@@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k) ...@@ -45,6 +45,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) { bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset; void *d = kmap(bv.bv_page) + bv.bv_offset;
csum = bch_crc64_update(csum, d, bv.bv_len); csum = bch_crc64_update(csum, d, bv.bv_len);
kunmap(bv.bv_page); kunmap(bv.bv_page);
} }
...@@ -98,7 +99,7 @@ static void bch_data_insert_keys(struct closure *cl) ...@@ -98,7 +99,7 @@ static void bch_data_insert_keys(struct closure *cl)
closure_return(cl); closure_return(cl);
} }
static int bch_keylist_realloc(struct keylist *l, unsigned u64s, static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
struct cache_set *c) struct cache_set *c)
{ {
size_t oldsize = bch_keylist_nkeys(l); size_t oldsize = bch_keylist_nkeys(l);
...@@ -125,7 +126,7 @@ static void bch_data_invalidate(struct closure *cl) ...@@ -125,7 +126,7 @@ static void bch_data_invalidate(struct closure *cl)
bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
while (bio_sectors(bio)) { while (bio_sectors(bio)) {
unsigned sectors = min(bio_sectors(bio), unsigned int sectors = min(bio_sectors(bio),
1U << (KEY_SIZE_BITS - 1)); 1U << (KEY_SIZE_BITS - 1));
if (bch_keylist_realloc(&op->insert_keys, 2, op->c)) if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
...@@ -135,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl) ...@@ -135,7 +136,9 @@ static void bch_data_invalidate(struct closure *cl)
bio->bi_iter.bi_size -= sectors << 9; bio->bi_iter.bi_size -= sectors << 9;
bch_keylist_add(&op->insert_keys, bch_keylist_add(&op->insert_keys,
&KEY(op->inode, bio->bi_iter.bi_sector, sectors)); &KEY(op->inode,
bio->bi_iter.bi_sector,
sectors));
} }
op->insert_data_done = true; op->insert_data_done = true;
...@@ -151,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl) ...@@ -151,7 +154,7 @@ static void bch_data_insert_error(struct closure *cl)
/* /*
* Our data write just errored, which means we've got a bunch of keys to * Our data write just errored, which means we've got a bunch of keys to
* insert that point to data that wasn't succesfully written. * insert that point to data that wasn't successfully written.
* *
* We don't have to insert those keys but we still have to invalidate * We don't have to insert those keys but we still have to invalidate
* that region of the cache - so, if we just strip off all the pointers * that region of the cache - so, if we just strip off all the pointers
...@@ -211,7 +214,7 @@ static void bch_data_insert_start(struct closure *cl) ...@@ -211,7 +214,7 @@ static void bch_data_insert_start(struct closure *cl)
bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA); bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
do { do {
unsigned i; unsigned int i;
struct bkey *k; struct bkey *k;
struct bio_set *split = &op->c->bio_split; struct bio_set *split = &op->c->bio_split;
...@@ -328,7 +331,7 @@ void bch_data_insert(struct closure *cl) ...@@ -328,7 +331,7 @@ void bch_data_insert(struct closure *cl)
/* Congested? */ /* Congested? */
unsigned bch_get_congested(struct cache_set *c) unsigned int bch_get_congested(struct cache_set *c)
{ {
int i; int i;
long rand; long rand;
...@@ -372,8 +375,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k) ...@@ -372,8 +375,8 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{ {
struct cache_set *c = dc->disk.c; struct cache_set *c = dc->disk.c;
unsigned mode = cache_mode(dc); unsigned int mode = cache_mode(dc);
unsigned sectors, congested = bch_get_congested(c); unsigned int sectors, congested = bch_get_congested(c);
struct task_struct *task = current; struct task_struct *task = current;
struct io *i; struct io *i;
...@@ -469,11 +472,11 @@ struct search { ...@@ -469,11 +472,11 @@ struct search {
struct bio *cache_miss; struct bio *cache_miss;
struct bcache_device *d; struct bcache_device *d;
unsigned insert_bio_sectors; unsigned int insert_bio_sectors;
unsigned recoverable:1; unsigned int recoverable:1;
unsigned write:1; unsigned int write:1;
unsigned read_dirty_data:1; unsigned int read_dirty_data:1;
unsigned cache_missed:1; unsigned int cache_missed:1;
unsigned long start_time; unsigned long start_time;
...@@ -514,20 +517,20 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) ...@@ -514,20 +517,20 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
struct search *s = container_of(op, struct search, op); struct search *s = container_of(op, struct search, op);
struct bio *n, *bio = &s->bio.bio; struct bio *n, *bio = &s->bio.bio;
struct bkey *bio_key; struct bkey *bio_key;
unsigned ptr; unsigned int ptr;
if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
return MAP_CONTINUE; return MAP_CONTINUE;
if (KEY_INODE(k) != s->iop.inode || if (KEY_INODE(k) != s->iop.inode ||
KEY_START(k) > bio->bi_iter.bi_sector) { KEY_START(k) > bio->bi_iter.bi_sector) {
unsigned bio_sectors = bio_sectors(bio); unsigned int bio_sectors = bio_sectors(bio);
unsigned sectors = KEY_INODE(k) == s->iop.inode unsigned int sectors = KEY_INODE(k) == s->iop.inode
? min_t(uint64_t, INT_MAX, ? min_t(uint64_t, INT_MAX,
KEY_START(k) - bio->bi_iter.bi_sector) KEY_START(k) - bio->bi_iter.bi_sector)
: INT_MAX; : INT_MAX;
int ret = s->d->cache_miss(b, s, bio, sectors); int ret = s->d->cache_miss(b, s, bio, sectors);
if (ret != MAP_CONTINUE) if (ret != MAP_CONTINUE)
return ret; return ret;
...@@ -623,6 +626,7 @@ static void request_endio(struct bio *bio) ...@@ -623,6 +626,7 @@ static void request_endio(struct bio *bio)
if (bio->bi_status) { if (bio->bi_status) {
struct search *s = container_of(cl, struct search, cl); struct search *s = container_of(cl, struct search, cl);
s->iop.status = bio->bi_status; s->iop.status = bio->bi_status;
/* Only cache read errors are recoverable */ /* Only cache read errors are recoverable */
s->recoverable = false; s->recoverable = false;
...@@ -813,7 +817,8 @@ static void cached_dev_read_done(struct closure *cl) ...@@ -813,7 +817,8 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) { if (s->iop.bio) {
bio_reset(s->iop.bio); bio_reset(s->iop.bio);
s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; s->iop.bio->bi_iter.bi_sector =
s->cache_miss->bi_iter.bi_sector;
bio_copy_dev(s->iop.bio, s->cache_miss); bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL); bch_bio_map(s->iop.bio, NULL);
...@@ -856,10 +861,10 @@ static void cached_dev_read_done_bh(struct closure *cl) ...@@ -856,10 +861,10 @@ static void cached_dev_read_done_bh(struct closure *cl)
} }
static int cached_dev_cache_miss(struct btree *b, struct search *s, static int cached_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors) struct bio *bio, unsigned int sectors)
{ {
int ret = MAP_CONTINUE; int ret = MAP_CONTINUE;
unsigned reada = 0; unsigned int reada = 0;
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
struct bio *miss, *cache_bio; struct bio *miss, *cache_bio;
...@@ -1212,6 +1217,7 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode, ...@@ -1212,6 +1217,7 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{ {
struct cached_dev *dc = container_of(d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg); return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
} }
...@@ -1226,7 +1232,7 @@ static int cached_dev_congested(void *data, int bits) ...@@ -1226,7 +1232,7 @@ static int cached_dev_congested(void *data, int bits)
return 1; return 1;
if (cached_dev_get(dc)) { if (cached_dev_get(dc)) {
unsigned i; unsigned int i;
struct cache *ca; struct cache *ca;
for_each_cache(ca, d->c, i) { for_each_cache(ca, d->c, i) {
...@@ -1253,9 +1259,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc) ...@@ -1253,9 +1259,9 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
/* Flash backed devices */ /* Flash backed devices */
static int flash_dev_cache_miss(struct btree *b, struct search *s, static int flash_dev_cache_miss(struct btree *b, struct search *s,
struct bio *bio, unsigned sectors) struct bio *bio, unsigned int sectors)
{ {
unsigned bytes = min(sectors, bio_sectors(bio)) << 9; unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
swap(bio->bi_iter.bi_size, bytes); swap(bio->bi_iter.bi_size, bytes);
zero_fill_bio(bio); zero_fill_bio(bio);
...@@ -1338,7 +1344,7 @@ static int flash_dev_congested(void *data, int bits) ...@@ -1338,7 +1344,7 @@ static int flash_dev_congested(void *data, int bits)
struct bcache_device *d = data; struct bcache_device *d = data;
struct request_queue *q; struct request_queue *q;
struct cache *ca; struct cache *ca;
unsigned i; unsigned int i;
int ret = 0; int ret = 0;
for_each_cache(ca, d->c, i) { for_each_cache(ca, d->c, i) {
...@@ -1361,7 +1367,6 @@ void bch_flash_dev_request_init(struct bcache_device *d) ...@@ -1361,7 +1367,6 @@ void bch_flash_dev_request_init(struct bcache_device *d)
void bch_request_exit(void) void bch_request_exit(void)
{ {
if (bch_search_cache)
kmem_cache_destroy(bch_search_cache); kmem_cache_destroy(bch_search_cache);
} }
......
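Illustrative aside (not part of the commit): the bch_data_invalidate() hunk above emits the discarded range as keys whose size never exceeds 1U << (KEY_SIZE_BITS - 1). A self-contained sketch of that chunking follows; it assumes bcache's KEY_SIZE_BITS of 16, and printf() stands in for bch_keylist_add().

#include <stdint.h>
#include <stdio.h>

#define KEY_SIZE_BITS   16                      /* assumed, as in bcache */
#define MAX_KEY_SECTORS (1U << (KEY_SIZE_BITS - 1))

/* Emit one key per chunk; as in bcache, a key's offset is its end sector. */
static void invalidate_range(uint64_t sector, unsigned int nr_sectors)
{
        while (nr_sectors) {
                unsigned int chunk = nr_sectors < MAX_KEY_SECTORS
                        ? nr_sectors : MAX_KEY_SECTORS;

                sector     += chunk;
                nr_sectors -= chunk;

                printf("KEY(end=%llu, size=%u)\n",
                       (unsigned long long)sector, chunk);
        }
}

int main(void)
{
        invalidate_range(0, 100000);    /* three full 32768-sector keys + 1696 */
        return 0;
}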
...@@ -8,7 +8,7 @@ struct data_insert_op { ...@@ -8,7 +8,7 @@ struct data_insert_op {
struct bio *bio; struct bio *bio;
struct workqueue_struct *wq; struct workqueue_struct *wq;
unsigned inode; unsigned int inode;
uint16_t write_point; uint16_t write_point;
uint16_t write_prio; uint16_t write_prio;
blk_status_t status; blk_status_t status;
...@@ -17,15 +17,15 @@ struct data_insert_op { ...@@ -17,15 +17,15 @@ struct data_insert_op {
uint16_t flags; uint16_t flags;
struct { struct {
unsigned bypass:1; unsigned int bypass:1;
unsigned writeback:1; unsigned int writeback:1;
unsigned flush_journal:1; unsigned int flush_journal:1;
unsigned csum:1; unsigned int csum:1;
unsigned replace:1; unsigned int replace:1;
unsigned replace_collision:1; unsigned int replace_collision:1;
unsigned insert_data_done:1; unsigned int insert_data_done:1;
}; };
}; };
...@@ -33,7 +33,7 @@ struct data_insert_op { ...@@ -33,7 +33,7 @@ struct data_insert_op {
BKEY_PADDED(replace_key); BKEY_PADDED(replace_key);
}; };
unsigned bch_get_congested(struct cache_set *); unsigned int bch_get_congested(struct cache_set *c);
void bch_data_insert(struct closure *cl); void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc); void bch_cached_dev_request_init(struct cached_dev *dc);
......
...@@ -33,11 +33,11 @@ ...@@ -33,11 +33,11 @@
* stored left shifted by 16, and scaled back in the sysfs show() function. * stored left shifted by 16, and scaled back in the sysfs show() function.
*/ */
static const unsigned DAY_RESCALE = 288; static const unsigned int DAY_RESCALE = 288;
static const unsigned HOUR_RESCALE = 12; static const unsigned int HOUR_RESCALE = 12;
static const unsigned FIVE_MINUTE_RESCALE = 1; static const unsigned int FIVE_MINUTE_RESCALE = 1;
static const unsigned accounting_delay = (HZ * 300) / 22; static const unsigned int accounting_delay = (HZ * 300) / 22;
static const unsigned accounting_weight = 32; static const unsigned int accounting_weight = 32;
/* sysfs reading/writing */ /* sysfs reading/writing */
...@@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t) ...@@ -152,7 +152,7 @@ static void scale_accounting(struct timer_list *t)
struct cache_accounting *acc = from_timer(acc, t, timer); struct cache_accounting *acc = from_timer(acc, t, timer);
#define move_stat(name) do { \ #define move_stat(name) do { \
unsigned t = atomic_xchg(&acc->collector.name, 0); \ unsigned int t = atomic_xchg(&acc->collector.name, 0); \
t <<= 16; \ t <<= 16; \
acc->five_minute.name += t; \ acc->five_minute.name += t; \
acc->hour.name += t; \ acc->hour.name += t; \
...@@ -200,6 +200,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, ...@@ -200,6 +200,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool hit, bool bypass) bool hit, bool bypass)
{ {
struct cached_dev *dc = container_of(d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
mark_cache_stats(&dc->accounting.collector, hit, bypass); mark_cache_stats(&dc->accounting.collector, hit, bypass);
mark_cache_stats(&c->accounting.collector, hit, bypass); mark_cache_stats(&c->accounting.collector, hit, bypass);
} }
...@@ -207,6 +208,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d, ...@@ -207,6 +208,7 @@ void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
{ {
struct cached_dev *dc = container_of(d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_readaheads); atomic_inc(&dc->accounting.collector.cache_readaheads);
atomic_inc(&c->accounting.collector.cache_readaheads); atomic_inc(&c->accounting.collector.cache_readaheads);
} }
...@@ -214,6 +216,7 @@ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d) ...@@ -214,6 +216,7 @@ void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d)
void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d) void bch_mark_cache_miss_collision(struct cache_set *c, struct bcache_device *d)
{ {
struct cached_dev *dc = container_of(d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
atomic_inc(&dc->accounting.collector.cache_miss_collisions); atomic_inc(&dc->accounting.collector.cache_miss_collisions);
atomic_inc(&c->accounting.collector.cache_miss_collisions); atomic_inc(&c->accounting.collector.cache_miss_collisions);
} }
......
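Illustrative aside (not part of the commit): the stats.c comment above notes that the day/hour/five-minute counters are stored left-shifted by 16 and scaled back in the sysfs show() path. A toy example of that fixed-point convention, with the decay and rescale timers omitted:

#include <stdio.h>

int main(void)
{
        unsigned long five_minute_hits = 0;
        unsigned int collected = 37;            /* hits since the last tick */

        /* move_stat(): fold collector counts in, shifted left by 16 */
        five_minute_hits += (unsigned long)collected << 16;

        /* sysfs show(): scale back down when reporting */
        printf("reported hits: %lu\n", five_minute_hits >> 16);  /* 37 */
        return 0;
}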
...@@ -23,7 +23,7 @@ struct cache_stats { ...@@ -23,7 +23,7 @@ struct cache_stats {
unsigned long cache_miss_collisions; unsigned long cache_miss_collisions;
unsigned long sectors_bypassed; unsigned long sectors_bypassed;
unsigned rescale; unsigned int rescale;
}; };
struct cache_accounting { struct cache_accounting {
...@@ -53,10 +53,13 @@ void bch_cache_accounting_clear(struct cache_accounting *acc); ...@@ -53,10 +53,13 @@ void bch_cache_accounting_clear(struct cache_accounting *acc);
void bch_cache_accounting_destroy(struct cache_accounting *acc); void bch_cache_accounting_destroy(struct cache_accounting *acc);
void bch_mark_cache_accounting(struct cache_set *, struct bcache_device *, void bch_mark_cache_accounting(struct cache_set *c, struct bcache_device *d,
bool, bool); bool hit, bool bypass);
void bch_mark_cache_readahead(struct cache_set *, struct bcache_device *); void bch_mark_cache_readahead(struct cache_set *c, struct bcache_device *d);
void bch_mark_cache_miss_collision(struct cache_set *, struct bcache_device *); void bch_mark_cache_miss_collision(struct cache_set *c,
void bch_mark_sectors_bypassed(struct cache_set *, struct cached_dev *, int); struct bcache_device *d);
void bch_mark_sectors_bypassed(struct cache_set *c,
struct cached_dev *dc,
int sectors);
#endif /* _BCACHE_STATS_H_ */ #endif /* _BCACHE_STATS_H_ */
...@@ -130,7 +130,9 @@ rw_attribute(btree_shrinker_disabled); ...@@ -130,7 +130,9 @@ rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled); rw_attribute(copy_gc_enabled);
rw_attribute(size); rw_attribute(size);
static ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[], static ssize_t bch_snprint_string_list(char *buf,
size_t size,
const char * const list[],
size_t selected) size_t selected)
{ {
char *out = buf; char *out = buf;
...@@ -148,7 +150,7 @@ SHOW(__bch_cached_dev) ...@@ -148,7 +150,7 @@ SHOW(__bch_cached_dev)
{ {
struct cached_dev *dc = container_of(kobj, struct cached_dev, struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj); disk.kobj);
const char *states[] = { "no cache", "clean", "dirty", "inconsistent" }; char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
int wb = dc->writeback_running; int wb = dc->writeback_running;
#define var(stat) (dc->stat) #define var(stat) (dc->stat)
...@@ -307,7 +309,7 @@ STORE(__cached_dev) ...@@ -307,7 +309,7 @@ STORE(__cached_dev)
if (v < 0) if (v < 0)
return v; return v;
if ((unsigned) v != BDEV_CACHE_MODE(&dc->sb)) { if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
SET_BDEV_CACHE_MODE(&dc->sb, v); SET_BDEV_CACHE_MODE(&dc->sb, v);
bch_write_bdev_super(dc, NULL); bch_write_bdev_super(dc, NULL);
} }
...@@ -341,8 +343,9 @@ STORE(__cached_dev) ...@@ -341,8 +343,9 @@ STORE(__cached_dev)
add_uevent_var(env, "DRIVER=bcache"); add_uevent_var(env, "DRIVER=bcache");
add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid), add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
add_uevent_var(env, "CACHED_LABEL=%s", buf); add_uevent_var(env, "CACHED_LABEL=%s", buf);
kobject_uevent_env( kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
&disk_to_dev(dc->disk.disk)->kobj, KOBJ_CHANGE, env->envp); KOBJ_CHANGE,
env->envp);
kfree(env); kfree(env);
} }
...@@ -459,6 +462,7 @@ STORE(__bch_flash_dev) ...@@ -459,6 +462,7 @@ STORE(__bch_flash_dev)
if (attr == &sysfs_size) { if (attr == &sysfs_size) {
uint64_t v; uint64_t v;
strtoi_h_or_return(buf, v); strtoi_h_or_return(buf, v);
u->sectors = v >> 9; u->sectors = v >> 9;
...@@ -533,9 +537,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf) ...@@ -533,9 +537,9 @@ static int bch_bset_print_stats(struct cache_set *c, char *buf)
op.stats.floats, op.stats.failed); op.stats.floats, op.stats.failed);
} }
static unsigned bch_root_usage(struct cache_set *c) static unsigned int bch_root_usage(struct cache_set *c)
{ {
unsigned bytes = 0; unsigned int bytes = 0;
struct bkey *k; struct bkey *k;
struct btree *b; struct btree *b;
struct btree_iter iter; struct btree_iter iter;
...@@ -570,9 +574,9 @@ static size_t bch_cache_size(struct cache_set *c) ...@@ -570,9 +574,9 @@ static size_t bch_cache_size(struct cache_set *c)
return ret; return ret;
} }
static unsigned bch_cache_max_chain(struct cache_set *c) static unsigned int bch_cache_max_chain(struct cache_set *c)
{ {
unsigned ret = 0; unsigned int ret = 0;
struct hlist_head *h; struct hlist_head *h;
mutex_lock(&c->bucket_lock); mutex_lock(&c->bucket_lock);
...@@ -580,7 +584,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c) ...@@ -580,7 +584,7 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
for (h = c->bucket_hash; for (h = c->bucket_hash;
h < c->bucket_hash + (1 << BUCKET_HASH_BITS); h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
h++) { h++) {
unsigned i = 0; unsigned int i = 0;
struct hlist_node *p; struct hlist_node *p;
hlist_for_each(p, h) hlist_for_each(p, h)
...@@ -593,13 +597,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c) ...@@ -593,13 +597,13 @@ static unsigned bch_cache_max_chain(struct cache_set *c)
return ret; return ret;
} }
static unsigned bch_btree_used(struct cache_set *c) static unsigned int bch_btree_used(struct cache_set *c)
{ {
return div64_u64(c->gc_stats.key_bytes * 100, return div64_u64(c->gc_stats.key_bytes * 100,
(c->gc_stats.nodes ?: 1) * btree_bytes(c)); (c->gc_stats.nodes ?: 1) * btree_bytes(c));
} }
static unsigned bch_average_key_size(struct cache_set *c) static unsigned int bch_average_key_size(struct cache_set *c)
{ {
return c->gc_stats.nkeys return c->gc_stats.nkeys
? div64_u64(c->gc_stats.data, c->gc_stats.nkeys) ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
...@@ -703,6 +707,7 @@ STORE(__bch_cache_set) ...@@ -703,6 +707,7 @@ STORE(__bch_cache_set)
if (attr == &sysfs_flash_vol_create) { if (attr == &sysfs_flash_vol_create) {
int r; int r;
uint64_t v; uint64_t v;
strtoi_h_or_return(buf, v); strtoi_h_or_return(buf, v);
r = bch_flash_dev_create(c, v); r = bch_flash_dev_create(c, v);
...@@ -736,6 +741,7 @@ STORE(__bch_cache_set) ...@@ -736,6 +741,7 @@ STORE(__bch_cache_set)
if (attr == &sysfs_prune_cache) { if (attr == &sysfs_prune_cache) {
struct shrink_control sc; struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL; sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf); sc.nr_to_scan = strtoul_or_return(buf);
c->shrink.scan_objects(&c->shrink, &sc); c->shrink.scan_objects(&c->shrink, &sc);
...@@ -789,12 +795,14 @@ STORE_LOCKED(bch_cache_set) ...@@ -789,12 +795,14 @@ STORE_LOCKED(bch_cache_set)
SHOW(bch_cache_set_internal) SHOW(bch_cache_set_internal)
{ {
struct cache_set *c = container_of(kobj, struct cache_set, internal); struct cache_set *c = container_of(kobj, struct cache_set, internal);
return bch_cache_set_show(&c->kobj, attr, buf); return bch_cache_set_show(&c->kobj, attr, buf);
} }
STORE(bch_cache_set_internal) STORE(bch_cache_set_internal)
{ {
struct cache_set *c = container_of(kobj, struct cache_set, internal); struct cache_set *c = container_of(kobj, struct cache_set, internal);
return bch_cache_set_store(&c->kobj, attr, buf, size); return bch_cache_set_store(&c->kobj, attr, buf, size);
} }
...@@ -996,7 +1004,7 @@ STORE(__bch_cache) ...@@ -996,7 +1004,7 @@ STORE(__bch_cache)
if (v < 0) if (v < 0)
return v; return v;
if ((unsigned) v != CACHE_REPLACEMENT(&ca->sb)) { if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
mutex_lock(&ca->set->bucket_lock); mutex_lock(&ca->set->bucket_lock);
SET_CACHE_REPLACEMENT(&ca->sb, v); SET_CACHE_REPLACEMENT(&ca->sb, v);
mutex_unlock(&ca->set->bucket_lock); mutex_unlock(&ca->set->bucket_lock);
......
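The bch_btree_used() change above is type-only, but for readers following the math: it reports the share of btree node space occupied by keys, as a percentage, with (nodes ?: 1) guarding against a division by zero. A stand-alone sketch with made-up numbers (192 KiB of keys across three 256 KiB nodes), not kernel code:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
        uint64_t key_bytes   = 192 * 1024;      /* bytes of keys found by GC */
        uint64_t nodes       = 3;               /* btree nodes               */
        uint64_t btree_bytes = 256 * 1024;      /* bytes per btree node      */

        /* mirrors key_bytes * 100 / ((nodes ?: 1) * btree_bytes) */
        printf("%" PRIu64 "%%\n",
               key_bytes * 100 / ((nodes ? nodes : 1) * btree_bytes));
        return 0;       /* prints 25% */
}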
...@@ -44,9 +44,9 @@ STORE(fn) \ ...@@ -44,9 +44,9 @@ STORE(fn) \
static struct attribute sysfs_##_name = \ static struct attribute sysfs_##_name = \
{ .name = #_name, .mode = _mode } { .name = #_name, .mode = _mode }
#define write_attribute(n) __sysfs_attribute(n, S_IWUSR) #define write_attribute(n) __sysfs_attribute(n, 0200)
#define read_attribute(n) __sysfs_attribute(n, S_IRUGO) #define read_attribute(n) __sysfs_attribute(n, 0444)
#define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR) #define rw_attribute(n) __sysfs_attribute(n, 0644)
#define sysfs_printf(file, fmt, ...) \ #define sysfs_printf(file, fmt, ...) \
do { \ do { \
......
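The sysfs.h hunk above swaps the symbolic permission macros for raw octal, in line with current checkpatch advice; the values are equivalent. A quick user-space sanity check of the mapping (S_IRUGO is kernel-only, so it is spelled out from its components here):

#include <sys/stat.h>
#include <assert.h>

int main(void)
{
        assert(S_IWUSR == 0200);                        /* owner write        */
        assert((S_IRUSR | S_IRGRP | S_IROTH) == 0444);  /* kernel S_IRUGO     */
        assert((S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR) == 0644);
        return 0;
}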
// SPDX-License-Identifier: GPL-2.0
/* /*
* random utility code, for bcache but in theory not specific to bcache * random utility code, for bcache but in theory not specific to bcache
* *
...@@ -133,6 +134,7 @@ bool bch_is_zero(const char *p, size_t n) ...@@ -133,6 +134,7 @@ bool bch_is_zero(const char *p, size_t n)
int bch_parse_uuid(const char *s, char *uuid) int bch_parse_uuid(const char *s, char *uuid)
{ {
size_t i, j, x; size_t i, j, x;
memset(uuid, 0, 16); memset(uuid, 0, 16);
for (i = 0, j = 0; for (i = 0, j = 0;
......
...@@ -289,10 +289,10 @@ do { \ ...@@ -289,10 +289,10 @@ do { \
#define ANYSINT_MAX(t) \ #define ANYSINT_MAX(t) \
((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1) ((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)
int bch_strtoint_h(const char *, int *); int bch_strtoint_h(const char *cp, int *res);
int bch_strtouint_h(const char *, unsigned int *); int bch_strtouint_h(const char *cp, unsigned int *res);
int bch_strtoll_h(const char *, long long *); int bch_strtoll_h(const char *cp, long long *res);
int bch_strtoull_h(const char *, unsigned long long *); int bch_strtoull_h(const char *cp, unsigned long long *res);
static inline int bch_strtol_h(const char *cp, long *res) static inline int bch_strtol_h(const char *cp, long *res)
{ {
...@@ -348,7 +348,7 @@ static inline int bch_strtoul_h(const char *cp, long *res) ...@@ -348,7 +348,7 @@ static inline int bch_strtoul_h(const char *cp, long *res)
snprintf(buf, size, \ snprintf(buf, size, \
__builtin_types_compatible_p(typeof(var), int) \ __builtin_types_compatible_p(typeof(var), int) \
? "%i\n" : \ ? "%i\n" : \
__builtin_types_compatible_p(typeof(var), unsigned) \ __builtin_types_compatible_p(typeof(var), unsigned int) \
? "%u\n" : \ ? "%u\n" : \
__builtin_types_compatible_p(typeof(var), long) \ __builtin_types_compatible_p(typeof(var), long) \
? "%li\n" : \ ? "%li\n" : \
...@@ -380,7 +380,7 @@ struct time_stats { ...@@ -380,7 +380,7 @@ struct time_stats {
void bch_time_stats_update(struct time_stats *stats, uint64_t time); void bch_time_stats_update(struct time_stats *stats, uint64_t time);
static inline unsigned local_clock_us(void) static inline unsigned int local_clock_us(void)
{ {
return local_clock() >> 10; return local_clock() >> 10;
} }
...@@ -403,7 +403,8 @@ do { \ ...@@ -403,7 +403,8 @@ do { \
__print_time_stat(stats, name, \ __print_time_stat(stats, name, \
average_duration, duration_units); \ average_duration, duration_units); \
sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \ sysfs_print(name ## _ ##max_duration ## _ ## duration_units, \
div_u64((stats)->max_duration, NSEC_PER_ ## duration_units));\ div_u64((stats)->max_duration, \
NSEC_PER_ ## duration_units)); \
\ \
sysfs_print(name ## _last_ ## frequency_units, (stats)->last \ sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
? div_s64(local_clock() - (stats)->last, \ ? div_s64(local_clock() - (stats)->last, \
...@@ -560,9 +561,10 @@ static inline uint64_t bch_crc64_update(uint64_t crc, ...@@ -560,9 +561,10 @@ static inline uint64_t bch_crc64_update(uint64_t crc,
} }
/* Does linear interpolation between powers of two */ /* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits) static inline unsigned int fract_exp_two(unsigned int x,
unsigned int fract_bits)
{ {
unsigned fract = x & ~(~0 << fract_bits); unsigned int fract = x & ~(~0 << fract_bits);
x >>= fract_bits; x >>= fract_bits;
x = 1 << x; x = 1 << x;
......
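fract_exp_two(), reworked above for the unsigned int style, treats x as a fixed-point exponent: the integer part selects a power of two and the fractional part interpolates linearly toward the next one. The tail of the function is elided in this hunk, so the following is only a stand-alone sketch of the idea (hypothetical name fract_exp_two_sketch), not the kernel code itself:

#include <stdio.h>

static unsigned int fract_exp_two_sketch(unsigned int x, unsigned int fract_bits)
{
        unsigned int fract = x & ~(~0U << fract_bits);  /* fractional part */
        unsigned int result;

        x >>= fract_bits;       /* integer part of the exponent */
        result = 1U << x;       /* 2^integer_part               */
        /* linear interpolation toward the next power of two */
        result += (result * fract) >> fract_bits;
        return result;
}

int main(void)
{
        /* x = 1.5 with 16 fractional bits -> halfway between 2^1 and 2^2 */
        printf("%u\n", fract_exp_two_sketch(0x18000, 16));      /* prints 3 */
        return 0;
}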
...@@ -215,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work) ...@@ -215,7 +215,8 @@ static void update_writeback_rate(struct work_struct *work)
smp_mb(); smp_mb();
} }
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) static unsigned int writeback_delay(struct cached_dev *dc,
unsigned int sectors)
{ {
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent) !dc->writeback_percent)
...@@ -249,6 +250,7 @@ static void dirty_init(struct keybuf_key *w) ...@@ -249,6 +250,7 @@ static void dirty_init(struct keybuf_key *w)
static void dirty_io_destructor(struct closure *cl) static void dirty_io_destructor(struct closure *cl)
{ {
struct dirty_io *io = container_of(cl, struct dirty_io, cl); struct dirty_io *io = container_of(cl, struct dirty_io, cl);
kfree(io); kfree(io);
} }
...@@ -263,7 +265,7 @@ static void write_dirty_finish(struct closure *cl) ...@@ -263,7 +265,7 @@ static void write_dirty_finish(struct closure *cl)
/* This is kind of a dumb way of signalling errors. */ /* This is kind of a dumb way of signalling errors. */
if (KEY_DIRTY(&w->key)) { if (KEY_DIRTY(&w->key)) {
int ret; int ret;
unsigned i; unsigned int i;
struct keylist keys; struct keylist keys;
bch_keylist_init(&keys); bch_keylist_init(&keys);
...@@ -377,7 +379,7 @@ static void read_dirty_submit(struct closure *cl) ...@@ -377,7 +379,7 @@ static void read_dirty_submit(struct closure *cl)
static void read_dirty(struct cached_dev *dc) static void read_dirty(struct cached_dev *dc)
{ {
unsigned delay = 0; unsigned int delay = 0;
struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w; struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
size_t size; size_t size;
int nk, i; int nk, i;
...@@ -442,7 +444,8 @@ static void read_dirty(struct cached_dev *dc) ...@@ -442,7 +444,8 @@ static void read_dirty(struct cached_dev *dc)
io = kzalloc(sizeof(struct dirty_io) + io = kzalloc(sizeof(struct dirty_io) +
sizeof(struct bio_vec) * sizeof(struct bio_vec) *
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), DIV_ROUND_UP(KEY_SIZE(&w->key),
PAGE_SECTORS),
GFP_KERNEL); GFP_KERNEL);
if (!io) if (!io)
goto err; goto err;
...@@ -465,7 +468,8 @@ static void read_dirty(struct cached_dev *dc) ...@@ -465,7 +468,8 @@ static void read_dirty(struct cached_dev *dc)
down(&dc->in_flight); down(&dc->in_flight);
/* We've acquired a semaphore for the maximum /*
* We've acquired a semaphore for the maximum
* simultaneous number of writebacks; from here * simultaneous number of writebacks; from here
* everything happens asynchronously. * everything happens asynchronously.
*/ */
...@@ -498,11 +502,11 @@ static void read_dirty(struct cached_dev *dc) ...@@ -498,11 +502,11 @@ static void read_dirty(struct cached_dev *dc)
/* Scan for dirty data */ /* Scan for dirty data */
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors) uint64_t offset, int nr_sectors)
{ {
struct bcache_device *d = c->devices[inode]; struct bcache_device *d = c->devices[inode];
unsigned stripe_offset, stripe, sectors_dirty; unsigned int stripe_offset, stripe, sectors_dirty;
if (!d) if (!d)
return; return;
...@@ -514,7 +518,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, ...@@ -514,7 +518,7 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
stripe_offset = offset & (d->stripe_size - 1); stripe_offset = offset & (d->stripe_size - 1);
while (nr_sectors) { while (nr_sectors) {
int s = min_t(unsigned, abs(nr_sectors), int s = min_t(unsigned int, abs(nr_sectors),
d->stripe_size - stripe_offset); d->stripe_size - stripe_offset);
if (nr_sectors < 0) if (nr_sectors < 0)
...@@ -538,7 +542,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, ...@@ -538,7 +542,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
static bool dirty_pred(struct keybuf *buf, struct bkey *k) static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{ {
struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys); struct cached_dev *dc = container_of(buf,
struct cached_dev,
writeback_keys);
BUG_ON(KEY_INODE(k) != dc->disk.id); BUG_ON(KEY_INODE(k) != dc->disk.id);
...@@ -548,7 +554,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k) ...@@ -548,7 +554,7 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
static void refill_full_stripes(struct cached_dev *dc) static void refill_full_stripes(struct cached_dev *dc)
{ {
struct keybuf *buf = &dc->writeback_keys; struct keybuf *buf = &dc->writeback_keys;
unsigned start_stripe, stripe, next_stripe; unsigned int start_stripe, stripe, next_stripe;
bool wrapped = false; bool wrapped = false;
stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned)); stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
...@@ -688,7 +694,7 @@ static int bch_writeback_thread(void *arg) ...@@ -688,7 +694,7 @@ static int bch_writeback_thread(void *arg)
read_dirty(dc); read_dirty(dc);
if (searched_full_index) { if (searched_full_index) {
unsigned delay = dc->writeback_delay * HZ; unsigned int delay = dc->writeback_delay * HZ;
while (delay && while (delay &&
!kthread_should_stop() && !kthread_should_stop() &&
...@@ -712,7 +718,7 @@ static int bch_writeback_thread(void *arg) ...@@ -712,7 +718,7 @@ static int bch_writeback_thread(void *arg)
struct sectors_dirty_init { struct sectors_dirty_init {
struct btree_op op; struct btree_op op;
unsigned inode; unsigned int inode;
size_t count; size_t count;
struct bkey start; struct bkey start;
}; };
......
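The dirty-sector accounting touched above walks a request across stripe boundaries, using offset & (stripe_size - 1) (stripe_size is a power of two) for the position within the first stripe and clamping each step to what is left in the current stripe. A simplified stand-alone sketch of that split, ignoring the negative nr_sectors (decrement) case and using a hypothetical 8-sector stripe:

#include <stdio.h>

int main(void)
{
        unsigned int stripe_size = 8;           /* hypothetical, power of two */
        unsigned long long offset = 13;         /* first dirty sector         */
        int nr_sectors = 10;                    /* sectors to mark dirty      */
        unsigned int stripe = offset / stripe_size;
        unsigned int stripe_offset = offset & (stripe_size - 1);

        while (nr_sectors) {
                int room = stripe_size - stripe_offset; /* left in this stripe */
                int s = nr_sectors < room ? nr_sectors : room;

                printf("stripe %u: +%d sectors\n", stripe, s);
                nr_sectors -= s;
                stripe_offset = 0;
                stripe++;
        }
        return 0;       /* prints "stripe 1: +3 sectors", "stripe 2: +7 sectors" */
}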
...@@ -28,7 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) ...@@ -28,7 +28,7 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret; return ret;
} }
static inline unsigned offset_to_stripe(struct bcache_device *d, static inline unsigned int offset_to_stripe(struct bcache_device *d,
uint64_t offset) uint64_t offset)
{ {
do_div(offset, d->stripe_size); do_div(offset, d->stripe_size);
...@@ -37,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d, ...@@ -37,9 +37,9 @@ static inline unsigned offset_to_stripe(struct bcache_device *d,
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
uint64_t offset, uint64_t offset,
unsigned nr_sectors) unsigned int nr_sectors)
{ {
unsigned stripe = offset_to_stripe(&dc->disk, offset); unsigned int stripe = offset_to_stripe(&dc->disk, offset);
while (1) { while (1) {
if (atomic_read(dc->disk.stripe_sectors_dirty + stripe)) if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
...@@ -54,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc, ...@@ -54,9 +54,9 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
} }
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
unsigned cache_mode, bool would_skip) unsigned int cache_mode, bool would_skip)
{ {
unsigned in_use = dc->disk.c->gc_stats.in_use; unsigned int in_use = dc->disk.c->gc_stats.in_use;
if (cache_mode != CACHE_MODE_WRITEBACK || if (cache_mode != CACHE_MODE_WRITEBACK ||
test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) || test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
...@@ -96,10 +96,11 @@ static inline void bch_writeback_add(struct cached_dev *dc) ...@@ -96,10 +96,11 @@ static inline void bch_writeback_add(struct cached_dev *dc)
} }
} }
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int); void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
uint64_t offset, int nr_sectors);
void bch_sectors_dirty_init(struct bcache_device *); void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *); void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *); int bch_cached_dev_writeback_start(struct cached_dev *dc);
#endif #endif
...@@ -30,10 +30,10 @@ struct bkey { ...@@ -30,10 +30,10 @@ struct bkey {
BITMASK(name, struct bkey, field, offset, size) BITMASK(name, struct bkey, field, offset, size)
#define PTR_FIELD(name, offset, size) \ #define PTR_FIELD(name, offset, size) \
static inline __u64 name(const struct bkey *k, unsigned i) \ static inline __u64 name(const struct bkey *k, unsigned int i) \
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \ { return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
\ \
static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \ static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
{ \ { \
k->ptr[i] &= ~(~(~0ULL << size) << offset); \ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
...@@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src) ...@@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
static inline struct bkey *bkey_next(const struct bkey *k) static inline struct bkey *bkey_next(const struct bkey *k)
{ {
__u64 *d = (void *) k; __u64 *d = (void *) k;
return (struct bkey *) (d + bkey_u64s(k)); return (struct bkey *) (d + bkey_u64s(k));
} }
static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys) static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
{ {
__u64 *d = (void *) k; __u64 *d = (void *) k;
return (struct bkey *) (d + nr_keys); return (struct bkey *) (d + nr_keys);
} }
/* Enough for a key with 6 pointers */ /* Enough for a key with 6 pointers */
......
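The PTR_FIELD() accessors above extract and insert a size-bit field at a given offset within a 64-bit pointer word; ~(~0ULL << size) builds the low size-bit mask. The same arithmetic with concrete, made-up parameters (a 16-bit field at bit 20), as a user-space sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t example_get(uint64_t word)
{
        return (word >> 20) & ~(~0ULL << 16);   /* extract bits 20..35 */
}

static uint64_t example_set(uint64_t word, uint64_t v)
{
        word &= ~(~(~0ULL << 16) << 20);        /* clear bits 20..35   */
        word |= (v & ~(~0ULL << 16)) << 20;     /* insert new value    */
        return word;
}

int main(void)
{
        uint64_t w = example_set(0, 0xabcd);
        printf("0x%llx\n", (unsigned long long)example_get(w)); /* 0xabcd */
        return 0;
}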
...@@ -1829,6 +1829,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, ...@@ -1829,6 +1829,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&q->blk_trace_mutex); mutex_lock(&q->blk_trace_mutex);
if (attr == &dev_attr_enable) { if (attr == &dev_attr_enable) {
if (!!value == !!q->blk_trace) {
ret = 0;
goto out_unlock_bdev;
}
if (value) if (value)
ret = blk_trace_setup_queue(q, bdev); ret = blk_trace_setup_queue(q, bdev);
else else
......
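The blktrace hunk above makes writing the current state to the enable attribute a successful no-op instead of an error: both sides of the comparison are normalized with !! before being compared, so any non-zero value matches any non-NULL q->blk_trace. A trivial stand-alone illustration of that idiom (hypothetical helper same_state()):

#include <stdio.h>
#include <stddef.h>

static int same_state(long value, void *blk_trace)
{
        return !!value == !!blk_trace;  /* both sides normalized to 0 or 1 */
}

int main(void)
{
        printf("%d\n", same_state(1, (void *)0x1));     /* already enabled:  1 */
        printf("%d\n", same_state(0, NULL));            /* already disabled: 1 */
        printf("%d\n", same_state(1, NULL));            /* needs a change:   0 */
        return 0;
}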