Commit 291d0e5d authored by Greg Kroah-Hartman

Merge tag 'for-linus-20180929' of git://git.kernel.dk/linux-block

Jens writes:
  "Block fixes for 4.19-rc6

   A set of fixes that should go into this release. This pull request
   contains:

   - A fix (hopefully) for persistent grants in xen-blkfront. A
     previous fix from this series wasn't complete and was reverted;
     this one should be it (Boris Ostrovsky)

   - Fix for an elevator drain warning with SMR devices, which is
     triggered when you switch schedulers (Damien)

   - bcache deadlock fix (Guoju Fang)

   - Fix for the block unplug tracepoint, which has had the
     timer/explicit flag inverted since 4.11 (Ilya)

   - Fix a regression in this series where the blk-mq timeout hook is
     invoked with the RCU read lock held, hence preventing it from
     blocking (Keith)

   - NVMe pull from Christoph, with a single multipath fix (Susobhan Dey)"

* tag 'for-linus-20180929' of git://git.kernel.dk/linux-block:
  xen/blkfront: correct purging of persistent grants
  Revert "xen/blkfront: When purging persistent grants, keep them in the buffer"
  blk-mq: I/O and timer unplugs are inverted in blktrace
  bcache: add separate workqueue for journal_write to avoid deadlock
  xen/blkfront: When purging persistent grants, keep them in the buffer
  block: fix deadline elevator drain for zoned block devices
  blk-mq: Allow blocking queue tag iter callbacks
  nvme: properly propagate errors in nvme_mpath_init
parents e7541773 133424a2
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	/*
 	 * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-	 * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-	 * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-	 * synchronize_rcu to ensure all of the users go out of the critical
-	 * section below and see zeroed q_usage_counter.
+	 * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+	 * to avoid race with it.
 	 */
-	rcu_read_lock();
-	if (percpu_ref_is_zero(&q->q_usage_counter)) {
-		rcu_read_unlock();
+	if (!percpu_ref_tryget(&q->q_usage_counter))
 		return;
-	}
 	queue_for_each_hw_ctx(q, hctx, i) {
 		struct blk_mq_tags *tags = hctx->tags;
@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
 		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
 	}
-	rcu_read_unlock();
+	blk_queue_exit(q);
 }

 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
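The fix above replaces the RCU read-side critical section with a reference taken via percpu_ref_tryget() on q_usage_counter and dropped through blk_queue_exit(), so the iteration callbacks are allowed to block. As a rough illustration only (illustrative names, not the blk-mq code), a stand-alone module could exercise the same tryget/put pattern like this:

/* Module-style sketch of the "tryget a reference before iterating" pattern.
 * All names here are hypothetical; this is not the block-layer code. */
#include <linux/module.h>
#include <linux/percpu-refcount.h>

static struct percpu_ref demo_usage;

static void demo_usage_release(struct percpu_ref *ref)
{
	pr_info("demo: last reference dropped\n");
}

static void demo_busy_iter(void)
{
	/* Fails once the owner has started killing the ref (the analogue of a
	 * frozen queue), which replaces the old rcu_read_lock() /
	 * percpu_ref_is_zero() dance. */
	if (!percpu_ref_tryget(&demo_usage))
		return;

	/* ... walk the objects; the body may now sleep ... */

	percpu_ref_put(&demo_usage);	/* what blk_queue_exit() boils down to */
}

static int __init demo_init(void)
{
	int ret = percpu_ref_init(&demo_usage, demo_usage_release, 0, GFP_KERNEL);

	if (ret)
		return ret;
	demo_busy_iter();
	return 0;
}

static void __exit demo_exit(void)
{
	/* Real code would wait for the release callback, then percpu_ref_exit(). */
	percpu_ref_kill(&demo_usage);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");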
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		BUG_ON(!rq->q);
 		if (rq->mq_ctx != this_ctx) {
 			if (this_ctx) {
-				trace_block_unplug(this_q, depth, from_schedule);
+				trace_block_unplug(this_q, depth, !from_schedule);
 				blk_mq_sched_insert_requests(this_q, this_ctx,
 								&ctx_list,
 								from_schedule);
@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 	 * on 'ctx_list'. Do those.
 	 */
 	if (this_ctx) {
-		trace_block_unplug(this_q, depth, from_schedule);
+		trace_block_unplug(this_q, depth, !from_schedule);
		blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
					     from_schedule);
 	}
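For reference, the tracepoint's last argument means "explicit unplug", while these call sites hold the opposite from_schedule flag, which is why the argument is now negated. A small userspace sketch of that inversion (hypothetical names, not kernel code):

/* Userspace sketch of the flag semantics; trace_unplug() and
 * flush_plug_list() are stand-ins, not the real kernel symbols. */
#include <stdbool.h>
#include <stdio.h>

static void trace_unplug(unsigned int depth, bool explicit)
{
	printf("unplug depth=%u kind=%s\n", depth,
	       explicit ? "explicit" : "timer/schedule");
}

static void flush_plug_list(unsigned int depth, bool from_schedule)
{
	/* from_schedule == true means an implicit (scheduler-driven) unplug,
	 * so the tracepoint's "explicit" argument is its negation. */
	trace_unplug(depth, !from_schedule);
}

int main(void)
{
	flush_plug_list(4, false);	/* explicit unplug, e.g. end of a plug */
	flush_plug_list(4, true);	/* implicit unplug from the scheduler */
	return 0;
}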
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)
 	while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
 		;
-	if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
 		printk(KERN_ERR "%s: forced dispatching is broken "
 		       "(nr_sorted=%u), please report this\n",
 		       q->elevator->type->elevator_name, q->nr_sorted);
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
 			list_del(&gnt_list_entry->node);
 			gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
 			rinfo->persistent_gnts_c--;
-			__free_page(gnt_list_entry->page);
-			kfree(gnt_list_entry);
+			gnt_list_entry->gref = GRANT_INVALID_REF;
+			list_add_tail(&gnt_list_entry->node, &rinfo->grants);
 		}
 		spin_unlock_irqrestore(&rinfo->ring_lock, flags);
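The purged entry is now recycled rather than freed: its grant reference is invalidated and the entry goes back on rinfo->grants for reuse. A tiny userspace sketch of that recycle-instead-of-free idea, with purely illustrative types and names:

/* Userspace sketch only; struct grant_demo and INVALID_REF are made up. */
#include <stdio.h>

#define INVALID_REF (-1)

struct grant_demo {
	int gref;			/* stand-in for the grant reference */
	struct grant_demo *next;	/* simple singly linked free list */
};

static struct grant_demo pool[4];
static struct grant_demo *free_list;

static void recycle(struct grant_demo *g)
{
	g->gref = INVALID_REF;		/* analogous to GRANT_INVALID_REF */
	g->next = free_list;		/* back onto the free list, not freed */
	free_list = g;
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		pool[i].gref = 100 + i;	/* pretend these are active grants */
		recycle(&pool[i]);	/* purge: invalidate but keep the entry */
	}
	for (struct grant_demo *g = free_list; g; g = g->next)
		printf("reusable entry, gref=%d\n", g->gref);
	return 0;
}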
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 		closure_get(&ca->set->cl);
 		INIT_WORK(&ja->discard_work, journal_discard_work);
-		schedule_work(&ja->discard_work);
+		queue_work(bch_journal_wq, &ja->discard_work);
 	}
 }
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
 		: &j->w[0];
 	__closure_wake_up(&w->wait);
-	continue_at_nobarrier(cl, journal_write, system_wq);
+	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }

 static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
 		spin_unlock(&c->journal.lock);
 		btree_flush_write(c);
-		continue_at(cl, journal_write, system_wq);
+		continue_at(cl, journal_write, bch_journal_wq);
 		return;
 	}
@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 #define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
 		kobject_put(bcache_kobj);
 	if (bcache_wq)
 		destroy_workqueue(bcache_wq);
+	if (bch_journal_wq)
+		destroy_workqueue(bch_journal_wq);
+
 	if (bcache_major)
 		unregister_blkdev(bcache_major, "bcache");
 	unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
 	if (!bcache_wq)
 		goto err;
+	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+	if (!bch_journal_wq)
+		goto err;
+
 	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
 	if (!bcache_kobj)
 		goto err;
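Taken together, the bcache changes move journal work off system_wq onto a dedicated WQ_MEM_RECLAIM workqueue so it cannot deadlock behind unrelated work items. A minimal module-style sketch of that pattern, with hypothetical names and not the bcache code itself:

/* Sketch: give a subsystem its own reclaim-safe workqueue instead of
 * sharing system_wq. Names are illustrative only. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_journal_wq;

static void demo_journal_fn(struct work_struct *work)
{
	pr_info("demo: journal work ran on its own workqueue\n");
}
static DECLARE_WORK(demo_journal_work, demo_journal_fn);

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread, so this work can make
	 * progress even when new kworkers cannot be created under memory
	 * pressure, and it never waits behind unrelated system_wq items. */
	demo_journal_wq = alloc_workqueue("demo_journal", WQ_MEM_RECLAIM, 0);
	if (!demo_journal_wq)
		return -ENOMEM;
	queue_work(demo_journal_wq, &demo_journal_work);
	return 0;
}

static void __exit demo_exit(void)
{
	if (demo_journal_wq)
		destroy_workqueue(demo_journal_wq);	/* drains pending work */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");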
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
 	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-	if (!ctrl->ana_log_buf)
+	if (!ctrl->ana_log_buf) {
+		error = -ENOMEM;
 		goto out;
+	}
 	error = nvme_read_ana_log(ctrl, true);
 	if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
 	kfree(ctrl->ana_log_buf);
 out:
-	return -ENOMEM;
+	return error;
 }

 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
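The multipath fix records the specific failure (the -ENOMEM or the nvme_read_ana_log() result) in error and returns it from the shared exit label instead of a hard-coded -ENOMEM. A small userspace sketch of this goto-based error-propagation pattern, with illustrative names only:

/* Userspace sketch; read_log() and mpath_init_demo() are stand-ins. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int read_log(char *buf)
{
	(void)buf;
	return -EIO;		/* pretend the log read failed */
}

static int mpath_init_demo(size_t log_size)
{
	char *log_buf;
	int error = 0;

	log_buf = malloc(log_size);
	if (!log_buf) {
		error = -ENOMEM;	/* set the cause before jumping */
		goto out;
	}

	error = read_log(log_buf);
	if (error)
		goto out_free;

	free(log_buf);
	return 0;

out_free:
	free(log_buf);
out:
	return error;		/* propagate the real cause, not -ENOMEM */
}

int main(void)
{
	printf("init returned %d\n", mpath_init_demo(4096));
	return 0;
}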