Commit d8c66c5d authored by Tejun Heo's avatar Tejun Heo Committed by Jens Axboe

block: fix lockdep warning on io_context release put_io_context()

11a3122f "block: strip out locking optimization in put_io_context()"
removed ioc_lock depth lockdep annotation along with locking
optimization; however, while recursing from put_io_context() is no
longer possible, ioc_release_fn() may still end up putting the last
reference of another ioc through elevator, which will grab ioc->lock
triggering a spurious (as the ioc is always a different one) A-A deadlock
warning.

As this can only happen one time from ioc_release_fn(), using non-zero
subclass from ioc_release_fn() is enough.  Use subclass 1.
Signed-off-by: default avatarTejun Heo <tj@kernel.org>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent f6302f1b
...@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work) ...@@ -80,8 +80,15 @@ static void ioc_release_fn(struct work_struct *work)
struct io_context *ioc = container_of(work, struct io_context, struct io_context *ioc = container_of(work, struct io_context,
release_work); release_work);
struct request_queue *last_q = NULL; struct request_queue *last_q = NULL;
unsigned long flags;
spin_lock_irq(&ioc->lock); /*
* Exiting icq may call into put_io_context() through elevator
* which will trigger lockdep warning. The ioc's are guaranteed to
* be different, use a different locking subclass here. Use
* irqsave variant as there's no spin_lock_irq_nested().
*/
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
while (!hlist_empty(&ioc->icq_list)) { while (!hlist_empty(&ioc->icq_list)) {
struct io_cq *icq = hlist_entry(ioc->icq_list.first, struct io_cq *icq = hlist_entry(ioc->icq_list.first,
...@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work) ...@@ -103,15 +110,15 @@ static void ioc_release_fn(struct work_struct *work)
*/ */
if (last_q) { if (last_q) {
spin_unlock(last_q->queue_lock); spin_unlock(last_q->queue_lock);
spin_unlock_irq(&ioc->lock); spin_unlock_irqrestore(&ioc->lock, flags);
blk_put_queue(last_q); blk_put_queue(last_q);
} else { } else {
spin_unlock_irq(&ioc->lock); spin_unlock_irqrestore(&ioc->lock, flags);
} }
last_q = this_q; last_q = this_q;
spin_lock_irq(this_q->queue_lock); spin_lock_irqsave(this_q->queue_lock, flags);
spin_lock(&ioc->lock); spin_lock_nested(&ioc->lock, 1);
continue; continue;
} }
ioc_exit_icq(icq); ioc_exit_icq(icq);
...@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work) ...@@ -119,10 +126,10 @@ static void ioc_release_fn(struct work_struct *work)
if (last_q) { if (last_q) {
spin_unlock(last_q->queue_lock); spin_unlock(last_q->queue_lock);
spin_unlock_irq(&ioc->lock); spin_unlock_irqrestore(&ioc->lock, flags);
blk_put_queue(last_q); blk_put_queue(last_q);
} else { } else {
spin_unlock_irq(&ioc->lock); spin_unlock_irqrestore(&ioc->lock, flags);
} }
kmem_cache_free(iocontext_cachep, ioc); kmem_cache_free(iocontext_cachep, ioc);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment