Commit 6890ad4b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Final block fixes for 3.16

  Four small fixes that should go into 3.16, have been queued up for a
  bit and delayed due to vacation and other euro duties.  But here they
  are.  The pull request contains:

   - Fix for a reported crash with shared tagging on SCSI from Christoph

   - A regression fix for drbd.  From Lars Ellenberg.

   - Hooking up the compat ioctl for BLKZEROOUT, which requires no
     translation.  From Mikulas.

   - A fix for a regression where we would crash on queue exit if the
     root_blkg is gone/not there.  From Tejun"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: provide compat ioctl for BLKZEROOUT
  blkcg: don't call into policy draining if root_blkg is already gone
  drbd: fix regression 'out of mem, failed to invoke fence-peer helper'
  block: don't assume last put of shared tags is for the host
parents d6e6c48e 3b3a1814
...@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q) ...@@ -872,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
{ {
lockdep_assert_held(q->queue_lock); lockdep_assert_held(q->queue_lock);
/*
* @q could be exiting and already have destroyed all blkgs as
* indicated by NULL root_blkg. If so, don't confuse policies.
*/
if (!q->root_blkg)
return;
blk_throtl_drain(q); blk_throtl_drain(q);
} }
......
...@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag) ...@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag); EXPORT_SYMBOL(blk_queue_find_tag);
/** /**
* __blk_free_tags - release a given set of tag maintenance info * blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free * @bqt: the tag map to free
* *
* Tries to free the specified @bqt. Returns true if it was * Drop the reference count on @bqt and frees it when the last reference
* actually freed and false if there are still references using it * is dropped.
*/ */
static int __blk_free_tags(struct blk_queue_tag *bqt) void blk_free_tags(struct blk_queue_tag *bqt)
{ {
int retval; if (atomic_dec_and_test(&bqt->refcnt)) {
retval = atomic_dec_and_test(&bqt->refcnt);
if (retval) {
BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
bqt->max_depth); bqt->max_depth);
...@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt) ...@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
kfree(bqt); kfree(bqt);
} }
return retval;
} }
EXPORT_SYMBOL(blk_free_tags);
/** /**
* __blk_queue_free_tags - release tag maintenance info * __blk_queue_free_tags - release tag maintenance info
...@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q) ...@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
if (!bqt) if (!bqt)
return; return;
__blk_free_tags(bqt); blk_free_tags(bqt);
q->queue_tags = NULL; q->queue_tags = NULL;
queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
} }
/**
* blk_free_tags - release a given set of tag maintenance info
* @bqt: the tag map to free
*
* For externally managed @bqt frees the map. Callers of this
* function must guarantee to have released all the queues that
* might have been using this tag map.
*/
void blk_free_tags(struct blk_queue_tag *bqt)
{
if (unlikely(!__blk_free_tags(bqt)))
BUG();
}
EXPORT_SYMBOL(blk_free_tags);
/** /**
* blk_queue_free_tags - release tag maintenance info * blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device * @q: the request queue for the device
......
...@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) ...@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case BLKROSET: case BLKROSET:
case BLKDISCARD: case BLKDISCARD:
case BLKSECDISCARD: case BLKSECDISCARD:
case BLKZEROOUT:
/* /*
* the ones below are implemented in blkdev_locked_ioctl, * the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us * but we call blkdev_ioctl, which gets the lock for us
......
...@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection) ...@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
struct task_struct *opa; struct task_struct *opa;
kref_get(&connection->kref); kref_get(&connection->kref);
/* We may just have force_sig()'ed this thread
* to get it out of some blocking network function.
* Clear signals; otherwise kthread_run(), which internally uses
* wait_on_completion_killable(), will mistake our pending signal
* for a new fatal signal and fail. */
flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h"); opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) { if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n"); drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment