Commit cf26a236 authored by Linus Torvalds

Merge tag 'block-5.17-2022-02-11' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request
      - nvme-tcp: fix bogus request completion when failing to send AER
        (Sagi Grimberg)
      - add the missing nvme_complete_req tracepoint for batched
        completion (Bean Huo)

 - Revert of the loop async autoclear change that has continued to plague
   us this release. A few patchsets exist to improve this, but they are
   too invasive to be considered at this point (Tetsuo)

* tag 'block-5.17-2022-02-11' of git://git.kernel.dk/linux-block:
  loop: revert "make autoclear operation asynchronous"
  nvme-tcp: fix bogus request completion when failing to send AER
  nvme: add nvme_complete_req tracepoint for batched completion
parents 199b7f84 bf23747e
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1082,7 +1082,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
 	return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
 {
 	struct file *filp;
 	gfp_t gfp = lo->old_gfp_mask;
@@ -1144,6 +1144,8 @@ static void __loop_clr_fd(struct loop_device *lo)
 	/* let user-space know about this change */
 	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1151,8 +1153,18 @@ static void __loop_clr_fd(struct loop_device *lo)
 	if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
 		int err;
 
-		mutex_lock(&lo->lo_disk->open_mutex);
+		/*
+		 * open_mutex has been held already in release path, so don't
+		 * acquire it if this function is called in such case.
+		 *
+		 * If the reread partition isn't from release path, lo_refcnt
+		 * must be at least one and it can only become zero when the
+		 * current holder is released.
+		 */
+		if (!release)
+			mutex_lock(&lo->lo_disk->open_mutex);
 		err = bdev_disk_changed(lo->lo_disk, false);
-		mutex_unlock(&lo->lo_disk->open_mutex);
+		if (!release)
+			mutex_unlock(&lo->lo_disk->open_mutex);
 		if (err)
 			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
@@ -1160,43 +1172,25 @@ static void __loop_clr_fd(struct loop_device *lo)
 		/* Device is gone, no point in returning error */
 	}
 
+	/*
+	 * lo->lo_state is set to Lo_unbound here after above partscan has
+	 * finished. There cannot be anybody else entering __loop_clr_fd() as
+	 * Lo_rundown state protects us from all the other places trying to
+	 * change the 'lo' device.
+	 */
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART;
-	fput(filp);
-}
-
-static void loop_rundown_completed(struct loop_device *lo)
-{
 	mutex_lock(&lo->lo_mutex);
 	lo->lo_state = Lo_unbound;
 	mutex_unlock(&lo->lo_mutex);
-	module_put(THIS_MODULE);
-}
-
-static void loop_rundown_workfn(struct work_struct *work)
-{
-	struct loop_device *lo = container_of(work, struct loop_device,
-					      rundown_work);
-	struct block_device *bdev = lo->lo_device;
-	struct gendisk *disk = lo->lo_disk;
-
-	__loop_clr_fd(lo);
-	kobject_put(&bdev->bd_device.kobj);
-	module_put(disk->fops->owner);
-	loop_rundown_completed(lo);
-}
-
-static void loop_schedule_rundown(struct loop_device *lo)
-{
-	struct block_device *bdev = lo->lo_device;
-	struct gendisk *disk = lo->lo_disk;
 
-	__module_get(disk->fops->owner);
-	kobject_get(&bdev->bd_device.kobj);
-	INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
-	queue_work(system_long_wq, &lo->rundown_work);
+	/*
+	 * Need not hold lo_mutex to fput backing file. Calling fput holding
+	 * lo_mutex triggers a circular lock dependency possibility warning as
+	 * fput can take open_mutex which is usually taken before lo_mutex.
+	 */
+	fput(filp);
 }
 
 static int loop_clr_fd(struct loop_device *lo)
@@ -1228,8 +1222,7 @@ static int loop_clr_fd(struct loop_device *lo)
 	lo->lo_state = Lo_rundown;
 	mutex_unlock(&lo->lo_mutex);
 
-	__loop_clr_fd(lo);
-	loop_rundown_completed(lo);
+	__loop_clr_fd(lo, false);
 	return 0;
 }
@@ -1754,7 +1747,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		loop_schedule_rundown(lo);
+		__loop_clr_fd(lo, true);
 		return;
 	} else if (lo->lo_state == Lo_bound) {
 		/*
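
The restored synchronous teardown relies on a caller contract: the block layer invokes lo_release() with the disk's open_mutex already held, while the LOOP_CLR_FD ioctl path arrives without it, so the new release argument tells __loop_clr_fd() whether it may take the lock itself around the partition rescan. Below is a minimal userspace model of that contract, using pthread stand-ins; all names are illustrative, not the kernel's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the gendisk's open_mutex. */
static pthread_mutex_t open_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Models __loop_clr_fd(): only lock when the caller does not hold it. */
static void clr_fd(bool release)
{
	if (!release)
		pthread_mutex_lock(&open_mutex);
	printf("partition rescan (release=%d)\n", (int)release);
	if (!release)
		pthread_mutex_unlock(&open_mutex);
}

/* LOOP_CLR_FD ioctl path: open_mutex is not held, pass release=false. */
static void ioctl_clr_fd(void)
{
	clr_fd(false);
}

/* Last-close path: ->release() runs under open_mutex, pass release=true. */
static void last_close(void)
{
	pthread_mutex_lock(&open_mutex);
	clr_fd(true);
	pthread_mutex_unlock(&open_mutex);
}

int main(void)
{
	ioctl_clr_fd();
	last_close();
	return 0;
}

Re-taking a non-recursive mutex the caller already holds would deadlock, hence the flag rather than an unconditional lock.
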
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -56,7 +56,6 @@ struct loop_device {
 	struct gendisk		*lo_disk;
 	struct mutex		lo_mutex;
 	bool			idr_visible;
-	struct work_struct	rundown_work;
 };
 
 struct loop_cmd {
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
 
 void nvme_complete_batch_req(struct request *req)
 {
+	trace_nvme_complete_rq(req);
 	nvme_cleanup_cmd(req);
 	nvme_end_req_zoned(req);
 }
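
For context on the one-line core.c change: completions that arrive through the batched path never pass through nvme_complete_rq(), so before this fix they were invisible to the nvme trace events. A rough standalone model of a batch walk follows; the list type and names here are hypothetical stand-ins, not the kernel's io_comp_batch/rq_list machinery.

#include <stdio.h>

/* Hypothetical stand-in for a batch of completed requests; the kernel
 * walks an io_comp_batch with its rq_list helpers instead. */
struct request {
	int tag;
	struct request *next;
};

/* Stands in for trace_nvme_complete_rq(). */
static void trace_complete(struct request *rq)
{
	printf("trace: complete tag %d\n", rq->tag);
}

/* Per-request batch helper, mirroring nvme_complete_batch_req():
 * the added trace call makes batched completions visible to tracing. */
static void complete_batch_req(struct request *rq)
{
	trace_complete(rq);
	/* cleanup and zoned end-of-request would follow here */
}

/* Walk the whole batch, completing each request once. */
static void complete_batch(struct request *head)
{
	for (struct request *rq = head; rq; rq = rq->next)
		complete_batch_req(rq);
}

int main(void)
{
	struct request r2 = { .tag = 2, .next = NULL };
	struct request r1 = { .tag = 1, .next = &r2 };

	complete_batch(&r1);
	return 0;
}

With the trace call in the per-request batch helper, batched and non-batched completions emit the same event.
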
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -913,7 +913,15 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 
 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 {
-	nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+	if (nvme_tcp_async_req(req)) {
+		union nvme_result res = {};
+
+		nvme_complete_async_event(&req->queue->ctrl->ctrl,
+				cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+	} else {
+		nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+				NVME_SC_HOST_PATH_ERROR);
+	}
 }
 
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
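
The tcp.c fix turns on one fact: an AER (asynchronous event request) is issued by the driver itself and is not backed by a block-layer struct request, so failing it through nvme_tcp_end_request() completed a request that was never dispatched, i.e. a bogus completion. Below is a standalone sketch of the corrected dispatch with simplified stand-in types; the 0x370 status mirrors NVME_SC_HOST_PATH_ERROR from the kernel headers.

#include <stdbool.h>
#include <stdio.h>

#define HOST_PATH_ERROR 0x370	/* mirrors NVME_SC_HOST_PATH_ERROR */

/* Simplified stand-in; the kernel identifies the AER via
 * nvme_tcp_async_req() rather than a flag like this. */
struct tcp_request {
	bool is_async;	/* true for the driver-issued AER */
	int tag;	/* block-layer tag; meaningless for an AER */
};

/* AER path: report through the controller's async-event handler. */
static void complete_async_event(int status)
{
	printf("async event failed, status 0x%x\n", status);
}

/* Normal path: end the block-layer request that backs the command. */
static void end_request(struct tcp_request *req, int status)
{
	printf("request tag %d ended, status 0x%x\n", req->tag, status);
}

/* Mirrors the shape of nvme_tcp_fail_request() after the fix. */
static void fail_request(struct tcp_request *req)
{
	if (req->is_async)
		complete_async_event(HOST_PATH_ERROR);
	else
		end_request(req, HOST_PATH_ERROR);
}

int main(void)
{
	struct tcp_request io = { .is_async = false, .tag = 42 };
	struct tcp_request aer = { .is_async = true, .tag = -1 };

	fail_request(&io);
	fail_request(&aer);
	return 0;
}
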