Commit 990f3200 authored by Linus Torvalds

Merge tag 'block-6.1-2022-11-25' of git://git.kernel.dk/linux

Pull block fixes from Jens Axboe:

 - A few fixes for s390 dasd (Stefan, Colin)

 - Ensure that ublk doesn't reorder requests, as that can be problematic
   on devices that need specific ordering (Ming)

 - Fix a queue reference leak in disk allocation handling (Christoph)

* tag 'block-6.1-2022-11-25' of git://git.kernel.dk/linux:
  ublk_drv: don't forward io commands in reverse order
  s390/dasd: fix possible buffer overflow in copy_pair_show
  s390/dasd: fix no record found for raw_track_access
  s390/dasd: increase printing of debug data payload
  s390/dasd: Fix spelling mistake "Ivalid" -> "Invalid"
  blk-mq: fix queue reference leak on blk_mq_alloc_disk_for_queue failure
parents 364eb618 7d4a9317
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4045,9 +4045,14 @@ EXPORT_SYMBOL(__blk_mq_alloc_disk);
 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
 		struct lock_class_key *lkclass)
 {
+	struct gendisk *disk;
+
 	if (!blk_get_queue(q))
 		return NULL;
-	return __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+
+	disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
+	if (!disk)
+		blk_put_queue(q);
+	return disk;
 }
 EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
 
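The leak fix follows the usual acquire/rollback pairing: take the reference first, then drop it on every path that can still fail. A minimal userspace sketch of the same idiom; the resource_get()/resource_put() helpers are hypothetical stand-ins for blk_get_queue()/blk_put_queue():

#include <stdio.h>
#include <stdlib.h>

struct resource { int refcount; };

/* Hypothetical stand-ins for blk_get_queue()/blk_put_queue(). */
static int resource_get(struct resource *r) { r->refcount++; return 1; }
static void resource_put(struct resource *r) { r->refcount--; }

/* Same shape as the fixed blk_mq_alloc_disk_for_queue(): the reference
 * taken up front must be dropped again if the allocation fails. */
static void *alloc_object_for_resource(struct resource *r, int fail)
{
	void *obj;

	if (!resource_get(r))
		return NULL;

	obj = fail ? NULL : malloc(16);
	if (!obj)
		resource_put(r);	/* rollback, otherwise the reference leaks */
	return obj;
}

int main(void)
{
	struct resource r = { 0 };

	/* Before the fix, the failing path left the count at 1. */
	if (!alloc_object_for_resource(&r, 1))
		printf("allocation failed, refcount = %d\n", r.refcount);	/* 0 */
	return 0;
}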
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -57,10 +57,8 @@
 #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
 
 struct ublk_rq_data {
-	union {
-		struct callback_head work;
-		struct llist_node node;
-	};
+	struct llist_node node;
+	struct callback_head work;
 };
 
 struct ublk_uring_cmd_pdu {
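The union is split because, after this change, the two members can be live at the same time: every request is linked into ubq->io_cmds through node, and the task_work path still uses work for the same request, so they can no longer share storage. A small userspace sketch (simplified stand-in types, not the kernel structures) showing the layout difference:

#include <stdio.h>
#include <stddef.h>

struct lnode { struct lnode *next; };
struct callback { void (*fn)(void *); };

/* Old layout: one slot, both uses alias the same bytes. */
struct rq_data_old {
	union {
		struct callback work;
		struct lnode node;
	};
};

/* New layout: both members can be used concurrently. */
struct rq_data_new {
	struct lnode node;
	struct callback work;
};

int main(void)
{
	printf("old: node @%zu, work @%zu (same bytes)\n",
	       offsetof(struct rq_data_old, node),
	       offsetof(struct rq_data_old, work));
	printf("new: node @%zu, work @%zu (distinct)\n",
	       offsetof(struct rq_data_new, node),
	       offsetof(struct rq_data_new, work));
	return 0;
}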
@@ -766,15 +764,31 @@ static inline void __ublk_rq_task_work(struct request *req)
 	ubq_complete_io_cmd(io, UBLK_IO_RES_OK);
 }
 
+static inline void ublk_forward_io_cmds(struct ublk_queue *ubq)
+{
+	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+	struct ublk_rq_data *data, *tmp;
+
+	io_cmds = llist_reverse_order(io_cmds);
+	llist_for_each_entry_safe(data, tmp, io_cmds, node)
+		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+}
+
+static inline void ublk_abort_io_cmds(struct ublk_queue *ubq)
+{
+	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
+	struct ublk_rq_data *data, *tmp;
+
+	llist_for_each_entry_safe(data, tmp, io_cmds, node)
+		__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+}
+
 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd)
 {
 	struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
 	struct ublk_queue *ubq = pdu->ubq;
-	struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-	struct ublk_rq_data *data;
 
-	llist_for_each_entry(data, io_cmds, node)
-		__ublk_rq_task_work(blk_mq_rq_from_pdu(data));
+	ublk_forward_io_cmds(ubq);
 }
 
 static void ublk_rq_task_work_fn(struct callback_head *work)
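This is the core of the reordering fix: llist_del_all() detaches the whole lock-less list in one atomic exchange, but since producers push at the head the batch comes back newest-first, so ublk_forward_io_cmds() runs it through llist_reverse_order() to restore submission (FIFO) order; the abort path skips the reversal because ordering does not matter there. A simplified single-threaded model of the push / del_all / reverse sequence (the kernel's llist does the same thing with atomics):

#include <stdio.h>
#include <stddef.h>

struct lnode { struct lnode *next; int tag; };

/* Push at the head, as llist_add() does (minus the atomics). */
static int list_add(struct lnode *n, struct lnode **head)
{
	n->next = *head;
	*head = n;
	return n->next == NULL;	/* true if the list was empty */
}

/* Detach everything at once, as llist_del_all() does. */
static struct lnode *list_del_all(struct lnode **head)
{
	struct lnode *batch = *head;

	*head = NULL;
	return batch;
}

/* In-place reversal, as llist_reverse_order() does. */
static struct lnode *list_reverse(struct lnode *head)
{
	struct lnode *prev = NULL;

	while (head) {
		struct lnode *next = head->next;

		head->next = prev;
		prev = head;
		head = next;
	}
	return prev;
}

int main(void)
{
	struct lnode a = { .tag = 1 }, b = { .tag = 2 }, c = { .tag = 3 };
	struct lnode *head = NULL, *n;

	list_add(&a, &head);
	list_add(&b, &head);
	list_add(&c, &head);

	/* Without the reversal the batch would dispatch 3, 2, 1. */
	for (n = list_reverse(list_del_all(&head)); n; n = n->next)
		printf("dispatch request %d\n", n->tag);	/* 1, 2, 3 */
	return 0;
}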
@@ -782,14 +796,20 @@ static void ublk_rq_task_work_fn(struct callback_head *work)
 	struct ublk_rq_data *data = container_of(work,
 			struct ublk_rq_data, work);
 	struct request *req = blk_mq_rq_from_pdu(data);
+	struct ublk_queue *ubq = req->mq_hctx->driver_data;
 
-	__ublk_rq_task_work(req);
+	ublk_forward_io_cmds(ubq);
 }
 
-static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
+static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
 {
-	struct ublk_io *io = &ubq->ios[rq->tag];
 	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
+	struct ublk_io *io;
+
+	if (!llist_add(&data->node, &ubq->io_cmds))
+		return;
 
+	io = &ubq->ios[rq->tag];
 	/*
 	 * If the check pass, we know that this is a re-issued request aborted
 	 * previously in monitor_work because the ubq_daemon(cmd's task) is
@@ -803,11 +823,11 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
 	 * guarantees that here is a re-issued request aborted previously.
 	 */
 	if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) {
-		struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds);
-		struct ublk_rq_data *data;
-
-		llist_for_each_entry(data, io_cmds, node)
-			__ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data));
+		ublk_abort_io_cmds(ubq);
+	} else if (ublk_can_use_task_work(ubq)) {
+		if (task_work_add(ubq->ubq_daemon, &data->work,
+					TWA_SIGNAL_NO_IPI))
+			ublk_abort_io_cmds(ubq);
 	} else {
 		struct io_uring_cmd *cmd = io->cmd;
 		struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
@@ -817,23 +837,6 @@ static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq)
 	}
 }
 
-static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq,
-		bool last)
-{
-	struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq);
-
-	if (ublk_can_use_task_work(ubq)) {
-		enum task_work_notify_mode notify_mode = last ?
-			TWA_SIGNAL_NO_IPI : TWA_NONE;
-
-		if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
-			__ublk_abort_rq(ubq, rq);
-	} else {
-		if (llist_add(&data->node, &ubq->io_cmds))
-			ublk_submit_cmd(ubq, rq);
-	}
-}
-
 static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		const struct blk_mq_queue_data *bd)
 {
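With the old two-path ublk_queue_cmd() gone, every request is first linked into the llist, and the return value of llist_add() (true only when the list was previously empty) gates the wakeup: only the request that opens a new batch schedules the task work or the io_uring task, and that single invocation later drains everything queued behind it, which is also why the commit_rqs() batching hook below becomes unnecessary. A self-contained sketch of this "first producer kicks the consumer" pattern, using a simplified non-atomic list and a hypothetical queue_cmd():

#include <stdio.h>

struct lnode { struct lnode *next; };

static struct lnode *head;
static int kicks;	/* how many times the daemon was woken */

/* Returns 1 only when the list was empty, as llist_add() does. */
static int list_add(struct lnode *n)
{
	n->next = head;
	head = n;
	return n->next == NULL;
}

/* Models the new ublk_queue_cmd(): only the batch opener kicks. */
static void queue_cmd(struct lnode *n)
{
	if (!list_add(n))
		return;	/* a batch is already pending, nothing to do */
	kicks++;	/* task_work_add() / io_uring notification here */
}

int main(void)
{
	struct lnode rq[4];
	int i;

	for (i = 0; i < 4; i++)
		queue_cmd(&rq[i]);
	printf("4 requests queued, daemon kicked %d time(s)\n", kicks);	/* 1 */
	return 0;
}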
@@ -865,19 +868,11 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return BLK_STS_OK;
 	}
 
-	ublk_queue_cmd(ubq, rq, bd->last);
+	ublk_queue_cmd(ubq, rq);
 
 	return BLK_STS_OK;
 }
 
-static void ublk_commit_rqs(struct blk_mq_hw_ctx *hctx)
-{
-	struct ublk_queue *ubq = hctx->driver_data;
-
-	if (ublk_can_use_task_work(ubq))
-		__set_notify_signal(ubq->ubq_daemon);
-}
-
 static int ublk_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
 		unsigned int hctx_idx)
 {
@@ -899,7 +894,6 @@ static int ublk_init_rq(struct blk_mq_tag_set *set, struct request *req,
 
 static const struct blk_mq_ops ublk_mq_ops = {
 	.queue_rq       = ublk_queue_rq,
-	.commit_rqs     = ublk_commit_rqs,
 	.init_hctx	= ublk_init_hctx,
 	.init_request	= ublk_init_rq,
 };
@@ -1197,7 +1191,7 @@ static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id,
 	struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
 	struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
 
-	ublk_queue_cmd(ubq, req, true);
+	ublk_queue_cmd(ubq, req);
 }
 
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1954,7 +1954,7 @@ dasd_copy_pair_show(struct device *dev,
 			break;
 		}
 	}
-	if (!copy->entry[i].primary)
+	if (i == DASD_CP_ENTRIES)
 		goto out;
 
 	/* print all secondary */
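The overflow was in the loop-exit test: when no entry matched, i had already reached DASD_CP_ENTRIES, so evaluating copy->entry[i].primary read one slot past the array; comparing the index against the bound is the standard "loop finished without break" check. A condensed illustration with a hypothetical entry table (not the dasd structures):

#include <stdio.h>
#include <string.h>

#define CP_ENTRIES 5

struct entry { char primary[16]; };

static const struct entry table[CP_ENTRIES];	/* all entries empty */

static int find_entry(const char *name)
{
	int i;

	for (i = 0; i < CP_ENTRIES; i++) {
		if (strcmp(table[i].primary, name) == 0)
			break;
	}
	/* Buggy pattern: table[i] is out of bounds when nothing matched:
	 *	if (!table[i].primary[0]) ...
	 * Fixed pattern: test the index before touching the array. */
	if (i == CP_ENTRIES)
		return -1;
	return i;
}

int main(void)
{
	printf("lookup: %d\n", find_entry("0.0.1234"));	/* -1, no OOB read */
	return 0;
}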
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -4722,7 +4722,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 	struct dasd_device *basedev;
 	struct req_iterator iter;
 	struct dasd_ccw_req *cqr;
-	unsigned int first_offs;
 	unsigned int trkcount;
 	unsigned long *idaws;
 	unsigned int size;
@@ -4756,7 +4755,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
 		DASD_RAW_SECTORS_PER_TRACK;
 	trkcount = last_trk - first_trk + 1;
-	first_offs = 0;
 
 	if (rq_data_dir(req) == READ)
 		cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -4800,13 +4798,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
 
 	if (use_prefix) {
 		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
-			   startdev, 1, first_offs + 1, trkcount, 0, 0);
+			   startdev, 1, 0, trkcount, 0, 0);
 	} else {
 		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
 		ccw[-1].flags |= CCW_FLAG_CC;
 
 		data += sizeof(struct DE_eckd_data);
-		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
+		locate_record_ext(ccw++, data, first_trk, 0,
 				  trkcount, cmd, basedev, 0, 0);
 	}
 
@@ -5500,7 +5498,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
  * Dump the range of CCWs into 'page' buffer
  * and return number of printed chars.
  */
-static int
+static void
 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
 {
 	int len, count;
@@ -5518,16 +5516,21 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
 		else
 			datap = (char *) ((addr_t) from->cda);
 
-		/* dump data (max 32 bytes) */
-		for (count = 0; count < from->count && count < 32; count++) {
-			if (count % 8 == 0) len += sprintf(page + len, " ");
-			if (count % 4 == 0) len += sprintf(page + len, " ");
+		/* dump data (max 128 bytes) */
+		for (count = 0; count < from->count && count < 128; count++) {
+			if (count % 32 == 0)
+				len += sprintf(page + len, "\n");
+			if (count % 8 == 0)
+				len += sprintf(page + len, " ");
+			if (count % 4 == 0)
+				len += sprintf(page + len, " ");
 			len += sprintf(page + len, "%02x", datap[count]);
 		}
 		len += sprintf(page + len, "\n");
 		from++;
 	}
-	return len;
+	if (len > 0)
+		printk(KERN_ERR "%s", page);
 }
 
 static void
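The rewritten loop dumps up to 128 payload bytes per CCW instead of 32, starting a new row every 32 bytes with a wide gap every 8 bytes and a narrow gap every 4, and the helper now prints the buffer itself instead of returning a length for the caller to print. A standalone sketch of just the formatting logic, with a hypothetical dump_payload() using printf in place of sprintf-into-page:

#include <stdio.h>

/* Hex-dump 'count' bytes with the same grouping as the patched
 * dasd_eckd_dump_ccw_range(): newline every 32 bytes, two spaces
 * every 8, one space every 4, capped at 128 bytes. */
static void dump_payload(const unsigned char *data, unsigned int count)
{
	unsigned int i;

	if (count > 128)
		count = 128;
	for (i = 0; i < count; i++) {
		if (i % 32 == 0)
			printf("\n");
		if (i % 8 == 0)
			printf(" ");
		if (i % 4 == 0)
			printf(" ");
		printf("%02x", data[i]);
	}
	printf("\n");
}

int main(void)
{
	unsigned char buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++)
		buf[i] = (unsigned char)i;
	dump_payload(buf, sizeof(buf));
	return 0;
}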
@@ -5619,37 +5622,33 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
 	if (req) {
 		/* req == NULL for unsolicited interrupts */
 		/* dump the Channel Program (max 140 Bytes per line) */
-		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
+		/* Count CCW and print first CCWs (maximum 7) */
 		first = req->cpaddr;
 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
 		to = min(first + 6, last);
-		len = sprintf(page, PRINTK_HEADER
-			      " Related CP in req: %p\n", req);
-		dasd_eckd_dump_ccw_range(first, to, page + len);
-		printk(KERN_ERR "%s", page);
+		printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
+		dasd_eckd_dump_ccw_range(first, to, page);
 
 		/* print failing CCW area (maximum 4) */
 		/* scsw->cda is either valid or zero  */
-		len = 0;
 		from = ++to;
 		fail = (struct ccw1 *)(addr_t)
 				irb->scsw.cmd.cpa; /* failing CCW */
 		if (from <  fail - 2) {
 			from = fail - 2;     /* there is a gap - print header */
-			len += sprintf(page, PRINTK_HEADER "......\n");
+			printk(KERN_ERR PRINTK_HEADER "......\n");
 		}
 		to = min(fail + 1, last);
-		len += dasd_eckd_dump_ccw_range(from, to, page + len);
+		dasd_eckd_dump_ccw_range(from, to, page + len);
 
 		/* print last CCWs (maximum 2) */
+		len = 0;
 		from = max(from, ++to);
 		if (from < last - 1) {
 			from = last - 1; /* there is a gap - print header */
-			len += sprintf(page + len, PRINTK_HEADER "......\n");
+			printk(KERN_ERR PRINTK_HEADER "......\n");
 		}
-		len += dasd_eckd_dump_ccw_range(from, last, page + len);
-		if (len > 0)
-			printk(KERN_ERR "%s", page);
+		dasd_eckd_dump_ccw_range(from, last, page + len);
 	}
 	free_page((unsigned long) page);
 }
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -401,7 +401,7 @@ dasd_ioctl_copy_pair_swap(struct block_device *bdev, void __user *argp)
 		return -EFAULT;
 	}
 	if (memchr_inv(data.reserved, 0, sizeof(data.reserved))) {
-		pr_warn("%s: Ivalid swap data specified.\n",
+		pr_warn("%s: Invalid swap data specified\n",
 			dev_name(&device->cdev->dev));
 		dasd_put_device(device);
 		return DASD_COPYPAIRSWAP_INVALID;