Commit 91f85da4 authored by Bart Van Assche, committed by Jens Axboe

skd: Introduce skd_process_request()

The only functional change in this patch is that the skd_fitmsg_context
in which requests are accumulated is changed from a local variable into
a member of struct skd_device. This patch will make the blk-mq conversion
easier.
Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6fbb2de5
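
Conceptually, the change described in the commit message works like the following minimal sketch (plain C, not the driver code itself; fit_msg, device_state, process_request and MAX_REQS_PER_MSG are hypothetical names that only mirror the structure shown in the diff below): the in-progress message moves from a local variable in the queue-run function into the device structure, so a partially filled message survives the per-request helper and can be flushed afterwards.

#include <stdio.h>
#include <stdlib.h>

#define MAX_REQS_PER_MSG 4	/* stand-in for skd_max_req_per_msg */

struct fit_msg {
	int num_reqs;
	int reqs[MAX_REQS_PER_MSG];
};

struct device_state {
	struct fit_msg *msg;	/* in-progress message; persists across calls */
};

static void send_msg(struct fit_msg *msg)
{
	printf("sending a message with %d request(s)\n", msg->num_reqs);
	free(msg);
}

/* Rough analogue of skd_process_request(): append one request to the
 * device-level message and send the message once it is full. */
static void process_request(struct device_state *dev, int req)
{
	if (!dev->msg) {
		dev->msg = calloc(1, sizeof(*dev->msg));
		if (!dev->msg)
			return;
	}
	dev->msg->reqs[dev->msg->num_reqs++] = req;
	if (dev->msg->num_reqs >= MAX_REQS_PER_MSG) {
		send_msg(dev->msg);
		dev->msg = NULL;
	}
}

int main(void)
{
	struct device_state dev = { .msg = NULL };
	int req;

	for (req = 0; req < 10; req++)
		process_request(&dev, req);

	/* Analogue of the final "send what we got" step in skd_request_fn(). */
	if (dev.msg) {
		send_msg(dev.msg);
		dev.msg = NULL;
	}
	return 0;
}

Because the partially built message now lives in struct skd_device rather than on the stack of skd_request_fn(), it can later be filled in from per-request callbacks, which is presumably what makes the subsequent blk-mq conversion easier.
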
@@ -232,6 +232,7 @@ struct skd_device {
spinlock_t lock;
struct gendisk *disk;
struct request_queue *queue;
struct skd_fitmsg_context *skmsg;
struct device *class_dev;
int gendisk_on;
int sync_done;
@@ -492,56 +493,30 @@ static bool skd_fail_all(struct request_queue *q)
}
}
static void skd_request_fn(struct request_queue *q)
static void skd_process_request(struct request *req)
{
struct request_queue *const q = req->q;
struct skd_device *skdev = q->queuedata;
struct skd_fitmsg_context *skmsg = NULL;
struct fit_msg_hdr *fmh = NULL;
struct skd_request_context *skreq;
struct request *req = NULL;
struct skd_fitmsg_context *skmsg;
struct fit_msg_hdr *fmh;
const u32 tag = blk_mq_unique_tag(req);
struct skd_request_context *const skreq = &skdev->skreq_table[tag];
struct skd_scsi_request *scsi_req;
unsigned long io_flags;
u32 lba;
u32 count;
int data_dir;
__be64 be_dmaa;
u64 cmdctxt;
u32 timo_slot;
int flush, fua;
u32 tag;
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
if (skd_fail_all(q))
skd_fail_all_pending(skdev);
return;
}
if (blk_queue_stopped(skdev->queue)) {
if (atomic_read(&skdev->in_flight) >=
skdev->queue_low_water_mark)
/* There is still some kind of shortage */
return;
queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
}
WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
tag, skd_max_queue_depth, q->nr_requests);
/*
* Stop conditions:
* - There are no more native requests
* - There are already the maximum number of requests in progress
* - There are no more skd_request_context entries
* - There are no more FIT msg buffers
*/
for (;; ) {
SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
flush = fua = 0;
req = blk_peek_request(q);
/* Are there any native requests to start? */
if (req == NULL)
break;
lba = (u32)blk_rq_pos(req);
count = blk_rq_sectors(req);
data_dir = rq_data_dir(req);
@@ -554,37 +529,8 @@ static void skd_request_fn(struct request_queue *q)
fua++;
dev_dbg(&skdev->pdev->dev,
"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
req, lba, lba, count, count, data_dir);
/* At this point we know there is a request */
/* Are too many requests already in progress? */
if (atomic_read(&skdev->in_flight) >=
skdev->cur_max_queue_depth) {
dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
atomic_read(&skdev->in_flight),
skdev->cur_max_queue_depth);
break;
}
/*
* OK to now dequeue request from q.
*
* At this point we are committed to either start or reject
* the native request. Note that skd_request_context is
* available but is still at the head of the free list.
*/
WARN_ON_ONCE(blk_queue_start_tag(q, req));
tag = blk_mq_unique_tag(req);
WARN_ONCE(tag >= skd_max_queue_depth,
"%#x > %#x (nr_requests = %lu)\n", tag,
skd_max_queue_depth, q->nr_requests);
skreq = &skdev->skreq_table[tag];
SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
lba, count, count, data_dir);
skreq->id = tag + SKD_ID_RW_REQUEST;
skreq->flush_cmd = 0;
@@ -594,36 +540,37 @@ static void skd_request_fn(struct request_queue *q)
skreq->req = req;
skreq->fitmsg_id = 0;
skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE :
DMA_TO_DEVICE;
skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
dev_dbg(&skdev->pdev->dev, "error Out\n");
skd_end_request(skdev, skreq->req, BLK_STS_RESOURCE);
continue;
return;
}
/* Either a FIT msg is in progress or we have to start one. */
if (skmsg == NULL) {
skmsg = skdev->skmsg;
if (!skmsg) {
skmsg = &skdev->skmsg_table[tag];
skdev->skmsg = skmsg;
/* Initialize the FIT msg header */
fmh = &skmsg->msg_buf->fmh;
memset(fmh, 0, sizeof(*fmh));
fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
skmsg->length = sizeof(*fmh);
} else {
fmh = &skmsg->msg_buf->fmh;
}
skreq->fitmsg_id = skmsg->id;
scsi_req =
&skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
memset(scsi_req, 0, sizeof(*scsi_req));
be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
cmdctxt = skreq->id + SKD_ID_INCR;
scsi_req->hdr.tag = cmdctxt;
scsi_req->hdr.tag = skreq->id;
scsi_req->hdr.sg_list_dma_address = be_dmaa;
if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
@@ -636,12 +583,10 @@ static void skd_request_fn(struct request_queue *q)
if (fua)
scsi_req->cdb[1] |= SKD_FUA_NV;
scsi_req->hdr.sg_list_len_bytes =
cpu_to_be32(skreq->sg_byte_count);
scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
/* Complete resource allocations. */
skreq->state = SKD_REQ_STATE_BUSY;
skreq->id += SKD_ID_INCR;
skmsg->length += sizeof(struct skd_scsi_request);
fmh->num_protocol_cmds_coalesced++;
@@ -662,17 +607,73 @@ static void skd_request_fn(struct request_queue *q)
*/
if (fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
skd_send_fitmsg(skdev, skmsg);
skmsg = NULL;
fmh = NULL;
skdev->skmsg = NULL;
}
}
static void skd_request_fn(struct request_queue *q)
{
struct skd_device *skdev = q->queuedata;
struct request *req;
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
if (skd_fail_all(q))
skd_fail_all_pending(skdev);
return;
}
if (blk_queue_stopped(skdev->queue)) {
if (atomic_read(&skdev->in_flight) >=
skdev->queue_low_water_mark)
/* There is still some kind of shortage */
return;
queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
}
/*
* Stop conditions:
* - There are no more native requests
* - There are already the maximum number of requests in progress
* - There are no more skd_request_context entries
* - There are no more FIT msg buffers
*/
for (;; ) {
req = blk_peek_request(q);
/* Are there any native requests to start? */
if (req == NULL)
break;
/* At this point we know there is a request */
/* Are too many requests already in progress? */
if (atomic_read(&skdev->in_flight) >=
skdev->cur_max_queue_depth) {
dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
atomic_read(&skdev->in_flight),
skdev->cur_max_queue_depth);
break;
}
/*
* OK to now dequeue request from q.
*
* At this point we are committed to either start or reject
* the native request. Note that skd_request_context is
* available but is still at the head of the free list.
*/
WARN_ON_ONCE(blk_queue_start_tag(q, req));
skd_process_request(req);
}
/* If the FIT msg buffer is not empty send what we got. */
if (skmsg) {
if (skdev->skmsg) {
struct fit_msg_hdr *fmh = &skdev->skmsg->msg_buf->fmh;
WARN_ON_ONCE(!fmh->num_protocol_cmds_coalesced);
skd_send_fitmsg(skdev, skmsg);
skmsg = NULL;
fmh = NULL;
skd_send_fitmsg(skdev, skdev->skmsg);
skdev->skmsg = NULL;
}
/*
......