Commit fcd37eb3 authored by Jens Axboe

skd: rip out bio path

The skd driver has a selectable rq- or bio-based queueing model.
For 3.14, we want to turn this into a single blk-mq interface
instead. With the immutable biovecs being merged in 3.13, the
bio model would need patches to even work. So rip it out, with
a conversion pending for blk-mq in the next release.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1762b57f
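
For reference, a condensed sketch (not the driver's verbatim code) of the two queue setups that the skd_bio module parameter used to select in skd_cons_disk(), using the 3.13-era block-layer API; skd_setup_queue() is a hypothetical helper introduced only to show the contrast. The patch keeps the request-based setup and deletes the bio-based one.

#include <linux/blkdev.h>

/* Sketch only, not part of the patch. */
static struct request_queue *skd_setup_queue(struct skd_device *skdev,
					     bool use_bio_path)
{
	struct request_queue *q;

	if (!use_bio_path) {
		/* rq model (kept): the block layer queues and merges
		 * requests and calls skd_request_fn() to drain them. */
		q = blk_init_queue(skd_request_fn, &skdev->lock);
	} else {
		/* bio model (removed): the driver takes raw bios via its
		 * own make_request hook and queues them internally. */
		q = blk_alloc_queue(GFP_KERNEL);
		if (q)
			blk_queue_make_request(q, skd_make_request);
	}
	return q;
}
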
@@ -354,13 +354,7 @@ struct skd_device {
 	u32 timo_slot;
 	struct work_struct completion_worker;
-	struct bio_list bio_queue;
-	int queue_stopped;
-	struct list_head flush_list;
 };
 #define SKD_FLUSH_JOB "skd-flush-jobs"
@@ -470,11 +464,6 @@ MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
 module_param(skd_isr_comp_limit, int, 0444);
 MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
-static int skd_bio;
-module_param(skd_bio, int, 0444);
-MODULE_PARM_DESC(skd_bio,
-		 "Register as a bio device instead of block (0, 1) default=0");
 /* Major device number dynamically assigned. */
 static u32 skd_major;
@@ -512,11 +501,6 @@ static void skd_log_skmsg(struct skd_device *skdev,
 static void skd_log_skreq(struct skd_device *skdev,
			   struct skd_request_context *skreq, const char *event);
-/* FLUSH FUA flag handling. */
-static int skd_flush_cmd_enqueue(struct skd_device *, void *);
-static void *skd_flush_cmd_dequeue(struct skd_device *);
 /*
 *****************************************************************************
 * READ/WRITE REQUESTS
@@ -524,40 +508,25 @@ static void *skd_flush_cmd_dequeue(struct skd_device *);
 */
 static void skd_stop_queue(struct skd_device *skdev)
 {
-	if (!skd_bio)
-		blk_stop_queue(skdev->queue);
-	else
-		skdev->queue_stopped = 1;
+	blk_stop_queue(skdev->queue);
 }

 static void skd_unstop_queue(struct skd_device *skdev)
 {
-	if (!skd_bio)
-		queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
-	else
-		skdev->queue_stopped = 0;
+	queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
 }

 static void skd_start_queue(struct skd_device *skdev)
 {
-	if (!skd_bio) {
-		blk_start_queue(skdev->queue);
-	} else {
-		pr_err("(%s): Starting queue\n", skd_name(skdev));
-		skdev->queue_stopped = 0;
-		skd_request_fn(skdev->queue);
-	}
+	blk_start_queue(skdev->queue);
 }

 static int skd_queue_stopped(struct skd_device *skdev)
 {
-	if (!skd_bio)
-		return blk_queue_stopped(skdev->queue);
-	else
-		return skdev->queue_stopped;
+	return blk_queue_stopped(skdev->queue);
 }

-static void skd_fail_all_pending_blk(struct skd_device *skdev)
+static void skd_fail_all_pending(struct skd_device *skdev)
 {
 	struct request_queue *q = skdev->queue;
 	struct request *req;
@@ -571,42 +540,6 @@ static void skd_fail_all_pending_blk(struct skd_device *skdev)
 	}
 }

-static void skd_fail_all_pending_bio(struct skd_device *skdev)
-{
-	struct bio *bio;
-	int error = -EIO;
-
-	for (;; ) {
-		bio = bio_list_pop(&skdev->bio_queue);
-		if (bio == NULL)
-			break;
-		bio_endio(bio, error);
-	}
-}
-
-static void skd_fail_all_pending(struct skd_device *skdev)
-{
-	if (!skd_bio)
-		skd_fail_all_pending_blk(skdev);
-	else
-		skd_fail_all_pending_bio(skdev);
-}
-
-static void skd_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct skd_device *skdev = q->queuedata;
-	unsigned long flags;
-
-	spin_lock_irqsave(&skdev->lock, flags);
-	bio_list_add(&skdev->bio_queue, bio);
-	skd_request_fn(skdev->queue);
-	spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
 static void
 skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		 int data_dir, unsigned lba,
@@ -667,18 +600,9 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
 	put_unaligned_be64(lba, &buf[8]);
 	put_unaligned_be32(count, &buf[16]);

-	if (!skd_bio) {
-		req = skreq->req;
-		blk_add_request_payload(req, page, len);
-		req->buffer = buf;
-	} else {
-		skreq->bio->bi_io_vec->bv_page = page;
-		skreq->bio->bi_io_vec->bv_offset = 0;
-		skreq->bio->bi_io_vec->bv_len = len;
-		skreq->bio->bi_vcnt = 1;
-		skreq->bio->bi_phys_segments = 1;
-	}
+	req = skreq->req;
+	blk_add_request_payload(req, page, len);
+	req->buffer = buf;
 }

 static void skd_request_fn_not_online(struct request_queue *q);
@@ -690,7 +614,6 @@ static void skd_request_fn(struct request_queue *q)
 	struct fit_msg_hdr *fmh = NULL;
 	struct skd_request_context *skreq;
 	struct request *req = NULL;
-	struct bio *bio = NULL;
 	struct skd_scsi_request *scsi_req;
 	struct page *page;
 	unsigned long io_flags;
@@ -732,7 +655,6 @@ static void skd_request_fn(struct request_queue *q)
 		flush = fua = 0;

-		if (!skd_bio) {
-			req = blk_peek_request(q);
+		req = blk_peek_request(q);

 		/* Are there any native requests to start? */
@@ -754,38 +676,6 @@ static void skd_request_fn(struct request_queue *q)
			 "count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count, data_dir);
-		} else {
-			if (!list_empty(&skdev->flush_list)) {
-				/* Process data part of FLUSH request. */
-				bio = (struct bio *)skd_flush_cmd_dequeue(skdev);
-				flush++;
-				pr_debug("%s:%s:%d processing FLUSH request with data.\n",
-					 skdev->name, __func__, __LINE__);
-			} else {
-				/* peek at our bio queue */
-				bio = bio_list_peek(&skdev->bio_queue);
-			}
-
-			/* Are there any native requests to start? */
-			if (bio == NULL)
-				break;
-
-			lba = (u32)bio->bi_sector;
-			count = bio_sectors(bio);
-			data_dir = bio_data_dir(bio);
-			io_flags = bio->bi_rw;
-
-			pr_debug("%s:%s:%d new bio=%p lba=%u(0x%x) "
-				 "count=%u(0x%x) dir=%d\n",
-				 skdev->name, __func__, __LINE__,
-				 bio, lba, lba, count, count, data_dir);
-
-			if (io_flags & REQ_FLUSH)
-				flush++;
-
-			if (io_flags & REQ_FUA)
-				fua++;
-		}

 		/* At this point we know there is a request
 		 * (from our bio q or req q depending on the way
@@ -831,23 +721,9 @@ static void skd_request_fn(struct request_queue *q)
		 * the native request. Note that skd_request_context is
		 * available but is still at the head of the free list.
		 */
-		if (!skd_bio) {
-			blk_start_request(req);
-			skreq->req = req;
-			skreq->fitmsg_id = 0;
-		} else {
-			if (unlikely(flush == SKD_FLUSH_DATA_SECOND)) {
-				skreq->bio = bio;
-			} else {
-				skreq->bio = bio_list_pop(&skdev->bio_queue);
-				SKD_ASSERT(skreq->bio == bio);
-				skreq->start_time = jiffies;
-				part_inc_in_flight(&skdev->disk->part0,
-						   bio_data_dir(bio));
-			}
-
-			skreq->fitmsg_id = 0;
-		}
+		blk_start_request(req);
+		skreq->req = req;
+		skreq->fitmsg_id = 0;

 		/* Either a FIT msg is in progress or we have to start one. */
 		if (skmsg == NULL) {
@@ -923,8 +799,7 @@ static void skd_request_fn(struct request_queue *q)
 		if (fua)
 			scsi_req->cdb[1] |= SKD_FUA_NV;
-		if ((!skd_bio && !req->bio) ||
-		    (skd_bio && flush == SKD_FLUSH_ZERO_SIZE_FIRST))
+		if (!req->bio)
 			goto skip_sg;

 		error = skd_preop_sg_list(skdev, skreq);
@@ -1011,8 +886,7 @@ static void skd_request_fn(struct request_queue *q)
	 * If req is non-NULL it means there is something to do but
	 * we are out of a resource.
	 */
-	if (((!skd_bio) && req) ||
-	    ((skd_bio) && bio_list_peek(&skdev->bio_queue)))
+	if (req)
 		skd_stop_queue(skdev);
 }
@@ -1045,7 +919,7 @@ static void skd_end_request_blk(struct skd_device *skdev,
 	__blk_end_request_all(skreq->req, error);
 }

-static int skd_preop_sg_list_blk(struct skd_device *skdev,
+static int skd_preop_sg_list(struct skd_device *skdev,
				 struct skd_request_context *skreq)
 {
 	struct request *req = skreq->req;
@@ -1108,7 +982,7 @@ static int skd_preop_sg_list_blk(struct skd_device *skdev,
 	return 0;
 }

-static void skd_postop_sg_list_blk(struct skd_device *skdev,
+static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
 {
 	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
@@ -1124,184 +998,10 @@ static void skd_postop_sg_list_blk(struct skd_device *skdev,
 	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
 }

-static void skd_end_request_bio(struct skd_device *skdev,
-				struct skd_request_context *skreq, int error)
-{
-	struct bio *bio = skreq->bio;
-	int rw = bio_data_dir(bio);
-	unsigned long io_flags = bio->bi_rw;
-
-	if ((io_flags & REQ_DISCARD) &&
-	    (skreq->discard_page == 1)) {
-		pr_debug("%s:%s:%d biomode: skd_end_request: freeing DISCARD page.\n",
-			 skdev->name, __func__, __LINE__);
-		free_page((unsigned long)page_address(bio->bi_io_vec->bv_page));
-	}
-
-	if (unlikely(error)) {
-		u32 lba = (u32)skreq->bio->bi_sector;
-		u32 count = bio_sectors(skreq->bio);
-		char *cmd = (rw == WRITE) ? "write" : "read";
-		pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
-		       skd_name(skdev), cmd, lba, count, skreq->id);
-	}
-
-	{
-		int cpu = part_stat_lock();
-
-		if (likely(!error)) {
-			part_stat_inc(cpu, &skdev->disk->part0, ios[rw]);
-			part_stat_add(cpu, &skdev->disk->part0, sectors[rw],
-				      bio_sectors(bio));
-		}
-		part_stat_add(cpu, &skdev->disk->part0, ticks[rw],
-			      jiffies - skreq->start_time);
-		part_dec_in_flight(&skdev->disk->part0, rw);
-		part_stat_unlock();
-	}
-
-	pr_debug("%s:%s:%d id=0x%x error=%d\n",
-		 skdev->name, __func__, __LINE__, skreq->id, error);
-
-	bio_endio(skreq->bio, error);
-}
-
-static int skd_preop_sg_list_bio(struct skd_device *skdev,
-				 struct skd_request_context *skreq)
-{
-	struct bio *bio = skreq->bio;
-	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
-	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-	int n_sg;
-	int i;
-	struct bio_vec *vec;
-	struct fit_sg_descriptor *sgd;
-	u64 dma_addr;
-	u32 count;
-	int errs = 0;
-	unsigned int io_flags = 0;
-	io_flags |= bio->bi_rw;
-
-	skreq->sg_byte_count = 0;
-	n_sg = skreq->n_sg = skreq->bio->bi_vcnt;
-
-	if (n_sg <= 0)
-		return -EINVAL;
-
-	if (n_sg > skdev->sgs_per_request) {
-		pr_err("(%s): sg overflow n=%d\n",
-		       skd_name(skdev), n_sg);
-		skreq->n_sg = 0;
-		return -EIO;
-	}
-
-	for (i = 0; i < skreq->n_sg; i++) {
-		vec = bio_iovec_idx(bio, i);
-		dma_addr = pci_map_page(skdev->pdev,
-					vec->bv_page,
-					vec->bv_offset, vec->bv_len, pci_dir);
-		count = vec->bv_len;
-
-		if (count == 0 || count > 64u * 1024u || (count & 3) != 0
-		    || (dma_addr & 3) != 0) {
-			pr_err(
-			       "(%s): Bad sg ix=%d count=%d addr=0x%llx\n",
-			       skd_name(skdev), i, count, dma_addr);
-			errs++;
-		}
-
-		sgd = &skreq->sksg_list[i];
-		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
-		sgd->byte_count = vec->bv_len;
-		skreq->sg_byte_count += vec->bv_len;
-		sgd->host_side_addr = dma_addr;
-		sgd->dev_side_addr = 0; /* not used */
-	}
-
-	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
-	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
-
-	if (!(io_flags & REQ_DISCARD)) {
-		count = bio_sectors(bio) << 9u;
-		if (count != skreq->sg_byte_count) {
-			pr_err("(%s): mismatch count sg=%d req=%d\n",
-			       skd_name(skdev), skreq->sg_byte_count, count);
-			errs++;
-		}
-	}
-
-	if (unlikely(skdev->dbg_level > 1)) {
-		pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
-			 skdev->name, __func__, __LINE__,
-			 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
-		for (i = 0; i < n_sg; i++) {
-			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-			pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
-				 "addr=0x%llx next=0x%llx\n",
-				 skdev->name, __func__, __LINE__,
-				 i, sgd->byte_count, sgd->control,
-				 sgd->host_side_addr, sgd->next_desc_ptr);
-		}
-	}
-
-	if (errs != 0) {
-		skd_postop_sg_list(skdev, skreq);
-		skreq->n_sg = 0;
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int skd_preop_sg_list(struct skd_device *skdev,
-			     struct skd_request_context *skreq)
-{
-	if (!skd_bio)
-		return skd_preop_sg_list_blk(skdev, skreq);
-	else
-		return skd_preop_sg_list_bio(skdev, skreq);
-}
-
-static void skd_postop_sg_list_bio(struct skd_device *skdev,
-				   struct skd_request_context *skreq)
-{
-	int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
-	int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-	int i;
-	struct fit_sg_descriptor *sgd;
-
-	/*
-	 * restore the next ptr for next IO request so we
-	 * don't have to set it every time.
-	 */
-	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
-		skreq->sksg_dma_address +
-		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
-
-	for (i = 0; i < skreq->n_sg; i++) {
-		sgd = &skreq->sksg_list[i];
-		pci_unmap_page(skdev->pdev, sgd->host_side_addr,
-			       sgd->byte_count, pci_dir);
-	}
-}
-
-static void skd_postop_sg_list(struct skd_device *skdev,
-			       struct skd_request_context *skreq)
-{
-	if (!skd_bio)
-		skd_postop_sg_list_blk(skdev, skreq);
-	else
-		skd_postop_sg_list_bio(skdev, skreq);
-}
-
 static void skd_end_request(struct skd_device *skdev,
			     struct skd_request_context *skreq, int error)
 {
-	if (likely(!skd_bio))
-		skd_end_request_blk(skdev, skreq, error);
-	else
-		skd_end_request_bio(skdev, skreq, error);
+	skd_end_request_blk(skdev, skreq, error);
 }

 static void skd_request_fn_not_online(struct request_queue *q)
@@ -2754,14 +2454,11 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 		break;

 	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
-		if (!skd_bio) {
-			if ((unsigned long) ++skreq->req->special <
-			    SKD_MAX_RETRIES) {
-				skd_log_skreq(skdev, skreq, "retry");
-				skd_requeue_request(skdev, skreq);
-				break;
-			}
-		}
+		if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+			skd_log_skreq(skdev, skreq, "retry");
+			skd_requeue_request(skdev, skreq);
+			break;
+		}
 		/* fall through to report error */
 	case SKD_CHECK_STATUS_REPORT_ERROR:
@@ -2774,12 +2471,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
 static void skd_requeue_request(struct skd_device *skdev,
				struct skd_request_context *skreq)
 {
-	if (!skd_bio) {
-		blk_requeue_request(skdev->queue, skreq->req);
-	} else {
-		bio_list_add_head(&skdev->bio_queue, skreq->bio);
-		skreq->bio = NULL;
-	}
+	blk_requeue_request(skdev->queue, skreq->req);
 }
@@ -2840,11 +2532,7 @@ static void skd_release_skreq(struct skd_device *skdev,
	/*
	 * Reset backpointer
	 */
-	if (likely(!skd_bio))
-		skreq->req = NULL;
-	else
-		skreq->bio = NULL;
+	skreq->req = NULL;

	/*
	 * Reclaim the skd_request_context
@@ -3084,8 +2772,6 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 	u32 cmp_bytes = 0;
 	int rc = 0;
 	int processed = 0;
-	int ret;

 	for (;; ) {
 		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
@@ -3180,8 +2866,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
 		if (skreq->n_sg > 0)
 			skd_postop_sg_list(skdev, skreq);

-		if (((!skd_bio) && !skreq->req) ||
-		    ((skd_bio) && !skreq->bio)) {
+		if (!skreq->req) {
 			pr_debug("%s:%s:%d NULL backptr skdreq %p, "
				 "req=0x%x req_id=0x%x\n",
				 skdev->name, __func__, __LINE__,
@@ -3191,31 +2876,11 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
		 * Capture the outcome and post it back to the
		 * native request.
		 */
-		if (likely(cmp_status == SAM_STAT_GOOD)) {
-			if (unlikely(skreq->flush_cmd)) {
-				if (skd_bio) {
-					/* if empty size bio, we are all done */
-					if (bio_sectors(skreq->bio) == 0) {
-						skd_end_request(skdev, skreq, 0);
-					} else {
-						ret = skd_flush_cmd_enqueue(skdev, (void *)skreq->bio);
-						if (ret != 0) {
-							pr_err("Failed to enqueue flush bio with Data. Err=%d.\n", ret);
-							skd_end_request(skdev, skreq, ret);
-						} else {
-							((*enqueued)++);
-						}
-					}
-				} else {
-					skd_end_request(skdev, skreq, 0);
-				}
-			} else {
-				skd_end_request(skdev, skreq, 0);
-			}
-		} else {
-			skd_resolve_req_exception(skdev, skreq);
-		}
+		if (likely(cmp_status == SAM_STAT_GOOD))
+			skd_end_request(skdev, skreq, 0);
+		else
+			skd_resolve_req_exception(skdev, skreq);
 		}

		/*
		 * Release the skreq, its FIT msg (if one), timeout slot,
@@ -3645,29 +3310,20 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
 			skd_log_skreq(skdev, skreq, "recover");

 			SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
-			if (!skd_bio)
-				SKD_ASSERT(skreq->req != NULL);
-			else
-				SKD_ASSERT(skreq->bio != NULL);
+			SKD_ASSERT(skreq->req != NULL);

 			/* Release DMA resources for the request. */
 			if (skreq->n_sg > 0)
 				skd_postop_sg_list(skdev, skreq);

-			if (!skd_bio) {
-				if (requeue &&
-				    (unsigned long) ++skreq->req->special <
-				    SKD_MAX_RETRIES)
-					skd_requeue_request(skdev, skreq);
-				else
-					skd_end_request(skdev, skreq, -EIO);
-			} else
-				skd_end_request(skdev, skreq, -EIO);
+			if (requeue &&
+			    (unsigned long) ++skreq->req->special <
+			    SKD_MAX_RETRIES)
+				skd_requeue_request(skdev, skreq);
+			else
+				skd_end_request(skdev, skreq, -EIO);

-			if (!skd_bio)
-				skreq->req = NULL;
-			else
-				skreq->bio = NULL;
+			skreq->req = NULL;

 			skreq->state = SKD_REQ_STATE_IDLE;
 			skreq->id += SKD_ID_INCR;
@@ -4580,16 +4236,11 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
 	skdev->sgs_per_request = skd_sgs_per_request;
 	skdev->dbg_level = skd_dbg_level;

-	if (skd_bio)
-		bio_list_init(&skdev->bio_queue);
-
 	atomic_set(&skdev->device_count, 0);
 	spin_lock_init(&skdev->lock);

 	INIT_WORK(&skdev->completion_worker, skd_completion_worker);
-	INIT_LIST_HEAD(&skdev->flush_list);

 	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
 	rc = skd_cons_skcomp(skdev);
@@ -4941,13 +4592,7 @@ static int skd_cons_disk(struct skd_device *skdev)
 	disk->fops = &skd_blockdev_ops;
 	disk->private_data = skdev;

-	if (!skd_bio) {
-		q = blk_init_queue(skd_request_fn, &skdev->lock);
-	} else {
-		q = blk_alloc_queue(GFP_KERNEL);
-		q->queue_flags = QUEUE_FLAG_IO_STAT | QUEUE_FLAG_STACKABLE;
-	}
+	q = blk_init_queue(skd_request_fn, &skdev->lock);
 	if (!q) {
 		rc = -ENOMEM;
 		goto err_out;
@@ -4957,11 +4602,6 @@ static int skd_cons_disk(struct skd_device *skdev)
 	disk->queue = q;
 	q->queuedata = skdev;

-	if (skd_bio) {
-		q->queue_lock = &skdev->lock;
-		blk_queue_make_request(q, skd_make_request);
-	}
-
 	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 	blk_queue_max_segments(q, skdev->sgs_per_request);
 	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
@@ -5794,7 +5434,6 @@ static void skd_log_skreq(struct skd_device *skdev,
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

-	if (!skd_bio) {
-		if (skreq->req != NULL) {
+	if (skreq->req != NULL) {
 		struct request *req = skreq->req;
 		u32 lba = (u32)blk_rq_pos(req);
@@ -5808,21 +5447,6 @@ static void skd_log_skreq(struct skd_device *skdev,
 	} else
 		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
-	} else {
-		if (skreq->bio != NULL) {
-			struct bio *bio = skreq->bio;
-			u32 lba = (u32)bio->bi_sector;
-			u32 count = bio_sectors(bio);
-
-			pr_debug("%s:%s:%d "
-				 "bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
-				 skdev->name, __func__, __LINE__,
-				 bio, lba, lba, count, count,
-				 (int)bio_data_dir(bio));
-		} else
-			pr_debug("%s:%s:%d req=NULL\n",
-				 skdev->name, __func__, __LINE__);
-	}
 }

 /*
@@ -5918,34 +5542,5 @@ static void __exit skd_exit(void)
 	kmem_cache_destroy(skd_flush_slab);
 }

-static int
-skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
-{
-	struct skd_flush_cmd *item;
-
-	item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
-	if (!item) {
-		pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
-		return -ENOMEM;
-	}
-
-	item->cmd = cmd;
-	list_add_tail(&item->flist, &skdev->flush_list);
-	return 0;
-}
-
-static void *
-skd_flush_cmd_dequeue(struct skd_device *skdev)
-{
-	void *cmd;
-	struct skd_flush_cmd *item;
-
-	item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
-	list_del_init(&item->flist);
-	cmd = item->cmd;
-	kmem_cache_free(skd_flush_slab, item);
-	return cmd;
-}
-
 module_init(skd_init);
 module_exit(skd_exit);
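
As the commit message notes, the surviving request_fn path is itself slated for a blk-mq conversion. Roughly, the driver would trade skd_request_fn() for a queue_rq callback along the lines below. This is a sketch only, using later-kernel blk-mq API names (blk_mq_ops, blk_mq_start_request, blk_status_t); skd_issue_fit_msg() is a hypothetical stand-in for the FIT-message construction that skd_request_fn() does today, and none of this is code from this or any specific follow-up patch.

#include <linux/blk-mq.h>

/* Hypothetical shape of the future blk-mq hook. */
static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;

	blk_mq_start_request(req);
	/* build and issue the FIT message for req */
	return skd_issue_fit_msg(req->q->queuedata, req);
}

static const struct blk_mq_ops skd_mq_ops = {
	.queue_rq	= skd_mq_queue_rq,
};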