Commit 8622384f authored by Sebastian Ott, committed by Martin Schwidefsky

s390/scm_block: make the number of reqs per HW req configurable

Introduce a module parameter to specify the number of requests
we try to handle with one HW request.
Suggested-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent bbc610a9
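
For context, a usage sketch of the new knob (this assumes the driver is built as the scm_block module, as the subsystem prefix suggests; the sysfs path is the standard location for module parameters):

  # load the driver handling up to 16 block layer requests per HW request
  # (default is 8; scm_blk_params_valid() rejects 0 and values above 64)
  modprobe scm_block nr_requests_per_io=16

  # the parameter is declared S_IRUGO, so it can be read back but not
  # changed at runtime
  cat /sys/module/scm_block/parameters/nr_requests_per_io
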
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -25,10 +25,14 @@ static mempool_t *aidaw_pool;
 static DEFINE_SPINLOCK(list_lock);
 static LIST_HEAD(inactive_requests);
 static unsigned int nr_requests = 64;
+static unsigned int nr_requests_per_io = 8;
 static atomic_t nr_devices = ATOMIC_INIT(0);
 module_param(nr_requests, uint, S_IRUGO);
 MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
 
+module_param(nr_requests_per_io, uint, S_IRUGO);
+MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
+
 MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("scm:scmdev*");
@@ -39,6 +43,7 @@ static void __scm_free_rq(struct scm_request *scmrq)
 	free_page((unsigned long) scmrq->aob);
 	__scm_free_rq_cluster(scmrq);
+	kfree(scmrq->request);
 	kfree(aobrq);
 }
@@ -69,15 +74,16 @@ static int __scm_alloc_rq(void)
 	scmrq = (void *) aobrq->data;
 	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
-	if (!scmrq->aob) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	if (!scmrq->aob)
+		goto free;
 
-	if (__scm_alloc_rq_cluster(scmrq)) {
-		__scm_free_rq(scmrq);
-		return -ENOMEM;
-	}
+	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
+				 GFP_KERNEL);
+	if (!scmrq->request)
+		goto free;
+
+	if (__scm_alloc_rq_cluster(scmrq))
+		goto free;
 
 	INIT_LIST_HEAD(&scmrq->list);
 	spin_lock_irq(&list_lock);
@@ -85,6 +91,9 @@ static int __scm_alloc_rq(void)
 	spin_unlock_irq(&list_lock);
 	return 0;
+free:
+	__scm_free_rq(scmrq);
+	return -ENOMEM;
 }
 
 static int scm_alloc_rqs(unsigned int nrqs)
@@ -122,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
 	u64 aidaw;
 	int i;
 
-	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++) {
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
 		msb = &scmrq->aob->msb[i];
 		aidaw = msb->data_addr;
@@ -214,7 +223,8 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	struct aob_rq_header *aobrq = to_aobrq(scmrq);
 	struct aob *aob = scmrq->aob;
 
-	memset(scmrq->request, 0, sizeof(scmrq->request));
+	memset(scmrq->request, 0,
+	       nr_requests_per_io * sizeof(scmrq->request[0]));
 	memset(aob, 0, sizeof(*aob));
 	aobrq->scmdev = bdev->scmdev;
 	aob->request.cmd_code = ARQB_CMD_MOVE;
@@ -223,7 +233,7 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	scmrq->retries = 4;
 	scmrq->error = 0;
 	/* We don't use all msbs - place aidaws at the end of the aob page. */
-	scmrq->next_aidaw = (void *) &aob->msb[SCM_RQ_PER_IO];
+	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
 	scm_request_cluster_init(scmrq);
 }
@@ -242,7 +252,7 @@ void scm_request_requeue(struct scm_request *scmrq)
 	int i;
 
 	scm_release_cluster(scmrq);
-	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
 		blk_requeue_request(bdev->rq, scmrq->request[i]);
 
 	atomic_dec(&bdev->queued_reqs);
@@ -256,7 +266,7 @@ void scm_request_finish(struct scm_request *scmrq)
 	int i;
 
 	scm_release_cluster(scmrq);
-	for (i = 0; i < SCM_RQ_PER_IO && scmrq->request[i]; i++)
+	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
 		blk_end_request_all(scmrq->request[i], scmrq->error);
 
 	atomic_dec(&bdev->queued_reqs);
@@ -342,7 +352,7 @@ static void scm_blk_request(struct request_queue *rq)
 		}
 
 		blk_start_request(req);
-		if (scmrq->aob->request.msb_count < SCM_RQ_PER_IO)
+		if (scmrq->aob->request.msb_count < nr_requests_per_io)
 			continue;
 
 		if (scm_request_start(scmrq))
@@ -551,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
 	spin_unlock_irqrestore(&bdev->lock, flags);
 }
 
+static bool __init scm_blk_params_valid(void)
+{
+	if (!nr_requests_per_io || nr_requests_per_io > 64)
+		return false;
+
+	return scm_cluster_size_valid();
+}
+
 static int __init scm_blk_init(void)
 {
 	int ret = -EINVAL;
 
-	if (!scm_cluster_size_valid())
+	if (!scm_blk_params_valid())
 		goto out;
 
 	ret = register_blkdev(0, "scm");
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -11,7 +11,6 @@
 #include <asm/eadm.h>
 
 #define SCM_NR_PARTS 8
-#define SCM_RQ_PER_IO 8
 #define SCM_QUEUE_DELAY 5
 
 struct scm_blk_dev {
@@ -32,7 +31,7 @@ struct scm_blk_dev {
 struct scm_request {
 	struct scm_blk_dev *bdev;
 	struct aidaw *next_aidaw;
-	struct request *request[SCM_RQ_PER_IO];
+	struct request **request;
 	struct aob *aob;
 	struct list_head list;
 	u8 retries;