Commit de88d0d2 authored by Sebastian Ott, committed by Martin Schwidefsky

s390/scm_block: allocate aidaw pages only when necessary

AOBs (the structure describing the HW request) need to be 4K
aligned, but very little of that page is actually used. With
this patch we place aidaws at the end of the AOB page and only
allocate a separate page for aidaws when we have to (lists of
aidaws must not cross page boundaries).
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 9d4df77f
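
The trick is easiest to see in isolation. The sketch below is a minimal user-space model of the new capacity check, not the kernel code: ALIGN_UP, struct aidaw_entry, aidaw_bytes_left and aidaw_fetch are illustrative stand-ins (the real struct aidaw and struct aob live in asm/eadm.h). Each aidaw describes one 4K block of data, so the unused tail of the 4K-aligned AOB page can cover (tail / sizeof(aidaw)) * 4K bytes before a separate pool page is needed.

/*
 * Minimal user-space sketch with illustrative names and an assumed
 * 16-byte entry; not the kernel's definitions.
 */
#include <stdint.h>

#define PAGE_SIZE	4096UL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct aidaw_entry {		/* stand-in for struct aidaw */
	uint64_t flags_res;
	uint64_t data_addr;	/* one 4K block of data per entry */
};

/* Bytes of request data the entries between pos and its page end can cover. */
static unsigned long aidaw_bytes_left(struct aidaw_entry *pos)
{
	unsigned long p = (unsigned long) pos;
	unsigned long tail = ALIGN_UP(p, PAGE_SIZE) - p;

	return (tail / sizeof(*pos)) * PAGE_SIZE;
}

/* Reuse the in-page tail when it suffices; NULL means "take a pool page". */
static struct aidaw_entry *aidaw_fetch(struct aidaw_entry *next_aidaw,
				       unsigned long bytes)
{
	return aidaw_bytes_left(next_aidaw) >= bytes ? next_aidaw : NULL;
}

Note that a page-aligned pointer has an empty tail (ALIGN_UP returns it unchanged), so a fresh page is requested in that case, which matches the kernel helper's behaviour.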
@@ -121,7 +121,8 @@ static void scm_request_done(struct scm_request *scmrq)
 	u64 aidaw = msb->data_addr;
 	unsigned long flags;
 
-	if ((msb->flags & MSB_FLAG_IDA) && aidaw)
+	if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
+	    IS_ALIGNED(aidaw, PAGE_SIZE))
 		mempool_free(virt_to_page(aidaw), aidaw_pool);
 
 	spin_lock_irqsave(&list_lock, flags);
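
The extra IS_ALIGNED() test is enough to tell the two cases apart: a list handed out by aidaw_pool always starts on a page boundary, while a list placed in the tail of the AOB page starts at &aob->msb[1] and is therefore never page aligned. As a sketch (aidaw_from_pool is a hypothetical helper, not part of the patch):

	/* Sketch: only aidaw lists from the mempool start on a page boundary. */
	static inline bool aidaw_from_pool(u64 aidaw)
	{
		return IS_ALIGNED(aidaw, PAGE_SIZE);
	}
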
@@ -134,26 +135,47 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
 	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
 }
 
-struct aidaw *scm_aidaw_alloc(void)
+static inline struct aidaw *scm_aidaw_alloc(void)
 {
 	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
 
 	return page ? page_address(page) : NULL;
 }
 
+static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
+{
+	unsigned long _aidaw = (unsigned long) aidaw;
+	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
+
+	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
+}
+
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
+{
+	struct aidaw *aidaw;
+
+	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
+		return scmrq->next_aidaw;
+
+	aidaw = scm_aidaw_alloc();
+	if (aidaw)
+		memset(aidaw, 0, PAGE_SIZE);
+	return aidaw;
+}
+
 static int scm_request_prepare(struct scm_request *scmrq)
 {
 	struct scm_blk_dev *bdev = scmrq->bdev;
 	struct scm_device *scmdev = bdev->gendisk->private_data;
-	struct aidaw *aidaw = scm_aidaw_alloc();
 	struct msb *msb = &scmrq->aob->msb[0];
 	struct req_iterator iter;
+	struct aidaw *aidaw;
 	struct bio_vec bv;
 
+	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(scmrq->request));
 	if (!aidaw)
 		return -ENOMEM;
 
-	memset(aidaw, 0, PAGE_SIZE);
 	msb->bs = MSB_BS_4K;
 	scmrq->aob->request.msb_count = 1;
 	msb->scm_addr = scmdev->address +
@@ -188,6 +210,8 @@ static inline void scm_request_init(struct scm_blk_dev *bdev,
 	scmrq->bdev = bdev;
 	scmrq->retries = 4;
 	scmrq->error = 0;
+	/* We don't use all msbs - place aidaws at the end of the aob page. */
+	scmrq->next_aidaw = (void *) &aob->msb[1];
 	scm_request_cluster_init(scmrq);
 }
......
@@ -30,6 +30,7 @@ struct scm_blk_dev {
 
 struct scm_request {
 	struct scm_blk_dev *bdev;
+	struct aidaw *next_aidaw;
 	struct request *request;
 	struct aob *aob;
 	struct list_head list;
@@ -54,7 +55,7 @@ void scm_blk_irq(struct scm_device *, void *, int);
 void scm_request_finish(struct scm_request *);
 void scm_request_requeue(struct scm_request *);
 
-struct aidaw *scm_aidaw_alloc(void);
+struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
 
 int scm_drv_init(void);
 void scm_drv_cleanup(void);
......
@@ -131,16 +131,9 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 		scmrq->cluster.state = CLUSTER_READ;
 		/* fall through */
 	case CLUSTER_READ:
-		aidaw = scm_aidaw_alloc();
-		if (!aidaw)
-			return -ENOMEM;
-
-		memset(aidaw, 0, PAGE_SIZE);
-		scmrq->aob->request.msb_count = 1;
 		msb->bs = MSB_BS_4K;
 		msb->oc = MSB_OC_READ;
 		msb->flags = MSB_FLAG_IDA;
-		msb->data_addr = (u64) aidaw;
 		msb->blk_count = write_cluster_size;
 
 		addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -151,6 +144,12 @@ static int scm_prepare_cluster_request(struct scm_request *scmrq)
 				      CLUSTER_SIZE))
 			msb->blk_count = 2 * write_cluster_size;
 
+		aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
+		if (!aidaw)
+			return -ENOMEM;
+
+		scmrq->aob->request.msb_count = 1;
+		msb->data_addr = (u64) aidaw;
 		for (i = 0; i < msb->blk_count; i++) {
 			aidaw->data_addr = (u64) scmrq->cluster.buf[i];
 			aidaw++;
......
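
A back-of-the-envelope check of how far the in-page tail reaches. The offsets below are assumptions for illustration only (the authoritative layouts are in asm/eadm.h), but they show why most requests, including the cluster path sized by msb->blk_count * PAGE_SIZE, can be served without touching the mempool:

#include <stdio.h>

int main(void)
{
	/* Assumed layout: 16-byte aidaw, &aob->msb[1] roughly 160 bytes
	 * into the 4K AOB page; adjust to the real asm/eadm.h structs. */
	unsigned long next_aidaw_off = 160;
	unsigned long slots = (4096 - next_aidaw_off) / 16;	/* 246 */
	unsigned long max_bytes = slots * 4096;			/* ~984 KiB */

	/* Requests up to max_bytes need no extra aidaw page at all;
	 * only larger lists fall back to the mempool. */
	printf("%lu aidaw slots, %lu bytes addressable\n", slots, max_bytes);
	return 0;
}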