Commit 7b410d07 authored by Adrian Hunter, committed by Ulf Hansson

mmc: queue: Share mmc request array between partitions

eMMC can have multiple internal partitions that are represented as separate
disks / queues. However, switching between partitions is only done when the
queue is empty. Consequently, the array of mmc requests that are queued can
be shared between partitions, saving memory.

Keep a pointer to the mmc request queue on the card, and use that instead
of allocating a new one for each partition.
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent cdf8a6fb
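
Before the diff itself, a note on the ownership model the patch introduces: the request array moves from the per-partition struct mmc_queue to the shared struct mmc_card, and each partition's queue only borrows a pointer to it. The standalone C sketch below models that pattern; the struct layouts and helper names are simplified stand-ins for illustration (only the mqrq/qdepth field names mirror the patch), not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structs; only the mqrq/qdepth
 * field names mirror the patch, everything else is illustrative. */
struct queue_req { void *sg; };            /* per-request resources  */

struct card {
	struct queue_req *mqrq;            /* shared request array   */
	int qdepth;
};

struct queue {                             /* one per partition/disk */
	struct queue_req *mqrq;            /* borrowed, not owned    */
	int qdepth;
};

/* Allocate once, at probe time (cf. mmc_queue_alloc_shared_queue). */
static int card_alloc_shared(struct card *card, int qdepth)
{
	card->mqrq = calloc(qdepth, sizeof(*card->mqrq));
	if (!card->mqrq)
		return -1;
	card->qdepth = qdepth;
	return 0;
}

/* Each partition's queue just points at the card's array
 * (cf. mmc_init_queue); this is safe because partitions are only
 * switched when the queue is empty, so at most one queue is
 * dispatching requests from the array at any time. */
static void queue_init(struct queue *mq, struct card *card)
{
	mq->mqrq = card->mqrq;
	mq->qdepth = card->qdepth;
}

/* Free once, when the card goes away (cf. mmc_queue_free_shared_queue). */
static void card_free_shared(struct card *card)
{
	free(card->mqrq);
	card->mqrq = NULL;
}

int main(void)
{
	struct card card = { 0 };
	struct queue boot0, boot1, user;   /* e.g. eMMC boot/user areas */

	if (card_alloc_shared(&card, 2))
		return 1;
	queue_init(&boot0, &card);
	queue_init(&boot1, &card);
	queue_init(&user, &card);
	printf("three queues share one array: %p %p %p\n",
	       (void *)boot0.mqrq, (void *)boot1.mqrq, (void *)user.mqrq);
	card_free_shared(&card);
	return 0;
}

The invariant that makes the sharing safe is the one stated in the commit message: the block driver switches partitions only when the queue is empty, so the shared array is never in use by two queues at once.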
drivers/mmc/core/block.c
@@ -2123,6 +2123,7 @@ static int mmc_blk_probe(struct mmc_card *card)
 {
 	struct mmc_blk_data *md, *part_md;
 	char cap_str[10];
+	int ret;
 
 	/*
 	 * Check that the card supports the command class(es) we need.
@@ -2132,9 +2133,15 @@ static int mmc_blk_probe(struct mmc_card *card)
 
 	mmc_fixup_device(card, mmc_blk_fixups);
 
+	ret = mmc_queue_alloc_shared_queue(card);
+	if (ret)
+		return ret;
+
 	md = mmc_blk_alloc(card);
-	if (IS_ERR(md))
+	if (IS_ERR(md)) {
+		mmc_queue_free_shared_queue(card);
 		return PTR_ERR(md);
+	}
 
 	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
@@ -2172,6 +2179,7 @@ static int mmc_blk_probe(struct mmc_card *card)
  out:
 	mmc_blk_remove_parts(card, md);
 	mmc_blk_remove_req(md);
+	mmc_queue_free_shared_queue(card);
 	return 0;
 }
 
@@ -2189,6 +2197,7 @@ static void mmc_blk_remove(struct mmc_card *card)
 	pm_runtime_put_noidle(&card->dev);
 	mmc_blk_remove_req(md);
 	dev_set_drvdata(&card->dev, NULL);
+	mmc_queue_free_shared_queue(card);
 }
 
 static int _mmc_blk_suspend(struct mmc_card *card)
drivers/mmc/core/queue.c
@@ -149,17 +149,13 @@ static void mmc_request_fn(struct request_queue *q)
 		wake_up_process(mq->thread);
 }
 
-static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+static struct scatterlist *mmc_alloc_sg(int sg_len)
 {
 	struct scatterlist *sg;
 
 	sg = kmalloc_array(sg_len, sizeof(*sg), GFP_KERNEL);
-	if (!sg)
-		*err = -ENOMEM;
-	else {
-		*err = 0;
+	if (sg)
 		sg_init_table(sg, sg_len);
-	}
 
 	return sg;
 }
@@ -185,6 +181,32 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+{
+	kfree(mqrq->bounce_sg);
+	mqrq->bounce_sg = NULL;
+
+	kfree(mqrq->sg);
+	mqrq->sg = NULL;
+
+	kfree(mqrq->bounce_buf);
+	mqrq->bounce_buf = NULL;
+}
+
+static void mmc_queue_reqs_free_bufs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	int i;
+
+	for (i = 0; i < qdepth; i++)
+		mmc_queue_req_free_bufs(&mqrq[i]);
+}
+
+static void mmc_queue_free_mqrqs(struct mmc_queue_req *mqrq, int qdepth)
+{
+	mmc_queue_reqs_free_bufs(mqrq, qdepth);
+	kfree(mqrq);
+}
+
 static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
 {
 	struct mmc_queue_req *mqrq;
@@ -200,79 +222,137 @@ static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
 }
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
-static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
-					unsigned int bouncesz)
+static int mmc_queue_alloc_bounce_bufs(struct mmc_queue_req *mqrq, int qdepth,
+				       unsigned int bouncesz)
 {
 	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->mqrq[i].bounce_buf)
-			goto out_err;
-	}
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+		if (!mqrq[i].bounce_buf)
+			return -ENOMEM;
 
-	return true;
+		mqrq[i].sg = mmc_alloc_sg(1);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
 
-out_err:
-	while (--i >= 0) {
-		kfree(mq->mqrq[i].bounce_buf);
-		mq->mqrq[i].bounce_buf = NULL;
+		mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512);
+		if (!mqrq[i].bounce_sg)
+			return -ENOMEM;
 	}
-	pr_warn("%s: unable to allocate bounce buffers\n",
-		mmc_card_name(mq->card));
-	return false;
+
+	return 0;
 }
 
-static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
-				      unsigned int bouncesz)
+static bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq, int qdepth,
+				   unsigned int bouncesz)
 {
-	int i, ret;
+	int ret;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
-		if (ret)
-			return ret;
+	ret = mmc_queue_alloc_bounce_bufs(mqrq, qdepth, bouncesz);
+	if (ret)
+		mmc_queue_reqs_free_bufs(mqrq, qdepth);
 
-		mq->mqrq[i].bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
-		if (ret)
-			return ret;
-	}
+	return !ret;
+}
+
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
+	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
+
+	if (host->max_segs != 1)
+		return 0;
+
+	if (bouncesz > host->max_req_size)
+		bouncesz = host->max_req_size;
+	if (bouncesz > host->max_seg_size)
+		bouncesz = host->max_seg_size;
+	if (bouncesz > host->max_blk_count * 512)
+		bouncesz = host->max_blk_count * 512;
+
+	if (bouncesz <= 512)
+		return 0;
+
+	return bouncesz;
+}
+#else
+static inline bool mmc_queue_alloc_bounce(struct mmc_queue_req *mqrq,
+					  int qdepth, unsigned int bouncesz)
+{
+	return false;
+}
 
+static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
+{
 	return 0;
 }
 #endif
 
-static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
+static int mmc_queue_alloc_sgs(struct mmc_queue_req *mqrq, int qdepth,
+			       int max_segs)
 {
-	int i, ret;
+	int i;
 
-	for (i = 0; i < mq->qdepth; i++) {
-		mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
-		if (ret)
-			return ret;
+	for (i = 0; i < qdepth; i++) {
+		mqrq[i].sg = mmc_alloc_sg(max_segs);
+		if (!mqrq[i].sg)
+			return -ENOMEM;
 	}
 
 	return 0;
 }
 
-static void mmc_queue_req_free_bufs(struct mmc_queue_req *mqrq)
+void mmc_queue_free_shared_queue(struct mmc_card *card)
 {
-	kfree(mqrq->bounce_sg);
-	mqrq->bounce_sg = NULL;
-
-	kfree(mqrq->sg);
-	mqrq->sg = NULL;
-
-	kfree(mqrq->bounce_buf);
-	mqrq->bounce_buf = NULL;
+	if (card->mqrq) {
+		mmc_queue_free_mqrqs(card->mqrq, card->qdepth);
+		card->mqrq = NULL;
+	}
 }
 
-static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+static int __mmc_queue_alloc_shared_queue(struct mmc_card *card, int qdepth)
 {
-	int i;
+	struct mmc_host *host = card->host;
+	struct mmc_queue_req *mqrq;
+	unsigned int bouncesz;
+	int ret = 0;
+
+	if (card->mqrq)
+		return -EINVAL;
+
+	mqrq = mmc_queue_alloc_mqrqs(qdepth);
+	if (!mqrq)
+		return -ENOMEM;
+
+	card->mqrq = mqrq;
+	card->qdepth = qdepth;
+
+	bouncesz = mmc_queue_calc_bouncesz(host);
+
+	if (bouncesz && !mmc_queue_alloc_bounce(mqrq, qdepth, bouncesz)) {
+		bouncesz = 0;
+		pr_warn("%s: unable to allocate bounce buffers\n",
+			mmc_card_name(card));
+	}
+
+	card->bouncesz = bouncesz;
+
+	if (!bouncesz) {
+		ret = mmc_queue_alloc_sgs(mqrq, qdepth, host->max_segs);
+		if (ret)
+			goto out_err;
+	}
 
-	for (i = 0; i < mq->qdepth; i++)
-		mmc_queue_req_free_bufs(&mq->mqrq[i]);
+	return ret;
+
+out_err:
+	mmc_queue_free_shared_queue(card);
+	return ret;
 }
 
+int mmc_queue_alloc_shared_queue(struct mmc_card *card)
+{
+	return __mmc_queue_alloc_shared_queue(card, 2);
+}
+
 /**
@@ -289,7 +369,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
-	bool bounce = false;
 	int ret = -ENOMEM;
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
@@ -300,10 +379,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
-	mq->qdepth = 2;
-	mq->mqrq = mmc_queue_alloc_mqrqs(mq->qdepth);
-	if (!mq->mqrq)
-		goto blk_cleanup;
+	mq->mqrq = card->mqrq;
+	mq->qdepth = card->qdepth;
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -312,44 +389,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-#ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_segs == 1) {
-		unsigned int bouncesz;
-
-		bouncesz = MMC_QUEUE_BOUNCESZ;
-
-		if (bouncesz > host->max_req_size)
-			bouncesz = host->max_req_size;
-		if (bouncesz > host->max_seg_size)
-			bouncesz = host->max_seg_size;
-		if (bouncesz > (host->max_blk_count * 512))
-			bouncesz = host->max_blk_count * 512;
-
-		if (bouncesz > 512 &&
-		    mmc_queue_alloc_bounce_bufs(mq, bouncesz)) {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
-			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_segment_size(mq->queue, bouncesz);
-
-			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
-			if (ret)
-				goto cleanup_queue;
-			bounce = true;
-		}
-	}
-#endif
-
-	if (!bounce) {
+	if (card->bouncesz) {
+		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
+		blk_queue_max_segment_size(mq->queue, card->bouncesz);
+	} else {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-
-		ret = mmc_queue_alloc_sgs(mq, host->max_segs);
-		if (ret)
-			goto cleanup_queue;
 	}
 
 	sema_init(&mq->thread_sem, 1);
@@ -364,11 +414,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	return 0;
 
 cleanup_queue:
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-blk_cleanup:
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -390,10 +437,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 		blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	mmc_queue_reqs_free_bufs(mq);
-	kfree(mq->mqrq);
 	mq->mqrq = NULL;
-
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
drivers/mmc/core/queue.h
@@ -51,6 +51,8 @@ struct mmc_queue {
 	unsigned long		qslots;
 };
 
+extern int mmc_queue_alloc_shared_queue(struct mmc_card *card);
+extern void mmc_queue_free_shared_queue(struct mmc_card *card);
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 			  const char *);
 extern void mmc_cleanup_queue(struct mmc_queue *);
include/linux/mmc/card.h
@@ -208,6 +208,7 @@ struct sdio_cis {
 struct mmc_host;
 struct sdio_func;
 struct sdio_func_tuple;
+struct mmc_queue_req;
 
 #define SDIO_MAX_FUNCS		7
 
@@ -300,6 +301,10 @@ struct mmc_card {
 	struct dentry		*debugfs_root;
 	struct mmc_part	part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
 	unsigned int    nr_parts;
+
+	struct mmc_queue_req	*mqrq;		/* Shared queue structure */
+	unsigned int		bouncesz;	/* Bounce buffer size */
+	int			qdepth;		/* Shared queue depth */
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)
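
A worked example of the bounce-size arithmetic may help. The mmc_queue_calc_bouncesz() helper added in queue.c above starts from the driver's default (MMC_QUEUE_BOUNCESZ, 64 KiB in the kernel source), clamps it to each host limit in turn, and returns 0 when bouncing cannot help (multi-segment hosts) or is not worthwhile (one sector or less). The standalone sketch below re-expresses that logic with the host fields passed in as parameters so it runs on its own; the sample limits in main() are made up for illustration.

#include <stdio.h>

#define MMC_QUEUE_BOUNCESZ 65536   /* default from the kernel source */

/* Standalone mirror of mmc_queue_calc_bouncesz() from the patch. */
static unsigned int calc_bouncesz(unsigned int max_segs,
				  unsigned int max_req_size,
				  unsigned int max_seg_size,
				  unsigned int max_blk_count)
{
	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;

	/* Bouncing only helps hosts limited to a single segment. */
	if (max_segs != 1)
		return 0;

	/* Clamp to each host limit in turn. */
	if (bouncesz > max_req_size)
		bouncesz = max_req_size;
	if (bouncesz > max_seg_size)
		bouncesz = max_seg_size;
	if (bouncesz > max_blk_count * 512)
		bouncesz = max_blk_count * 512;

	/* A buffer of one sector or less is not worth having. */
	if (bouncesz <= 512)
		return 0;

	return bouncesz;
}

int main(void)
{
	/* Illustrative limits, not taken from any real controller. */
	printf("%u\n", calc_bouncesz(1, 131072, 65536, 65535)); /* 65536 */
	printf("%u\n", calc_bouncesz(1, 16384, 65536, 65535));  /* 16384 */
	printf("%u\n", calc_bouncesz(4, 131072, 65536, 65535)); /* 0 */
	return 0;
}

The max_segs != 1 test reflects the buffer's purpose: it coalesces a scattered request into one contiguous area precisely for hosts that can only transfer a single segment, so hosts with real scatter-gather support skip it entirely.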