Commit ce39f9d1 authored by Seungwon Jeon, committed by Chris Ball

mmc: support packed write command for eMMC4.5 devices

This patch adds support for the packed write command of eMMC 4.5
devices.  Several write requests can be grouped into one packed
command, so the data of all the individual requests is sent in a
single transfer on the bus.  Moving a large amount of data in one
transfer is handled more efficiently by the eMMC device internally
than several small transfers, so packed commands improve write
throughput.  The following tables show packed-write results
(throughput as reported by each benchmark; higher is better).

Type A:
test     none |  packed
iozone   25.8 |  31
tiotest  27.6 |  31.2
lmdd     31.2 |  35.4

Type B:
test     none |  packed
iozone   44.1 |  51.1
tiotest  47.9 |  52.5
lmdd     51.6 |  59.2

Type C:
test     none |  packed
iozone   19.5 |  32
tiotest  19.9 |  34.5
lmdd     22.8 |  40.7

Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Reviewed-by: Maya Erez <merez@codeaurora.org>
Reviewed-by: Namjae Jeon <linkinjeon@gmail.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent abd9ac14
This diff is collapsed.
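The collapsed diff holds the bulk of the change, in the block driver, where the packed header that the queue code below maps is actually built. As a rough sketch of that logic, based on the eMMC 4.5 packed-command header format (the PACKED_CMD_* values and exact field choices here are illustrative, not quoted from the hidden diff):

	#define PACKED_CMD_VER	0x01	/* illustrative values */
	#define PACKED_CMD_WR	0x02

	/*
	 * Sketch: building a packed-write header.  Word 0 carries the
	 * header version, command type and number of packed entries;
	 * each following pair of words holds one request's CMD23
	 * argument (block count plus flags) and its CMD25 start address.
	 */
	static void sketch_packed_hdr_prep(struct mmc_packed *packed,
					   struct mmc_card *card)
	{
		struct request *prq;
		int i = 1;	/* entries start after the header word */

		packed->cmd_hdr[0] = (packed->nr_entries << 16) |
				     (PACKED_CMD_WR << 8) | PACKED_CMD_VER;

		list_for_each_entry(prq, &packed->list, queuelist) {
			/* CMD23 argument: block count of this request */
			packed->cmd_hdr[i * 2] = blk_rq_sectors(prq);
			/* CMD25 argument: its start address */
			packed->cmd_hdr[i * 2 + 1] = mmc_card_blockaddr(card) ?
					blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
			i++;
		}
	}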
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -362,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+	int ret = 0;
+
+	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_cur->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+			mmc_card_name(card));
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_prev->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+			mmc_card_name(card));
+		kfree(mqrq_cur->packed);
+		mqrq_cur->packed = NULL;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&mqrq_cur->packed->list);
+	INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+	return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+	kfree(mqrq_cur->packed);
+	mqrq_cur->packed = NULL;
+	kfree(mqrq_prev->packed);
+	mqrq_prev->packed = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -406,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+					    struct mmc_packed *packed,
+					    struct scatterlist *sg,
+					    enum mmc_packed_type cmd_type)
+{
+	struct scatterlist *__sg = sg;
+	unsigned int sg_len = 0;
+	struct request *req;
+
+	if (mmc_packed_wr(cmd_type)) {
+		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+		unsigned int len, remain, offset = 0;
+		u8 *buf = (u8 *)packed->cmd_hdr;
+
+		remain = hdr_sz;
+		do {
+			len = min(remain, max_seg_sz);
+			sg_set_buf(__sg, buf + offset, len);
+			offset += len;
+			remain -= len;
+			(__sg++)->page_link &= ~0x02;
+			sg_len++;
+		} while (remain);
+	}
+
+	list_for_each_entry(req, &packed->list, queuelist) {
+		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+		__sg = sg + (sg_len - 1);
+		(__sg++)->page_link &= ~0x02;
+	}
+
+	sg_mark_end(sg + (sg_len - 1));
+
+	return sg_len;
+}
+
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -414,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
+	enum mmc_packed_type cmd_type;
 	int i;
 
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	cmd_type = mqrq->cmd_type;
+
+	if (!mqrq->bounce_buf) {
+		if (mmc_packed_cmd(cmd_type))
+			return mmc_queue_packed_map_sg(mq, mqrq->packed,
+						       mqrq->sg, cmd_type);
+		else
+			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	}
 
 	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	if (mmc_packed_cmd(cmd_type))
+		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+						 mqrq->bounce_sg, cmd_type);
+	else
+		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
...
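One subtlety in mmc_queue_packed_map_sg() above: blk_rq_map_sg() terminates the list it builds by setting the end bit (0x02) in page_link on the final entry, so when the mappings of several requests are chained into one scatterlist, each intermediate end marker has to be cleared and the combined list terminated exactly once. A minimal standalone sketch of that pattern (same-era kernel scatterlist API):

	#include <linux/scatterlist.h>

	/*
	 * Concatenate independently terminated sg runs: clear the stale
	 * end bit on every entry except the last, then mark the true
	 * end exactly once.
	 */
	static void sketch_join_sg(struct scatterlist *sg, unsigned int total)
	{
		unsigned int i;

		for (i = 0; i < total - 1; i++)
			sg[i].page_link &= ~0x02;	/* not the end any more */

		sg_mark_end(&sg[total - 1]);		/* single terminator */
	}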
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request {
 	struct mmc_data		data;
 };
 
+enum mmc_packed_type {
+	MMC_PACKED_NONE = 0,
+	MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+	struct list_head	list;
+	u32			cmd_hdr[1024];
+	unsigned int		blocks;
+	u8			nr_entries;
+	u8			retries;
+	s16			idx_failure;
+};
+
 struct mmc_queue_req {
 	struct request		*req;
 	struct mmc_blk_request	brq;
@@ -20,6 +37,8 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	mmc_active;
+	enum mmc_packed_type	cmd_type;
+	struct mmc_packed	*packed;
 };
 
 struct mmc_queue {
@@ -49,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
 #endif
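The two helpers declared here pair up as allocate/free for the per-queue packed state; mmc_packed_clean() NULLs the pointers after kfree(), so it is safe even if called more than once. A hypothetical caller (the real call sites live in the collapsed block-driver diff):

	/* Hypothetical setup path: enable packed state for an eMMC queue. */
	static int sketch_queue_setup(struct mmc_queue *mq, struct mmc_card *card)
	{
		int ret = mmc_packed_init(mq, card);	/* allocates both headers */

		if (ret)
			return ret;	/* -ENOMEM: run without packed commands */
		return 0;
	}

	static void sketch_queue_teardown(struct mmc_queue *mq)
	{
		mmc_packed_clean(mq);	/* frees and clears both headers */
	}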
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -363,6 +363,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
 	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
 			ext_csd, 512);
 }
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
 
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
 {
...
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -310,6 +310,11 @@ static inline void mmc_part_add(struct mmc_card *card, unsigned int size,
 	card->nr_parts++;
 }
 
+static inline bool mmc_large_sector(struct mmc_card *card)
+{
+	return card->ext_csd.data_sector_size == 4096;
+}
+
 /*
  * The world is not perfect and supplies us with broken mmc/sdio devices.
  * For at least some of these bugs we need a work-around.
...
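mmc_large_sector() keys off the 4 KiB native sector size that eMMC 4.5 devices can report, and is what makes the packed header in mmc_queue_packed_map_sg() above span 4096 rather than 512 bytes. A trivial usage sketch (hdr_blocks here is an illustrative derived value, not quoted from the diff):

	/* Sketch: the packed header occupies one native sector. */
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int hdr_blocks = hdr_sz / 512;	/* blocks announced via CMD23 */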
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -18,6 +18,9 @@ struct mmc_request;
 struct mmc_command {
 	u32			opcode;
 	u32			arg;
+#define MMC_CMD23_ARG_REL_WR	(1 << 31)
+#define MMC_CMD23_ARG_PACKED	((0 << 31) | (1 << 30))
+#define MMC_CMD23_ARG_TAG_REQ	(1 << 29)
 	u32			resp[4];
 	unsigned int		flags;		/* expected response type */
 #define MMC_RSP_PRESENT	(1 << 0)
@@ -148,6 +151,7 @@ extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
 extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
 extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool);
 extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
+extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd);
 
 #define MMC_ERASE_ARG		0x00000000
 #define MMC_SECURE_ERASE_ARG	0x80000000
...
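The three MMC_CMD23_ARG_* bits ride in the argument of CMD23 (SET_BLOCK_COUNT). For a packed transfer the packed bit (bit 30) is set and the count covers the header plus all packed data. A hedged sketch of composing it (hdr_blocks is a stand-in for the header's size in blocks, as above):

	#include <linux/mmc/core.h>
	#include <linux/mmc/mmc.h>

	/* Sketch: CMD23 announcing a packed write of header + data blocks. */
	static void sketch_prep_sbc(struct mmc_command *sbc,
				    struct mmc_packed *packed,
				    unsigned int hdr_blocks)
	{
		sbc->opcode = MMC_SET_BLOCK_COUNT;
		sbc->arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
		sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
	}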
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -465,6 +465,11 @@ static inline int mmc_host_uhs(struct mmc_host *host)
 		MMC_CAP_UHS_DDR50);
 }
 
+static inline int mmc_host_packed_wr(struct mmc_host *host)
+{
+	return host->caps2 & MMC_CAP2_PACKED_WR;
+}
+
 #ifdef CONFIG_MMC_CLKGATE
 void mmc_host_clk_hold(struct mmc_host *host);
 void mmc_host_clk_release(struct mmc_host *host);
...
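MMC_CAP2_PACKED_WR is the host-side opt-in (defined in the collapsed part of this commit); the device side is advertised through EXT_CSD. A hypothetical gate combining the two, assuming the max_packed_writes field this commit adds to ext_csd parsing:

	/* Hypothetical check: packed writes need host and card support. */
	static bool sketch_packed_wr_allowed(struct mmc_card *card)
	{
		return mmc_host_packed_wr(card->host) &&	/* host cap   */
		       card->ext_csd.max_packed_writes > 0;	/* device cap */
	}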