Commit 54d49d77 authored by Per Forlin, committed by Chris Ball

mmc: block: add a block request prepare function

Break out code from mmc_blk_issue_rw_rq to create a block request prepare
function. This doesn't change any functionality, but it helps when handling
more than one active block request (see the usage sketch below the tags).
Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 97868a2b
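Why the split matters: once preparation is a separate step, an issuing loop can
build request N+1 on the CPU while the controller is still transferring request
N. Below is a minimal sketch of that pattern. The helpers (rq_prep, rq_start,
rq_wait, issue_pipelined) are hypothetical names for illustration only, not the
kernel API introduced by this patch:

	/*
	 * Illustrative only: shows how a separate prepare step lets the
	 * next request be built while the current one is in flight.
	 */
	struct blk_req {
		int id;
		int prepared;
	};

	static void rq_prep(struct blk_req *rq)
	{
		/* build command/descriptor state up front (pure CPU work) */
		rq->prepared = 1;
	}

	static void rq_start(struct blk_req *rq)
	{
		/* hand the prepared request to the controller; DMA runs async */
	}

	static void rq_wait(struct blk_req *rq)
	{
		/* block until this request's completion interrupt */
	}

	static void issue_pipelined(struct blk_req *reqs, int n)
	{
		int i;

		rq_prep(&reqs[0]);			/* nothing in flight yet */
		for (i = 0; i < n; i++) {
			rq_start(&reqs[i]);
			if (i + 1 < n)
				rq_prep(&reqs[i + 1]);	/* overlaps transfer i */
			rq_wait(&reqs[i]);
		}
	}

With prep inlined in the issue loop (as before this patch), the CPU work and
the transfer are forced to run back to back instead of overlapping.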
@@ -812,12 +812,15 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	 R1_CC_ERROR |		/* Card controller error */		\
 	 R1_ERROR)		/* General/unknown error */

-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
 {
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
 	struct mmc_blk_data *md = mq->data;
-	struct mmc_card *card = md->queue.card;
-	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
-	int ret = 1, disable_multi = 0, retry = 0;

 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
@@ -828,119 +831,126 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		(rq_data_dir(req) == WRITE) &&
 		(md->flags & MMC_BLK_REL_WR);

-	do {
-		u32 readcmd, writecmd;
-
-		memset(brq, 0, sizeof(struct mmc_blk_request));
-		brq->mrq.cmd = &brq->cmd;
-		brq->mrq.data = &brq->data;
-
-		brq->cmd.arg = blk_rq_pos(req);
-		if (!mmc_card_blockaddr(card))
-			brq->cmd.arg <<= 9;
-		brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq->data.blksz = 512;
-		brq->stop.opcode = MMC_STOP_TRANSMISSION;
-		brq->stop.arg = 0;
-		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq->data.blocks = blk_rq_sectors(req);
-
-		/*
-		 * The block layer doesn't support all sector count
-		 * restrictions, so we need to be prepared for too big
-		 * requests.
-		 */
-		if (brq->data.blocks > card->host->max_blk_count)
-			brq->data.blocks = card->host->max_blk_count;
-
-		/*
-		 * After a read error, we redo the request one sector at a time
-		 * in order to accurately determine which sectors can be read
-		 * successfully.
-		 */
-		if (disable_multi && brq->data.blocks > 1)
-			brq->data.blocks = 1;
-
-		if (brq->data.blocks > 1 || do_rel_wr) {
-			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
-			 */
-			if (!mmc_host_is_spi(card->host) ||
-			    rq_data_dir(req) == READ)
-				brq->mrq.stop = &brq->stop;
-			readcmd = MMC_READ_MULTIPLE_BLOCK;
-			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-		} else {
-			brq->mrq.stop = NULL;
-			readcmd = MMC_READ_SINGLE_BLOCK;
-			writecmd = MMC_WRITE_BLOCK;
-		}
-		if (rq_data_dir(req) == READ) {
-			brq->cmd.opcode = readcmd;
-			brq->data.flags |= MMC_DATA_READ;
-		} else {
-			brq->cmd.opcode = writecmd;
-			brq->data.flags |= MMC_DATA_WRITE;
-		}
-
-		if (do_rel_wr)
-			mmc_apply_rel_rw(brq, card, req);
-
-		/*
-		 * Pre-defined multi-block transfers are preferable to
-		 * open ended-ones (and necessary for reliable writes).
-		 * However, it is not sufficient to just send CMD23,
-		 * and avoid the final CMD12, as on an error condition
-		 * CMD12 (stop) needs to be sent anyway. This, coupled
-		 * with Auto-CMD23 enhancements provided by some
-		 * hosts, means that the complexity of dealing
-		 * with this is best left to the host. If CMD23 is
-		 * supported by card and host, we'll fill sbc in and let
-		 * the host deal with handling it correctly. This means
-		 * that for hosts that don't expose MMC_CAP_CMD23, no
-		 * change of behavior will be observed.
-		 *
-		 * N.B: Some MMC cards experience perf degradation.
-		 * We'll avoid using CMD23-bounded multiblock writes for
-		 * these, while retaining features like reliable writes.
-		 */
-
-		if ((md->flags & MMC_BLK_CMD23) &&
-		    mmc_op_multi(brq->cmd.opcode) &&
-		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-			brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
-			brq->sbc.arg = brq->data.blocks |
-				(do_rel_wr ? (1 << 31) : 0);
-			brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-			brq->mrq.sbc = &brq->sbc;
-		}
-
-		mmc_set_data_timeout(&brq->data, card);
-
-		brq->data.sg = mq->mqrq_cur->sg;
-		brq->data.sg_len = mmc_queue_map_sg(mq, mq->mqrq_cur);
-
-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
-		 */
-		if (brq->data.blocks != blk_rq_sectors(req)) {
-			int i, data_size = brq->data.blocks << 9;
-			struct scatterlist *sg;
-
-			for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq->data.sg_len = i;
-		}
-
-		mmc_queue_bounce_pre(mq->mqrq_cur);
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+	brq->data.blksz = 512;
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	brq->data.blocks = blk_rq_sectors(req);
+
+	/*
+	 * The block layer doesn't support all sector count
+	 * restrictions, so we need to be prepared for too big
+	 * requests.
+	 */
+	if (brq->data.blocks > card->host->max_blk_count)
+		brq->data.blocks = card->host->max_blk_count;
+
+	/*
+	 * After a read error, we redo the request one sector at a time
+	 * in order to accurately determine which sectors can be read
+	 * successfully.
+	 */
+	if (disable_multi && brq->data.blocks > 1)
+		brq->data.blocks = 1;
+
+	if (brq->data.blocks > 1 || do_rel_wr) {
+		/* SPI multiblock writes terminate using a special
+		 * token, not a STOP_TRANSMISSION request.
+		 */
+		if (!mmc_host_is_spi(card->host) ||
+		    rq_data_dir(req) == READ)
+			brq->mrq.stop = &brq->stop;
+		readcmd = MMC_READ_MULTIPLE_BLOCK;
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+	} else {
+		brq->mrq.stop = NULL;
+		readcmd = MMC_READ_SINGLE_BLOCK;
+		writecmd = MMC_WRITE_BLOCK;
+	}
+	if (rq_data_dir(req) == READ) {
+		brq->cmd.opcode = readcmd;
+		brq->data.flags |= MMC_DATA_READ;
+	} else {
+		brq->cmd.opcode = writecmd;
+		brq->data.flags |= MMC_DATA_WRITE;
+	}
+
+	if (do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
+
+	/*
+	 * Pre-defined multi-block transfers are preferable to
+	 * open ended-ones (and necessary for reliable writes).
+	 * However, it is not sufficient to just send CMD23,
+	 * and avoid the final CMD12, as on an error condition
+	 * CMD12 (stop) needs to be sent anyway. This, coupled
+	 * with Auto-CMD23 enhancements provided by some
+	 * hosts, means that the complexity of dealing
+	 * with this is best left to the host. If CMD23 is
+	 * supported by card and host, we'll fill sbc in and let
+	 * the host deal with handling it correctly. This means
+	 * that for hosts that don't expose MMC_CAP_CMD23, no
+	 * change of behavior will be observed.
+	 *
+	 * N.B: Some MMC cards experience perf degradation.
+	 * We'll avoid using CMD23-bounded multiblock writes for
+	 * these, while retaining features like reliable writes.
+	 */
+
+	if ((md->flags & MMC_BLK_CMD23) &&
+	    mmc_op_multi(brq->cmd.opcode) &&
+	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+		brq->sbc.arg = brq->data.blocks |
+			(do_rel_wr ? (1 << 31) : 0);
+		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		brq->mrq.sbc = &brq->sbc;
+	}
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
+		}
+		brq->data.sg_len = i;
+	}
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+	int ret = 1, disable_multi = 0, retry = 0;
+
+	do {
+		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, disable_multi, mq);
 		mmc_wait_for_req(card->host, &brq->mrq);

 		mmc_queue_bounce_post(mq->mqrq_cur);
...