Commit 03d640ae authored by Linus Walleij, committed by Ulf Hansson

mmc: block: delete packed command support

I've had it with this code now.

The packed command support is a complex hurdle in the MMC/SD block
layer: some 500 lines of code introduced in 2013 in

commit ce39f9d1 ("mmc: support packed write command for eMMC4.5
devices")
commit abd9ac14 ("mmc: add packed command feature of eMMC4.5")

...and since then it has been rotting. The original author of the
code has disappeared from the community, and their mail address is
bouncing.

For the code to be exercised, the host must flag that it supports
packed commands, so in mmc_blk_prep_packed_list(), which is called
for every single request, the following construction appears:

u8 max_packed_rw = 0;

/* Only writes qualify, and only if the host advertises support */
if ((rq_data_dir(cur) == WRITE) &&
    mmc_host_packed_wr(card->host))
        max_packed_rw = card->ext_csd.max_packed_writes;

/* No packed support: fall back to the ordinary request path */
if (max_packed_rw == 0)
    goto no_packed;

From this, the following can be deduced:

- Only WRITE commands can actually be packed, so the solution is
  only half-done: we support packed WRITE but not packed READ.
  In three years, the packed command support has never been
  finished off by adding read support!

- mmc_host_packed_wr() is just a static inline that checks
  host->caps2 & MMC_CAP2_PACKED_WR. The problem with this is
  that NO upstream host sets this capability flag: no driver
  in the kernel is using it, and we can't test it. Packed
  command may be supported in out-of-tree code, but I doubt
  it. I also doubt that the code even works anymore, after
  the other refactorings in the MMC block layer; who would
  notice if a patch broke packed commands? No one. (A sketch
  of what a driver would have to do to enable the feature
  follows this list.)

- There is no Device Tree binding or code to mark a host as
  supporting packed read or write commands, just this flag in
  caps2, so there are certainly no DT systems using it either.
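
For illustration, this is roughly what a host driver would have had
to do in its probe path for the packed-write code to ever run (a
hypothetical sketch; the "foo" names are invented, and no in-tree
driver contains anything like it):

static int foo_mmc_probe(struct platform_device *pdev)
{
        struct mmc_host *mmc;

        mmc = mmc_alloc_host(sizeof(struct foo_priv), &pdev->dev);
        if (!mmc)
                return -ENOMEM;

        /* Hypothetical opt-in: advertise eMMC 4.5 packed writes */
        mmc->caps2 |= MMC_CAP2_PACKED_WR;

        return mmc_add_host(mmc);
}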

It has other problems as well: mmc_blk_prep_packed_list() is
speculatively picking requests out of the request queue with
blk_fetch_request(), which makes the MMC/SD stack harder to
convert to the multiqueue block layer. Deleting it removes
that obstacle.
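
Simplified, that speculative pattern looks something like this (a
sketch of the shape of the code, not the literal function):

        /* Sketch: pull further requests off the legacy queue */
        while (packed->nr_entries < max_packed_rw) {
                struct request *next = blk_fetch_request(q);

                if (!next)
                        break;
                list_add_tail(&next->queuelist, &packed->list);
                packed->nr_entries++;
        }

blk-mq has no equivalent of blk_fetch_request(): requests are handed
to the driver one at a time through ->queue_rq(), so this pattern
cannot be carried over.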

The way I see it, this is just cruft littering the MMC/SD
stack.

Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Maya Erez <qca_merez@qca.qualcomm.com>
Acked-by: Jaehoon Chung <jh80.chung@samsung.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent d3c6aac3
@@ -406,41 +406,6 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
-static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
-					    struct mmc_packed *packed,
-					    struct scatterlist *sg,
-					    enum mmc_packed_type cmd_type)
-{
-	struct scatterlist *__sg = sg;
-	unsigned int sg_len = 0;
-	struct request *req;
-
-	if (mmc_packed_wr(cmd_type)) {
-		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
-		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
-		unsigned int len, remain, offset = 0;
-		u8 *buf = (u8 *)packed->cmd_hdr;
-
-		remain = hdr_sz;
-		do {
-			len = min(remain, max_seg_sz);
-			sg_set_buf(__sg, buf + offset, len);
-			offset += len;
-			remain -= len;
-			sg_unmark_end(__sg++);
-			sg_len++;
-		} while (remain);
-	}
-
-	list_for_each_entry(req, &packed->list, queuelist) {
-		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
-		__sg = sg + (sg_len - 1);
-		sg_unmark_end(__sg++);
-	}
-
-	sg_mark_end(sg + (sg_len - 1));
-
-	return sg_len;
-}
-
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -449,26 +414,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
-	enum mmc_packed_type cmd_type;
 	int i;
 
-	cmd_type = mqrq->cmd_type;
-
-	if (!mqrq->bounce_buf) {
-		if (mmc_packed_cmd(cmd_type))
-			return mmc_queue_packed_map_sg(mq, mqrq->packed,
-						       mqrq->sg, cmd_type);
-		else
-			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
-	}
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
 
 	BUG_ON(!mqrq->bounce_sg);
 
-	if (mmc_packed_cmd(cmd_type))
-		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
-						 mqrq->bounce_sg, cmd_type);
-	else
-		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
@@ -22,23 +22,6 @@ struct mmc_blk_request {
 	int			retune_retry_done;
 };
 
-enum mmc_packed_type {
-	MMC_PACKED_NONE = 0,
-	MMC_PACKED_WRITE,
-};
-
-#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
-#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)
-
-struct mmc_packed {
-	struct list_head	list;
-	__le32			cmd_hdr[1024];
-	unsigned int		blocks;
-	u8			nr_entries;
-	u8			retries;
-	s16			idx_failure;
-};
-
 struct mmc_queue_req {
 	struct request		*req;
 	struct mmc_blk_request	brq;
@@ -47,8 +30,6 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	mmc_active;
-	enum mmc_packed_type	cmd_type;
-	struct mmc_packed	*packed;
 };
 
 struct mmc_queue {
@@ -494,11 +494,6 @@ static inline int mmc_host_uhs(struct mmc_host *host)
 		 MMC_CAP_UHS_DDR50);
 }
 
-static inline int mmc_host_packed_wr(struct mmc_host *host)
-{
-	return host->caps2 & MMC_CAP2_PACKED_WR;
-}
-
 static inline int mmc_card_hs(struct mmc_card *card)
 {
 	return card->host->ios.timing == MMC_TIMING_SD_HS ||