Commit 031b8140 authored by Linus Torvalds

Merge tag 'mmc-v4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC fixes from Ulf Hansson:
 "MMC core:

   - Fix driver strength selection when selecting hs400es

   - Delete bounce buffer handling:

     This change fixes a problem related to how bounce buffers are being
     allocated. However, instead of trying to fix that, let's just
     remove the mmc bounce buffer code altogether, as it has practically
     no use.

  MMC host:

   - meson-gx: A couple of fixes related to clock/phase/tuning

   - sdhci-xenon: Fix clock resource by adding an optional bus clock"

* tag 'mmc-v4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc:
  mmc: sdhci-xenon: Fix clock resource by adding an optional bus clock
  mmc: meson-gx: include tx phase in the tuning process
  mmc: meson-gx: fix rx phase reset
  mmc: meson-gx: make sure the clock is rounded down
  mmc: Delete bounce buffer handling
  mmc: core: add driver strength selection when selecting hs400es
parents 1c86f2e4 bb16ea17
...@@ -16,11 +16,13 @@ Required Properties: ...@@ -16,11 +16,13 @@ Required Properties:
- clocks: - clocks:
Array of clocks required for SDHC. Array of clocks required for SDHC.
Require at least input clock for Xenon IP core. Require at least input clock for Xenon IP core. For Armada AP806 and
CP110, the AXI clock is also mandatory.
- clock-names: - clock-names:
Array of names corresponding to clocks property. Array of names corresponding to clocks property.
The input clock for Xenon IP core should be named as "core". The input clock for Xenon IP core should be named as "core".
The input clock for the AXI bus must be named as "axi".
- reg: - reg:
* For "marvell,armada-3700-sdhci", two register areas. * For "marvell,armada-3700-sdhci", two register areas.
...@@ -106,8 +108,8 @@ Example: ...@@ -106,8 +108,8 @@ Example:
compatible = "marvell,armada-ap806-sdhci"; compatible = "marvell,armada-ap806-sdhci";
reg = <0xaa0000 0x1000>; reg = <0xaa0000 0x1000>;
interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH> interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
clocks = <&emmc_clk>; clocks = <&emmc_clk>,<&axi_clk>;
clock-names = "core"; clock-names = "core", "axi";
bus-width = <4>; bus-width = <4>;
marvell,xenon-phy-slow-mode; marvell,xenon-phy-slow-mode;
marvell,xenon-tun-count = <11>; marvell,xenon-tun-count = <11>;
...@@ -126,8 +128,8 @@ Example: ...@@ -126,8 +128,8 @@ Example:
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH> interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
vqmmc-supply = <&sd_vqmmc_regulator>; vqmmc-supply = <&sd_vqmmc_regulator>;
vmmc-supply = <&sd_vmmc_regulator>; vmmc-supply = <&sd_vmmc_regulator>;
clocks = <&sdclk>; clocks = <&sdclk>, <&axi_clk>;
clock-names = "core"; clock-names = "core", "axi";
bus-width = <4>; bus-width = <4>;
marvell,xenon-tun-count = <9>; marvell,xenon-tun-count = <9>;
}; };
......
...@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, ...@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
} }
mqrq->areq.mrq = &brq->mrq; mqrq->areq.mrq = &brq->mrq;
mmc_queue_bounce_pre(mqrq);
} }
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
...@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) ...@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
brq = &mq_rq->brq; brq = &mq_rq->brq;
old_req = mmc_queue_req_to_req(mq_rq); old_req = mmc_queue_req_to_req(mq_rq);
type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
switch (status) { switch (status) {
case MMC_BLK_SUCCESS: case MMC_BLK_SUCCESS:
......
...@@ -1286,6 +1286,23 @@ int mmc_hs400_to_hs200(struct mmc_card *card) ...@@ -1286,6 +1286,23 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
return err; return err;
} }
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
drive_strength = mmc_select_drive_strength(card,
card->ext_csd.hs200_max_dtr,
card_drv_type, &drv_type);
card->drive_strength = drive_strength;
if (drv_type)
mmc_set_driver_type(card->host, drv_type);
}
static int mmc_select_hs400es(struct mmc_card *card) static int mmc_select_hs400es(struct mmc_card *card)
{ {
struct mmc_host *host = card->host; struct mmc_host *host = card->host;
...@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card) ...@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err; goto out_err;
} }
mmc_select_driver_type(card);
/* Switch card to HS400 */ /* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 | val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT; card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
...@@ -1374,23 +1393,6 @@ static int mmc_select_hs400es(struct mmc_card *card) ...@@ -1374,23 +1393,6 @@ static int mmc_select_hs400es(struct mmc_card *card)
return err; return err;
} }
static void mmc_select_driver_type(struct mmc_card *card)
{
int card_drv_type, drive_strength, drv_type;
card_drv_type = card->ext_csd.raw_driver_strength |
mmc_driver_type_mask(0);
drive_strength = mmc_select_drive_strength(card,
card->ext_csd.hs200_max_dtr,
card_drv_type, &drv_type);
card->drive_strength = drive_strength;
if (drv_type)
mmc_set_driver_type(card->host, drv_type);
}
/* /*
* For device supporting HS200 mode, the following sequence * For device supporting HS200 mode, the following sequence
* should be done before executing the tuning process. * should be done before executing the tuning process.
......
...@@ -23,8 +23,6 @@ ...@@ -23,8 +23,6 @@
#include "core.h" #include "core.h"
#include "card.h" #include "card.h"
#define MMC_QUEUE_BOUNCESZ 65536
/* /*
* Prepare a MMC request. This just filters out odd stuff. * Prepare a MMC request. This just filters out odd stuff.
*/ */
...@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q, ...@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
} }
static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
{
unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
return 0;
if (bouncesz > host->max_req_size)
bouncesz = host->max_req_size;
if (bouncesz > host->max_seg_size)
bouncesz = host->max_seg_size;
if (bouncesz > host->max_blk_count * 512)
bouncesz = host->max_blk_count * 512;
if (bouncesz <= 512)
return 0;
return bouncesz;
}
/** /**
* mmc_init_request() - initialize the MMC-specific per-request data * mmc_init_request() - initialize the MMC-specific per-request data
* @q: the request queue * @q: the request queue
...@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req, ...@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
struct mmc_card *card = mq->card; struct mmc_card *card = mq->card;
struct mmc_host *host = card->host; struct mmc_host *host = card->host;
if (card->bouncesz) { mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); if (!mq_rq->sg)
if (!mq_rq->bounce_buf) return -ENOMEM;
return -ENOMEM;
if (card->bouncesz > 512) {
mq_rq->sg = mmc_alloc_sg(1, gfp);
if (!mq_rq->sg)
return -ENOMEM;
mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
gfp);
if (!mq_rq->bounce_sg)
return -ENOMEM;
}
} else {
mq_rq->bounce_buf = NULL;
mq_rq->bounce_sg = NULL;
mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
if (!mq_rq->sg)
return -ENOMEM;
}
return 0; return 0;
} }
...@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) ...@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
{ {
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
/* It is OK to kfree(NULL) so this will be smooth */
kfree(mq_rq->bounce_sg);
mq_rq->bounce_sg = NULL;
kfree(mq_rq->bounce_buf);
mq_rq->bounce_buf = NULL;
kfree(mq_rq->sg); kfree(mq_rq->sg);
mq_rq->sg = NULL; mq_rq->sg = NULL;
} }
...@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, ...@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
/*
* mmc_init_request() depends on card->bouncesz so it must be calculated
* before blk_init_allocated_queue() starts allocating requests.
*/
card->bouncesz = mmc_queue_calc_bouncesz(host);
mq->card = card; mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL); mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue) if (!mq->queue)
...@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, ...@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card)) if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card); mmc_queue_setup_discard(mq->queue, card);
if (card->bouncesz) { blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); blk_queue_max_hw_sectors(mq->queue,
blk_queue_max_segments(mq->queue, card->bouncesz / 512); min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segment_size(mq->queue, card->bouncesz); blk_queue_max_segments(mq->queue, host->max_segs);
} else { blk_queue_max_segment_size(mq->queue, host->max_seg_size);
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
}
sema_init(&mq->thread_sem, 1); sema_init(&mq->thread_sem, 1);
...@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq) ...@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
*/ */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{ {
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
struct request *req = mmc_queue_req_to_req(mqrq); struct request *req = mmc_queue_req_to_req(mqrq);
int i;
if (!mqrq->bounce_buf)
return blk_rq_map_sg(mq->queue, req, mqrq->sg);
sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
mqrq->bounce_sg_len = sg_len;
buflen = 0;
for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
return 1;
}
/*
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
if (!mqrq->bounce_buf)
return;
if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
return;
sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
if (!mqrq->bounce_buf)
return;
if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
return;
sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, return blk_rq_map_sg(mq->queue, req, mqrq->sg);
mqrq->bounce_buf, mqrq->sg[0].length);
} }
...@@ -49,9 +49,6 @@ enum mmc_drv_op { ...@@ -49,9 +49,6 @@ enum mmc_drv_op {
struct mmc_queue_req { struct mmc_queue_req {
struct mmc_blk_request brq; struct mmc_blk_request brq;
struct scatterlist *sg; struct scatterlist *sg;
char *bounce_buf;
struct scatterlist *bounce_sg;
unsigned int bounce_sg_len;
struct mmc_async_req areq; struct mmc_async_req areq;
enum mmc_drv_op drv_op; enum mmc_drv_op drv_op;
int drv_op_result; int drv_op_result;
...@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, ...@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
extern void mmc_cleanup_queue(struct mmc_queue *); extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *); extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *); extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *, extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *); struct mmc_queue_req *);
extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_access_rpmb(struct mmc_queue *); extern int mmc_access_rpmb(struct mmc_queue *);
......
...@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) ...@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
*/ */
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF; MMC_CAP_3_3V_DDR;
if (host->use_sg) if (host->use_sg)
mmc->max_segs = 16; mmc->max_segs = 16;
......
...@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host) ...@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
div->shift = __ffs(CLK_DIV_MASK); div->shift = __ffs(CLK_DIV_MASK);
div->width = __builtin_popcountl(CLK_DIV_MASK); div->width = __builtin_popcountl(CLK_DIV_MASK);
div->hw.init = &init; div->hw.init = &init;
div->flags = (CLK_DIVIDER_ONE_BASED | div->flags = CLK_DIVIDER_ONE_BASED;
CLK_DIVIDER_ROUND_CLOSEST);
clk = devm_clk_register(host->dev, &div->hw); clk = devm_clk_register(host->dev, &div->hw);
if (WARN_ON(IS_ERR(clk))) if (WARN_ON(IS_ERR(clk)))
...@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, ...@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{ {
struct meson_host *host = mmc_priv(mmc); struct meson_host *host = mmc_priv(mmc);
int ret;
/*
* If this is the initial tuning, try to get a sane Rx starting
* phase before doing the actual tuning.
*/
if (!mmc->doing_retune) {
ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
if (ret)
return ret;
}
ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
if (ret)
return ret;
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
} }
...@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ...@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP: case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc)) if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
/* Reset phases */
clk_set_phase(host->rx_clk, 0);
clk_set_phase(host->tx_clk, 270);
break; break;
case MMC_POWER_ON: case MMC_POWER_ON:
...@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ...@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->vqmmc_enabled = true; host->vqmmc_enabled = true;
} }
/* Reset rx phase */
clk_set_phase(host->rx_clk, 0);
break; break;
} }
......
...@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev) ...@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
pxamci_init_ocr(host); pxamci_init_ocr(host);
/* mmc->caps = 0;
* This architecture used to disable bounce buffers through its
* defconfig, now it is done at runtime as a host property.
*/
mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
host->cmdat = 0; host->cmdat = 0;
if (!cpu_is_pxa25x()) { if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
......
...@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev) ...@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
{ {
struct sdhci_pltfm_host *pltfm_host; struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host; struct sdhci_host *host;
struct xenon_priv *priv;
int err; int err;
host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
...@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev) ...@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
return PTR_ERR(host); return PTR_ERR(host);
pltfm_host = sdhci_priv(host); pltfm_host = sdhci_priv(host);
priv = sdhci_pltfm_priv(pltfm_host);
/* /*
* Link Xenon specific mmc_host_ops function, * Link Xenon specific mmc_host_ops function,
...@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev) ...@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
if (err) if (err)
goto free_pltfm; goto free_pltfm;
priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
if (IS_ERR(priv->axi_clk)) {
err = PTR_ERR(priv->axi_clk);
if (err == -EPROBE_DEFER)
goto err_clk;
} else {
err = clk_prepare_enable(priv->axi_clk);
if (err)
goto err_clk;
}
err = mmc_of_parse(host->mmc); err = mmc_of_parse(host->mmc);
if (err) if (err)
goto err_clk; goto err_clk_axi;
sdhci_get_of_property(pdev); sdhci_get_of_property(pdev);
...@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev) ...@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
/* Xenon specific dt parse */ /* Xenon specific dt parse */
err = xenon_probe_dt(pdev); err = xenon_probe_dt(pdev);
if (err) if (err)
goto err_clk; goto err_clk_axi;
err = xenon_sdhc_prepare(host); err = xenon_sdhc_prepare(host);
if (err) if (err)
goto err_clk; goto err_clk_axi;
pm_runtime_get_noresume(&pdev->dev); pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev); pm_runtime_set_active(&pdev->dev);
...@@ -527,6 +540,8 @@ static int xenon_probe(struct platform_device *pdev) ...@@ -527,6 +540,8 @@ static int xenon_probe(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev); pm_runtime_put_noidle(&pdev->dev);
xenon_sdhc_unprepare(host); xenon_sdhc_unprepare(host);
err_clk_axi:
clk_disable_unprepare(priv->axi_clk);
err_clk: err_clk:
clk_disable_unprepare(pltfm_host->clk); clk_disable_unprepare(pltfm_host->clk);
free_pltfm: free_pltfm:
...@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev) ...@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
{ {
struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev); pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
...@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev) ...@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
sdhci_remove_host(host, 0); sdhci_remove_host(host, 0);
xenon_sdhc_unprepare(host); xenon_sdhc_unprepare(host);
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(pltfm_host->clk); clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev); sdhci_pltfm_free(pdev);
......
...@@ -83,6 +83,7 @@ struct xenon_priv { ...@@ -83,6 +83,7 @@ struct xenon_priv {
unsigned char bus_width; unsigned char bus_width;
unsigned char timing; unsigned char timing;
unsigned int clock; unsigned int clock;
struct clk *axi_clk;
int phy_type; int phy_type;
/* /*
......
...@@ -316,7 +316,7 @@ struct mmc_host { ...@@ -316,7 +316,7 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */
#define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */ /* (1 << 21) is free for reuse */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment