Commit 88e1f06c authored by Linus Torvalds

Merge bk://bk.arm.linux.org.uk/linux-2.6-mmc
into ppc970.osdl.org:/home/torvalds/v2.6/linux

parents 32f6e431 b0e7d727
 /*
  * linux/drivers/mmc/mmc.c
  *
- * Copyright (C) 2003 Russell King, All Rights Reserved.
+ * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -14,6 +14,7 @@
 #include <linux/completion.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/pagemap.h>
 #include <linux/err.h>
 #include <linux/mmc/card.h>
@@ -212,7 +213,7 @@ int __mmc_claim_host(struct mmc_host *host, struct mmc_card *card)
 		cmd.opcode = MMC_SELECT_CARD;
 		cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
+		cmd.flags = MMC_RSP_R1;
 		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
 	}
@@ -430,7 +431,7 @@ static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
 	cmd.opcode = MMC_SEND_OP_COND;
 	cmd.arg = ocr;
-	cmd.flags = MMC_RSP_SHORT;
+	cmd.flags = MMC_RSP_R3;
 	for (i = 100; i; i--) {
 		err = mmc_wait_for_cmd(host, &cmd, 0);
@@ -468,7 +469,7 @@ static void mmc_discover_cards(struct mmc_host *host)
 		cmd.opcode = MMC_ALL_SEND_CID;
 		cmd.arg = 0;
-		cmd.flags = MMC_RSP_LONG | MMC_RSP_CRC;
+		cmd.flags = MMC_RSP_R2;
 		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
 		if (err == MMC_ERR_TIMEOUT) {
@@ -497,7 +498,7 @@ static void mmc_discover_cards(struct mmc_host *host)
 		cmd.opcode = MMC_SET_RELATIVE_ADDR;
 		cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
+		cmd.flags = MMC_RSP_R1;
 		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
 		if (err != MMC_ERR_NONE)
@@ -518,7 +519,7 @@ static void mmc_read_csds(struct mmc_host *host)
 		cmd.opcode = MMC_SEND_CSD;
 		cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_LONG | MMC_RSP_CRC;
+		cmd.flags = MMC_RSP_R2;
 		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
 		if (err != MMC_ERR_NONE) {
@@ -566,7 +567,7 @@ static void mmc_check_cards(struct mmc_host *host)
 		cmd.opcode = MMC_SEND_STATUS;
 		cmd.arg = card->rca << 16;
-		cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
+		cmd.flags = MMC_RSP_R1;
 		err = mmc_wait_for_cmd(host, &cmd, CMD_RETRIES);
 		if (err == MMC_ERR_NONE)
@@ -715,14 +716,21 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
 	if (host) {
 		memset(host, 0, sizeof(struct mmc_host) + extra);
-		host->priv = host + 1;
 		spin_lock_init(&host->lock);
 		init_waitqueue_head(&host->wq);
 		INIT_LIST_HEAD(&host->cards);
 		INIT_WORK(&host->detect, mmc_rescan, host);
 		host->dev = dev;
+		/*
+		 * By default, hosts do not support SGIO or large requests.
+		 * They have to set these according to their abilities.
+		 */
+		host->max_hw_segs = 1;
+		host->max_phys_segs = 1;
+		host->max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+		host->max_seg_size = PAGE_CACHE_SIZE;
 	}
 	return host;
...
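The defaults above are deliberately conservative; the intent of the comment is that a host controller driver raises them from its probe routine after calling mmc_alloc_host(). A minimal sketch of such an override follows (hypothetical driver and private-structure name, limits chosen purely for illustration):

static int example_mci_probe(struct device *dev)
{
    struct mmc_host *mmc;

    /* the "extra" bytes carry the driver's private state, as mmci/pxamci do */
    mmc = mmc_alloc_host(sizeof(struct example_mci_priv), dev);
    if (!mmc)
        return -ENOMEM;

    /*
     * Override the conservative defaults set by mmc_alloc_host():
     * this imaginary controller can take a 16-entry scatterlist
     * and up to 128 sectors (64KiB) per request.
     */
    mmc->max_hw_segs   = 16;
    mmc->max_phys_segs = 16;
    mmc->max_sectors   = 128;
    mmc->max_seg_size  = mmc->max_sectors << 9;

    return mmc_add_host(mmc);
}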
@@ -43,7 +43,6 @@
 #define MMC_SHIFT	3
 static int mmc_major;
-static int maxsectors = 8;
 /*
  * There is one mmc_blk_data per slot.
@@ -180,22 +179,22 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		brq.mrq.data = &brq.data;
 		brq.cmd.arg = req->sector << 9;
-		brq.cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
+		brq.cmd.flags = MMC_RSP_R1;
 		brq.data.req = req;
 		brq.data.timeout_ns = card->csd.tacc_ns * 10;
 		brq.data.timeout_clks = card->csd.tacc_clks * 10;
 		brq.data.blksz_bits = md->block_bits;
-		brq.data.blocks = req->current_nr_sectors >> (md->block_bits - 9);
+		brq.data.blocks = req->nr_sectors >> (md->block_bits - 9);
 		brq.stop.opcode = MMC_STOP_TRANSMISSION;
 		brq.stop.arg = 0;
-		brq.stop.flags = MMC_RSP_SHORT | MMC_RSP_CRC | MMC_RSP_BUSY;
+		brq.stop.flags = MMC_RSP_R1B;
 		if (rq_data_dir(req) == READ) {
 			brq.cmd.opcode = brq.data.blocks > 1 ? MMC_READ_MULTIPLE_BLOCK : MMC_READ_SINGLE_BLOCK;
 			brq.data.flags |= MMC_DATA_READ;
 		} else {
 			brq.cmd.opcode = MMC_WRITE_BLOCK;
-			brq.cmd.flags |= MMC_RSP_BUSY;
+			brq.cmd.flags = MMC_RSP_R1B;
 			brq.data.flags |= MMC_DATA_WRITE;
 			brq.data.blocks = 1;
 		}
@@ -225,7 +224,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			cmd.opcode = MMC_SEND_STATUS;
 			cmd.arg = card->rca << 16;
-			cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
+			cmd.flags = MMC_RSP_R1;
 			err = mmc_wait_for_cmd(card->host, &cmd, 5);
 			if (err) {
 				printk(KERN_ERR "%s: error %d requesting status\n",
@@ -334,11 +333,10 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
 	sprintf(md->disk->disk_name, "mmcblk%d", devidx);
 	sprintf(md->disk->devfs_name, "mmc/blk%d", devidx);
-	md->block_bits = md->queue.card->csd.read_blkbits;
+	md->block_bits = card->csd.read_blkbits;
-	blk_queue_max_sectors(md->queue.queue, maxsectors);
 	blk_queue_hardsect_size(md->queue.queue, 1 << md->block_bits);
-	set_capacity(md->disk, md->queue.card->csd.capacity);
+	set_capacity(md->disk, card->csd.capacity);
 	}
 out:
 	return md;
@@ -353,7 +351,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
 	mmc_card_claim_host(card);
 	cmd.opcode = MMC_SET_BLOCKLEN;
 	cmd.arg = 1 << card->csd.read_blkbits;
-	cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
+	cmd.flags = MMC_RSP_R1;
 	err = mmc_wait_for_cmd(card->host, &cmd, 5);
 	mmc_card_release_host(card);
@@ -440,7 +438,7 @@ static int mmc_blk_resume(struct mmc_card *card)
 	struct mmc_blk_data *md = mmc_get_drvdata(card);
 	if (md) {
-		mmc_blk_set_blksize(md, md->queue.card);
+		mmc_blk_set_blksize(md, card);
 		blk_start_queue(md->queue.queue);
 	}
 	return 0;
@@ -489,9 +487,6 @@ static void __exit mmc_blk_exit(void)
 module_init(mmc_blk_init);
 module_exit(mmc_blk_exit);
-module_param(maxsectors, int, 0444);
-MODULE_PARM_DESC(maxsectors, "Maximum number of sectors for a single request");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
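Note that brq.data.blocks now covers the whole request (req->nr_sectors) rather than just the first segment, which is what makes multi-block transfers over a scatterlist possible. The arithmetic is in units of the card's block size; a standalone illustration of it (ordinary user-space C, not part of the patch):

#include <stdio.h>

/* same calculation as the driver: sectors are 512 bytes,
 * card blocks are (1 << block_bits) bytes */
static unsigned int mmc_blocks(unsigned int nr_sectors, unsigned int block_bits)
{
    return nr_sectors >> (block_bits - 9);
}

int main(void)
{
    printf("%u\n", mmc_blocks(8, 9));   /* 512-byte blocks: 8 sectors -> 8 blocks */
    printf("%u\n", mmc_blocks(8, 10));  /* 1024-byte blocks: 8 sectors -> 4 blocks */
    return 0;
}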
@@ -124,11 +124,12 @@ static void mmc_request(request_queue_t *q)
  */
 int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
 {
+	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
-	if (card->host->dev->dma_mask && *card->host->dev->dma_mask)
-		limit = *card->host->dev->dma_mask;
+	if (host->dev->dma_mask && *host->dev->dma_mask)
+		limit = *host->dev->dma_mask;
 	mq->card = card;
 	mq->queue = blk_init_queue(mmc_request, lock);
@@ -137,6 +138,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_sectors(mq->queue, host->max_sectors);
+	blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
+	blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 	mq->queue->queuedata = mq;
 	mq->req = NULL;
...
@@ -43,10 +43,9 @@ static void
 mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 {
 	writel(0, host->base + MMCICOMMAND);
 	host->mrq = NULL;
 	host->cmd = NULL;
-	host->data = NULL;
-	host->buffer = NULL;
 	if (mrq->data)
 		mrq->data->bytes_xfered = host->data_xfered;
@@ -60,6 +59,13 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
 		spin_lock(&host->lock);
 	}
 }
+static void mmci_stop_data(struct mmci_host *host)
+{
+	writel(0, host->base + MMCIDATACTRL);
+	writel(0, host->base + MMCIMASK1);
+	host->data = NULL;
+}
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 {
 	unsigned int datactrl, timeout, irqmask;
@@ -69,7 +75,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	    1 << data->blksz_bits, data->blocks, data->flags);
 	host->data = data;
-	host->buffer = data->req->buffer;
+	host->offset = 0;
 	host->size = data->blocks << data->blksz_bits;
 	host->data_xfered = 0;
@@ -94,6 +100,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
 	}
 	writel(datactrl, base + MMCIDATACTRL);
+	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
 	writel(irqmask, base + MMCIMASK1);
 }
@@ -147,7 +154,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		status |= MCI_DATAEND;
 	}
 	if (status & MCI_DATAEND) {
-		host->data = NULL;
+		mmci_stop_data(host);
 		if (!data->stop) {
 			mmci_request_end(host, data->mrq);
 		} else {
@@ -182,72 +190,171 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 	}
 }
-/*
- * PIO data transfer IRQ handler.
- */
-static irqreturn_t mmci_pio_irq(int irq, void *dev_id, struct pt_regs *regs)
-{
-	struct mmci_host *host = dev_id;
-	void *base = host->base;
-	u32 status;
-	int ret = 0;
-	do {
-		status = readl(base + MMCISTATUS);
-		if (!(status & (MCI_RXDATAAVLBL|MCI_RXFIFOHALFFULL|
-				MCI_TXFIFOHALFEMPTY)))
-			break;
-		DBG(host, "irq1 %08x\n", status);
-		if (status & (MCI_RXDATAAVLBL|MCI_RXFIFOHALFFULL)) {
-			unsigned int count = host->size - (readl(base + MMCIFIFOCNT) << 2);
-			if (count < 0)
-				count = 0;
-			if (count && host->buffer) {
-				readsl(base + MMCIFIFO, host->buffer, count >> 2);
-				host->buffer += count;
-				host->size -= count;
-				if (host->size == 0)
-					host->buffer = NULL;
-			} else {
-				static int first = 1;
-				if (first) {
-					first = 0;
-					printk(KERN_ERR "MMCI: sinking excessive data\n");
-				}
-				readl(base + MMCIFIFO);
-			}
-		}
-		/*
-		 * We only need to test the half-empty flag here - if
-		 * the FIFO is completely empty, then by definition
-		 * it is more than half empty.
-		 */
-		if (status & MCI_TXFIFOHALFEMPTY) {
-			unsigned int maxcnt = status & MCI_TXFIFOEMPTY ?
-					      MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
-			unsigned int count = min(host->size, maxcnt);
-			writesl(base + MMCIFIFO, host->buffer, count >> 2);
-			host->buffer += count;
-			host->size -= count;
-			/*
-			 * If we run out of data, disable the data IRQs;
-			 * this prevents a race where the FIFO becomes
-			 * empty before the chip itself has disabled the
-			 * data path.
-			 */
-			if (host->size == 0)
-				writel(0, base + MMCIMASK1);
-		}
-		ret = 1;
-	} while (status);
+static int mmci_pio_read(struct mmci_host *host, struct request *req, u32 status)
+{
+	void *base = host->base;
+	int ret = 0;
+	do {
+		unsigned long flags;
+		unsigned int bio_remain;
+		char *buffer;
+		/*
+		 * Check for data available.
+		 */
+		if (!(status & MCI_RXDATAAVLBL))
+			break;
+		/*
+		 * Map the BIO buffer.
+		 */
+		buffer = bio_kmap_irq(req->cbio, &flags);
+		bio_remain = (req->current_nr_sectors << 9) - host->offset;
+		do {
+			int count = host->size - (readl(base + MMCIFIFOCNT) << 2);
+			if (count > bio_remain)
+				count = bio_remain;
+			if (count > 0) {
+				ret = 1;
+				readsl(base + MMCIFIFO, buffer + host->offset, count >> 2);
+				host->offset += count;
+				host->size -= count;
+				bio_remain -= count;
+				if (bio_remain == 0)
+					goto next_bio;
+			}
+			status = readl(base + MMCISTATUS);
+		} while (status & MCI_RXDATAAVLBL);
+		bio_kunmap_irq(buffer, &flags);
+		break;
+ next_bio:
+		bio_kunmap_irq(buffer, &flags);
+		/*
+		 * Ok, we've completed that BIO, move on to next
+		 * BIO in the chain. Note: this doesn't actually
+		 * complete the BIO!
+		 */
+		if (!process_that_request_first(req, req->current_nr_sectors))
+			break;
+		host->offset = 0;
+		status = readl(base + MMCISTATUS);
+	} while (1);
+	/*
+	 * If we're nearing the end of the read, switch to
+	 * "any data available" mode.
+	 */
+	if (host->size < MCI_FIFOSIZE)
+		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1);
+	return ret;
+}
+static int mmci_pio_write(struct mmci_host *host, struct request *req, u32 status)
+{
+	void *base = host->base;
+	int ret = 0;
+	do {
+		unsigned long flags;
+		unsigned int bio_remain;
+		char *buffer;
+		/*
+		 * We only need to test the half-empty flag here - if
+		 * the FIFO is completely empty, then by definition
+		 * it is more than half empty.
+		 */
+		if (!(status & MCI_TXFIFOHALFEMPTY))
+			break;
+		/*
+		 * Map the BIO buffer.
+		 */
+		buffer = bio_kmap_irq(req->cbio, &flags);
+		bio_remain = (req->current_nr_sectors << 9) - host->offset;
+		do {
+			unsigned int count, maxcnt;
+			maxcnt = status & MCI_TXFIFOEMPTY ?
+				 MCI_FIFOSIZE : MCI_FIFOHALFSIZE;
+			count = min(bio_remain, maxcnt);
+			writesl(base + MMCIFIFO, buffer + host->offset, count >> 2);
+			host->offset += count;
+			host->size -= count;
+			bio_remain -= count;
+			ret = 1;
+			if (bio_remain == 0)
+				goto next_bio;
+			status = readl(base + MMCISTATUS);
+		} while (status & MCI_TXFIFOHALFEMPTY);
+		bio_kunmap_irq(buffer, &flags);
+		break;
+ next_bio:
+		bio_kunmap_irq(buffer, &flags);
+		/*
+		 * Ok, we've completed that BIO, move on to next
+		 * BIO in the chain. Note: this doesn't actually
+		 * complete the BIO!
+		 */
+		if (!process_that_request_first(req, req->current_nr_sectors))
+			break;
+		host->offset = 0;
+		status = readl(base + MMCISTATUS);
+	} while (1);
+	return ret;
+}
+/*
+ * PIO data transfer IRQ handler.
+ */
+static irqreturn_t mmci_pio_irq(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct mmci_host *host = dev_id;
+	struct request *req;
+	void *base = host->base;
+	u32 status;
+	int ret = 0;
+	status = readl(base + MMCISTATUS);
+	DBG(host, "irq1 %08x\n", status);
+	req = host->data->req;
+	if (status & MCI_RXACTIVE)
+		ret = mmci_pio_read(host, req, status);
+	else if (status & MCI_TXACTIVE)
+		ret = mmci_pio_write(host, req, status);
+	/*
+	 * If we run out of data, disable the data IRQs; this
+	 * prevents a race where the FIFO becomes empty before
+	 * the chip itself has disabled the data path, and
+	 * stops us racing with our data end IRQ.
+	 */
+	if (host->size == 0) {
+		writel(0, base + MMCIMASK1);
+		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);
+	}
 	return IRQ_RETVAL(ret);
 }
@@ -268,11 +375,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id, struct pt_regs *regs)
 		struct mmc_data *data;
 		status = readl(host->base + MMCISTATUS);
+		status &= readl(host->base + MMCIMASK0);
 		writel(status, host->base + MMCICLEAR);
-		if (!(status & MCI_IRQMASK))
-			break;
 		DBG(host, "irq0 %08x\n", status);
 		data = host->data;
@@ -427,6 +532,25 @@ static int mmci_probe(struct amba_device *dev, void *id)
 	mmc->f_max = min(host->mclk, fmax);
 	mmc->ocr_avail = plat->ocr_mask;
+	/*
+	 * We can do SGIO
+	 */
+	mmc->max_hw_segs = 16;
+	mmc->max_phys_segs = 16;
+	/*
+	 * Since we only have a 16-bit data length register, we must
+	 * ensure that we don't exceed 2^16-1 bytes in a single request.
+	 * Choose 64 (512-byte) sectors as the limit.
+	 */
+	mmc->max_sectors = 64;
+	/*
+	 * Set the maximum segment size. Since we aren't doing DMA
+	 * (yet) we are only limited by the data length register.
+	 */
+	mmc->max_seg_size = mmc->max_sectors << 9;
 	spin_lock_init(&host->lock);
 	writel(0, host->base + MMCIMASK0);
...
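The 2^16-1 limit quoted in the comment is easy to check: 64 sectors of 512 bytes is 32768 bytes, comfortably inside a 16-bit data length register, and max_seg_size is derived from the same figure so no single segment can exceed it either. A trivial standalone check (illustrative only):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    unsigned int max_sectors = 64;
    unsigned int max_request = max_sectors << 9;    /* 64 * 512 = 32768 bytes */

    /* must fit the controller's 16-bit data length register */
    assert(max_request <= 0xffff);
    printf("largest request: %u bytes\n", max_request);
    return 0;
}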
@@ -103,18 +103,15 @@
 #define MMCIFIFOCNT		0x048
 #define MMCIFIFO		0x080 /* to 0x0bc */
-#define MCI_IRQMASK	\
-	(MCI_CMDCRCFAIL|MCI_DATACRCFAIL|MCI_CMDTIMEOUT|MCI_DATATIMEOUT|	\
-	 MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_CMDRESPEND|MCI_CMDSENT|	\
-	 MCI_DATAEND|MCI_DATABLOCKEND)
 #define MCI_IRQENABLE	\
 	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|	\
-	MCI_DATABLOCKENDMASK)
+	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK)
-#define MCI_FIFOSIZE	16
+/*
+ * The size of the FIFO in bytes.
+ */
+#define MCI_FIFOSIZE	(16*4)
 #define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
@@ -141,8 +138,6 @@ struct mmci_host {
 	unsigned int		oldstat;
 	/* pio stuff */
-	void			*buffer;
+	unsigned int		offset;
 	unsigned int		size;
 };
-#define to_mmci_host(mmc)	container_of(mmc, struct mmci_host, mmc)
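MCI_FIFOSIZE changes meaning here: the primecell FIFO is 16 words of 32 bits, and the new PIO code counts in bytes, so the constant becomes 16*4 = 64 while readsl()/writesl() still receive a word count via the count >> 2 conversions above. A quick standalone check of the byte/word bookkeeping (illustrative only):

#include <stdio.h>

#define MCI_FIFOSIZE     (16*4)             /* 16 words x 4 bytes */
#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)

int main(void)
{
    unsigned int count = MCI_FIFOHALFSIZE;  /* bytes moved on a half-full/half-empty IRQ */

    /* readsl()/writesl() take word counts, hence the >> 2 in the driver */
    printf("%u bytes = %u words\n", count, count >> 2);
    return 0;
}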
@@ -530,6 +530,7 @@ static int pxamci_remove(struct device *dev)
 	return 0;
 }
+#ifdef CONFIG_PM
 static int pxamci_suspend(struct device *dev, u32 state, u32 level)
 {
 	struct mmc_host *mmc = dev_get_drvdata(dev);
@@ -551,6 +552,10 @@ static int pxamci_resume(struct device *dev, u32 level)
 	return ret;
 }
+#else
+#define pxamci_suspend	NULL
+#define pxamci_resume	NULL
+#endif
 static struct device_driver pxamci_driver = {
 	.name		= "pxa2xx-mci",
...
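The #else stubs only work because the driver registration below refers to the hooks by name; with CONFIG_PM disabled the functions compile out and NULL is registered instead. Roughly (the .suspend/.resume lines are assumed for illustration; only .name is visible in the context above):

static struct device_driver pxamci_driver = {
    .name    = "pxa2xx-mci",
    /* assumed fields, shown only to illustrate why the NULL
     * fallbacks above keep the !CONFIG_PM build working */
    .suspend = pxamci_suspend,
    .resume  = pxamci_resume,
};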
@@ -64,12 +64,18 @@ struct device;
 struct mmc_host {
 	struct device		*dev;
 	struct mmc_host_ops	*ops;
-	void			*priv;
 	unsigned int		f_min;
 	unsigned int		f_max;
 	u32			ocr_avail;
 	char			host_name[8];
+	/* host specific block data */
+	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
+	unsigned short		max_hw_segs;	/* see blk_queue_max_hw_segments */
+	unsigned short		max_phys_segs;	/* see blk_queue_max_phys_segments */
+	unsigned short		max_sectors;	/* see blk_queue_max_sectors */
+	unsigned short		unused;
 	/* private data */
 	struct mmc_ios		ios;		/* current io bus settings */
 	u32			ocr;		/* the current OCR setting */
...
@@ -28,6 +28,16 @@ struct mmc_command {
 #define MMC_RSP_CRC	(1 << 3)		/* expect valid crc */
 #define MMC_RSP_BUSY	(1 << 4)		/* card may send busy */
+/*
+ * These are the response types, and correspond to valid bit
+ * patterns of the above flags. One additional valid pattern
+ * is all zeros, which means we don't expect a response.
+ */
+#define MMC_RSP_R1	(MMC_RSP_SHORT|MMC_RSP_CRC)
+#define MMC_RSP_R1B	(MMC_RSP_SHORT|MMC_RSP_CRC|MMC_RSP_BUSY)
+#define MMC_RSP_R2	(MMC_RSP_LONG|MMC_RSP_CRC)
+#define MMC_RSP_R3	(MMC_RSP_SHORT)
 	unsigned int		retries;	/* max number of retries */
 	unsigned int		error;		/* command error */
...
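These composites are what every caller converted in this patch now passes in cmd.flags. Host drivers can still key off the individual bits; a hedged sketch of how one might translate them into controller command-register bits (the EXAMPLE_* names are invented, this is not mmci's actual code):

static u32 example_build_cmdreg(struct mmc_command *cmd)
{
    u32 c = cmd->opcode;

    if (cmd->flags & (MMC_RSP_SHORT | MMC_RSP_LONG))
        c |= EXAMPLE_RESPONSE;      /* a response is expected */
    if (cmd->flags & MMC_RSP_LONG)
        c |= EXAMPLE_LONGRSP;       /* 136-bit response, i.e. R2 */
    if (cmd->flags & MMC_RSP_BUSY)
        c |= EXAMPLE_WAITBUSY;      /* R1b: card may hold the line busy */
    /*
     * MMC_RSP_CRC says the response carries a valid CRC; R3 (the OCR
     * from MMC_SEND_OP_COND) does not, so CRC errors on it are ignored.
     */
    return c;
}

For instance, mmc_send_op_cond() above now passes MMC_RSP_R3, which is MMC_RSP_SHORT alone: a 32-bit response whose CRC field is not checked.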