Commit d3fa72e4 authored by Ralf Baechle, committed by Linus Torvalds

[PATCH] Pass struct dev pointer to dma_cache_sync()

dma_cache_sync() is ill-designed in that it does not take a struct device
pointer argument, which makes proper support for systems that consist of a
mix of coherent and non-coherent DMA devices hard.  Change dma_cache_sync()
to take a struct device pointer as its first argument and fix all callers
to pass it.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f67637ee
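
As a sketch of what the interface change means for a caller (names such as
hostdata->dev are taken from the 53c700 diff below; this example is
illustrative, not part of the patch):

	/* old: the implementation cannot tell which device the memory
	 * belongs to, so it cannot know whether that device is coherent */
	dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);

	/* new: the device comes first, matching the rest of the DMA API,
	 * so coherency can be decided per device */
	dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);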
@@ -459,7 +459,7 @@ anything like this. You must also be extra careful about accessing
 memory you intend to sync partially.
 
 	void
-	dma_cache_sync(void *vaddr, size_t size,
+	dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		       enum dma_data_direction direction)
 
 Do a partial sync of memory that was allocated by
...
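
A minimal usage sketch of the new signature, assuming memory obtained with
dma_alloc_noncoherent() as described elsewhere in DMA-API.txt (illustrative
only, not part of the patch):

	dma_addr_t handle;
	void *buf = dma_alloc_noncoherent(dev, size, &handle, GFP_KERNEL);

	/* CPU fills buf, then flushes it toward the device; passing dev
	 * lets the implementation skip the flush if dev is coherent */
	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);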
@@ -11,7 +11,7 @@
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 
-void dma_cache_sync(void *vaddr, size_t size, int direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
 {
 	/*
 	 * No need to sync an uncached area
...
@@ -197,7 +197,7 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
...
@@ -204,7 +204,7 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
...
@@ -370,7 +370,8 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
...
@@ -306,7 +306,8 @@ int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 
 EXPORT_SYMBOL(dma_is_consistent);
 
-void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction direction)
 {
 	if (direction == DMA_NONE)
 		return;
...
@@ -362,11 +362,11 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	for (j = 0; j < PATCHES; j++)
 		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
 	/* now patch up fixed addresses. */
-	script_patch_32(script, MessageLocation,
+	script_patch_32(hostdata->dev, script, MessageLocation,
 			pScript + MSGOUT_OFFSET);
-	script_patch_32(script, StatusAddress,
+	script_patch_32(hostdata->dev, script, StatusAddress,
 			pScript + STATUS_OFFSET);
-	script_patch_32(script, ReceiveMsgAddress,
+	script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
 			pScript + MSGIN_OFFSET);
 
 	hostdata->script = script;
@@ -821,8 +821,9 @@ process_extended_message(struct Scsi_Host *host,
 			shost_printk(KERN_WARNING, host,
 				     "Unexpected SDTR msg\n");
 			hostdata->msgout[0] = A_REJECT_MSG;
-			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-			script_patch_16(hostdata->script, MessageCount, 1);
+			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+			script_patch_16(hostdata->dev, hostdata->script,
+					MessageCount, 1);
 			/* SendMsgOut returns, so set up the return
 			 * address */
 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -833,8 +834,9 @@ process_extended_message(struct Scsi_Host *host,
 			printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
 			       host->host_no, pun, lun);
 			hostdata->msgout[0] = A_REJECT_MSG;
-			dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-			script_patch_16(hostdata->script, MessageCount, 1);
+			dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+			script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+					1);
 			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
 			break;
@@ -847,8 +849,9 @@ process_extended_message(struct Scsi_Host *host,
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+				1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -929,8 +932,9 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
 		printk("\n");
 		/* just reject it */
 		hostdata->msgout[0] = A_REJECT_MSG;
-		dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
-		script_patch_16(hostdata->script, MessageCount, 1);
+		dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
+		script_patch_16(hostdata->dev, hostdata->script, MessageCount,
+				1);
 		/* SendMsgOut returns, so set up the return
 		 * address */
 		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
@@ -939,7 +943,7 @@ process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata
 	}
 	NCR_700_writel(temp, host, TEMP_REG);
 	/* set us up to receive another message */
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
 	return resume_offset;
 }
@@ -1019,9 +1023,9 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
 			slot->SG[1].pAddr = 0;
 			slot->resume_offset = hostdata->pScript;
-			dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
-			dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
+			dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
+			dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
 			/* queue the command for reissue */
 			slot->state = NCR_700_SLOT_QUEUED;
 			slot->flags = NCR_700_FLAG_AUTOSENSE;
@@ -1136,11 +1140,12 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			hostdata->cmd = slot->cmnd;
 
 			/* re-patch for this command */
-			script_patch_32_abs(hostdata->script, CommandAddress,
-					    slot->pCmd);
-			script_patch_16(hostdata->script,
-					CommandCount, slot->cmnd->cmd_len);
-			script_patch_32_abs(hostdata->script, SGScriptStartAddress,
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+					    CommandAddress, slot->pCmd);
+			script_patch_16(hostdata->dev, hostdata->script,
+					CommandCount, slot->cmnd->cmd_len);
+			script_patch_32_abs(hostdata->dev, hostdata->script,
+					    SGScriptStartAddress,
 					    to32bit(&slot->pSG[0].ins));
 
 			/* Note: setting SXFER only works if we're
@@ -1150,13 +1155,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 			 * should therefore always clear ACK */
 			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
 				       host, SXFER_REG);
-			dma_cache_sync(hostdata->msgin,
+			dma_cache_sync(hostdata->dev, hostdata->msgin,
 				       MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
-			dma_cache_sync(hostdata->msgout,
+			dma_cache_sync(hostdata->dev, hostdata->msgout,
 				       MSG_ARRAY_SIZE, DMA_TO_DEVICE);
 			/* I'm just being paranoid here, the command should
 			 * already have been flushed from the cache */
-			dma_cache_sync(slot->cmnd->cmnd,
+			dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
 				       slot->cmnd->cmd_len, DMA_TO_DEVICE);
@@ -1220,7 +1225,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
 		hostdata->reselection_id = reselection_id;
 		/* just in case we have a stale simple tag message, clear it */
 		hostdata->msgin[1] = 0;
-		dma_cache_sync(hostdata->msgin,
+		dma_cache_sync(hostdata->dev, hostdata->msgin,
 			       MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
 		if(hostdata->tag_negotiated & (1<<reselection_id)) {
 			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
@@ -1336,7 +1341,7 @@ process_selection(struct Scsi_Host *host, __u32 dsp)
 	hostdata->cmd = NULL;
 	/* clear any stale simple tag message */
 	hostdata->msgin[1] = 0;
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_BIDIRECTIONAL);
 
 	if(id == 0xff) {
@@ -1433,29 +1438,30 @@ NCR_700_start_command(struct scsi_cmnd *SCp)
 		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
 	}
 
-	script_patch_16(hostdata->script, MessageCount, count);
+	script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
 
-	script_patch_ID(hostdata->script,
+	script_patch_ID(hostdata->dev, hostdata->script,
 			Device_ID, 1<<scmd_id(SCp));
 
-	script_patch_32_abs(hostdata->script, CommandAddress,
+	script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
 			    slot->pCmd);
-	script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
+	script_patch_16(hostdata->dev, hostdata->script, CommandCount,
+			SCp->cmd_len);
 	/* finally plumb the beginning of the SG list into the script
 	 * */
-	script_patch_32_abs(hostdata->script, SGScriptStartAddress,
-			    to32bit(&slot->pSG[0].ins));
+	script_patch_32_abs(hostdata->dev, hostdata->script,
+			    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 	NCR_700_clear_fifo(SCp->device->host);
 
 	if(slot->resume_offset == 0)
 		slot->resume_offset = hostdata->pScript;
 	/* now perform all the writebacks and invalidates */
-	dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
+	dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
 		       DMA_FROM_DEVICE);
-	dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
-	dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
+	dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
+	dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
 
 	/* set the synchronous period/offset */
 	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
@@ -1631,7 +1637,7 @@ NCR_700_intr(int irq, void *dev_id)
 					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
 					slot->SG[i].pAddr = 0;
 				}
-				dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+				dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 				/* and pretend we disconnected after
 				 * the command phase */
 				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
@@ -1897,9 +1903,9 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
 		}
 		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
 		slot->SG[i].pAddr = 0;
-		dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
+		dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
 		DEBUG((" SETTING %08lx to %x\n",
 		       (&slot->pSG[i].ins),
 		       slot->SG[i].ins));
 	}
 	slot->resume_offset = 0;
...
@@ -415,31 +415,31 @@ struct NCR_700_Host_Parameters {
 #define NCR_710_MIN_XFERP	0
 #define NCR_700_MIN_PERIOD	25 /* for SDTR message, 100ns */
 
-#define script_patch_32(script, symbol, value) \
+#define script_patch_32(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		__u32 val = bS_to_cpu((script)[A_##symbol##_used[i]]) + value; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
-#define script_patch_32_abs(script, symbol, value) \
+#define script_patch_32_abs(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
 		(script)[A_##symbol##_used[i]] = bS_to_host(value); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching %s at %d to 0x%lx\n", \
 		       #symbol, A_##symbol##_used[i], (value))); \
 	} \
 }
 
 /* Used for patching the SCSI ID in the SELECT instruction */
-#define script_patch_ID(script, symbol, value) \
+#define script_patch_ID(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -447,13 +447,13 @@ struct NCR_700_Host_Parameters {
 		val &= 0xff00ffff; \
 		val |= ((value) & 0xff) << 16; \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching ID field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
 }
 
-#define script_patch_16(script, symbol, value) \
+#define script_patch_16(dev, script, symbol, value) \
 { \
 	int i; \
 	for(i=0; i< (sizeof(A_##symbol##_used) / sizeof(__u32)); i++) { \
@@ -461,7 +461,7 @@ struct NCR_700_Host_Parameters {
 		val &= 0xffff0000; \
 		val |= ((value) & 0xffff); \
 		(script)[A_##symbol##_used[i]] = bS_to_host(val); \
-		dma_cache_sync(&(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
+		dma_cache_sync((dev), &(script)[A_##symbol##_used[i]], 4, DMA_TO_DEVICE); \
 		DEBUG((" script, patching short field %s at %d to 0x%x\n", \
 		       #symbol, A_##symbol##_used[i], val)); \
 	} \
...
@@ -555,7 +555,7 @@ mpsc_sdma_start_tx(struct mpsc_port_info *pi)
 	if (!mpsc_sdma_tx_active(pi)) {
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)txre,
@@ -931,7 +931,7 @@ mpsc_init_rings(struct mpsc_port_info *pi)
 	}
 	txre->link = cpu_to_be32(pi->txr_p);	/* Wrap last back to first */
 
-	dma_cache_sync((void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
+	dma_cache_sync(pi->port.dev, (void *) pi->dma_region, MPSC_DMA_ALLOC_SIZE,
 		DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1005,7 +1005,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 
 	rxre = (struct mpsc_rx_desc *)(pi->rxr + (pi->rxr_posn*MPSC_RXRE_SIZE));
 
-	dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		invalidate_dcache_range((ulong)rxre,
@@ -1029,7 +1029,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 		}
 
 		bp = pi->rxb + (pi->rxr_posn * MPSC_RXBE_SIZE);
-		dma_cache_sync((void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_RXBE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)bp,
@@ -1098,7 +1098,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 						SDMA_DESC_CMDSTAT_F |
 						SDMA_DESC_CMDSTAT_L);
 		wmb();
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)rxre,
@@ -1109,7 +1109,7 @@ mpsc_rx_intr(struct mpsc_port_info *pi)
 		pi->rxr_posn = (pi->rxr_posn + 1) & (MPSC_RXR_ENTRIES - 1);
 		rxre = (struct mpsc_rx_desc *)(pi->rxr +
 			(pi->rxr_posn * MPSC_RXRE_SIZE));
-		dma_cache_sync((void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
+		dma_cache_sync(pi->port.dev, (void *)rxre, MPSC_RXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			invalidate_dcache_range((ulong)rxre,
@@ -1143,7 +1143,7 @@ mpsc_setup_tx_desc(struct mpsc_port_info *pi, u32 count, u32 intr)
 							SDMA_DESC_CMDSTAT_EI
 							: 0));
 	wmb();
-	dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
+	dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		flush_dcache_range((ulong)txre,
@@ -1192,7 +1192,7 @@ mpsc_copy_tx_data(struct mpsc_port_info *pi)
 		else /* All tx data copied into ring bufs */
 			return;
 
-		dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)bp,
@@ -1217,7 +1217,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
 	txre = (struct mpsc_tx_desc *)(pi->txr +
 		(pi->txr_tail * MPSC_TXRE_SIZE));
 
-	dma_cache_sync((void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
+	dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE, DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 		invalidate_dcache_range((ulong)txre,
@@ -1235,7 +1235,7 @@ mpsc_tx_intr(struct mpsc_port_info *pi)
 		txre = (struct mpsc_tx_desc *)(pi->txr +
 			(pi->txr_tail * MPSC_TXRE_SIZE));
 
-		dma_cache_sync((void *) txre, MPSC_TXRE_SIZE,
+		dma_cache_sync(pi->port.dev, (void *) txre, MPSC_TXRE_SIZE,
 			DMA_FROM_DEVICE);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
@@ -1652,7 +1652,7 @@ mpsc_console_write(struct console *co, const char *s, uint count)
 			count--;
 		}
 
-		dma_cache_sync((void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
+		dma_cache_sync(pi->port.dev, (void *) bp, MPSC_TXBE_SIZE, DMA_BIDIRECTIONAL);
 #if defined(CONFIG_PPC32) && !defined(CONFIG_NOT_COHERENT_CACHE)
 		if (pi->cache_mgmt) /* GT642[46]0 Res #COMM-2 */
 			flush_dcache_range((ulong)bp,
...
@@ -60,7 +60,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 #define dma_sync_single_range(dev, addr, off, size, dir)	do { } while (0)
 #define dma_sync_sg_for_cpu(dev, sg, nents, dir)		do { } while (0)
 #define dma_sync_sg_for_device(dev, sg, nents, dir)		do { } while (0)
-#define dma_cache_sync(va, size, dir)				do { } while (0)
+#define dma_cache_sync(dev, va, size, dir)			do { } while (0)
 
 #define dma_get_cache_alignment()				L1_CACHE_BYTES
...
@@ -8,7 +8,8 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 
-extern void dma_cache_sync(void *vaddr, size_t size, int direction);
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	int direction);
 
 /*
  * Return whether the given device DMA address mask can be supported
...
@@ -159,7 +159,7 @@ dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 }
...
@@ -175,7 +175,7 @@ int dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline
-void dma_cache_sync(void *vaddr, size_t size,
+void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
 	flush_write_buffers();
...
@@ -295,7 +295,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
...
@@ -159,7 +159,7 @@ dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	flush_write_buffers();
...
@@ -50,7 +50,8 @@ dma_set_mask (struct device *dev, u64 mask)
 extern int dma_get_cache_alignment(void);
 
 static inline void
-dma_cache_sync (void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+		enum dma_data_direction dir)
 {
 	/*
 	 * IA-64 is cache-coherent, so this is mostly a no-op.  However, we do need to
...
@@ -41,7 +41,7 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 	dma_free_coherent(dev, size, addr, handle);
 }
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	/* we use coherent allocation, so not much to do here. */
...
@@ -65,7 +65,7 @@ dma_get_cache_alignment(void)
 
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_addr);
 
-extern void dma_cache_sync(void *vaddr, size_t size,
+extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	enum dma_data_direction direction);
 
 #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
...
@@ -197,7 +197,7 @@ dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	if(hppa_dma_ops->dma_sync_single_for_cpu)
...
@@ -378,7 +378,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
 	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
 	BUG_ON(direction == DMA_NONE);
...
@@ -53,7 +53,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(vaddr, size);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	consistent_sync(vaddr, size, (int)dir);
...
@@ -35,7 +35,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 	consistent_free(NULL, size, vaddr, dma_handle);
 }
 
-static inline void dma_cache_sync(void *vaddr, size_t size,
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction dir)
 {
 	dma_cache_wback_inv((unsigned long)vaddr, size);
...
@@ -210,7 +210,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	/* could define this in terms of the dma_cache ... operations,
...
@@ -112,7 +112,7 @@ dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
 }
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	BUG();
...
@@ -185,7 +185,8 @@ static inline int dma_get_cache_alignment(void)
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	enum dma_data_direction dir)
 {
 	flush_write_buffers();
 }
...
@@ -173,7 +173,7 @@ dma_get_cache_alignment(void)
 #define dma_is_consistent(d, h)	(1)
 
 static inline void
-dma_cache_sync(void *vaddr, size_t size,
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction)
 {
 	consistent_sync(vaddr, size, direction);
...