Commit e214806d authored by Michael Schmitz, committed by Martin K. Petersen

scsi: a3000: Convert m68k WD33C93 drivers to DMA API

Use dma_map_single() for the a3000 driver (leave the bounce buffer logic
unchanged).
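
For illustration only, a minimal sketch of the streaming-mapping pattern this conversion follows. The helper names (example_dma_start()/example_dma_done()) are hypothetical and not part of the driver; only dma_map_single(), dma_mapping_error() and dma_unmap_single() are real DMA API calls:

#include <linux/device.h>
#include <linux/dma-mapping.h>

static dma_addr_t example_dma_start(struct device *dev, void *buf,
				    size_t len, int dir_in)
{
	enum dma_data_direction dir = dir_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	dma_addr_t addr;

	/* map the CPU buffer; this also does any cache maintenance needed */
	addr = dma_map_single(dev, buf, len, dir);
	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;	/* caller falls back to PIO */

	return addr;	/* bus address to program into the DMA controller */
}

static void example_dma_done(struct device *dev, dma_addr_t addr,
			     size_t len, int dir_in)
{
	/* unmap after the transfer completes, before the CPU reads the data */
	dma_unmap_single(dev, addr, len,
			 dir_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
}

dma_map_single() returns the bus address to hand to the DMA engine and performs whatever cache maintenance the platform requires; dma_unmap_single() must later be called with the same handle, length and direction.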

Use dma_set_mask_and_coherent() to avoid explicit cache flushes.
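
A hedged sketch of the probe-time mask setup; my_probe() is a placeholder name, while dma_set_mask_and_coherent() and DMA_BIT_MASK() are the real kernel API used by the patch:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	/*
	 * Declare that the device can address 32 bits of DMA; if this
	 * fails, the DMA API cannot be used for the device at all.
	 */
	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
		return -ENODEV;
	}

	/* ... remainder of probe ... */
	return 0;
}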

Compile-tested only.

CC: linux-scsi@vger.kernel.org
Link: https://lore.kernel.org/r/6d1d88ee-1cf6-c735-1e6d-bafd2096e322@gmail.com
Link: https://lore.kernel.org/r/20220630033302.3183-2-schmitzmic@gmail.com
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Michael Schmitz <schmitzmic@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

--

Changes from v1:

- restore bounce buffer allocation (dropped in v1)

Arnd Bergmann:
- reorder dma mapping and bounce buffer copy
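
For the reordering note above, a minimal sketch under the assumption of a write-direction transfer; example_bounce_write() and its arguments are illustrative only. The point is that the payload is copied into the bounce buffer before the buffer is mapped, so the cache maintenance done by dma_map_single() covers the copied bytes:

#include <linux/dma-mapping.h>
#include <linux/string.h>

static dma_addr_t example_bounce_write(struct device *dev, void *bounce,
				       const void *payload, size_t len)
{
	memcpy(bounce, payload, len);	/* fill the bounce buffer first */

	/* map afterwards so the copied data is synced for the device */
	return dma_map_single(dev, bounce, len, DMA_TO_DEVICE);
}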
parent a2417db3
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -7,6 +7,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 
 #include <asm/page.h>
@@ -25,8 +26,11 @@
 struct a3000_hostdata {
 	struct WD33C93_hostdata wh;
 	struct a3000_scsiregs *regs;
+	struct device *dev;
 };
 
+#define DMA_DIR(d)   ((d == DATA_OUT_DIR) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
 static irqreturn_t a3000_intr(int irq, void *data)
 {
 	struct Scsi_Host *instance = data;
@@ -49,20 +53,38 @@ static irqreturn_t a3000_intr(int irq, void *data)
 static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 {
 	struct scsi_pointer *scsi_pointer = WD33C93_scsi_pointer(cmd);
+	unsigned long len = scsi_pointer->this_residual;
 	struct Scsi_Host *instance = cmd->device->host;
 	struct a3000_hostdata *hdata = shost_priv(instance);
 	struct WD33C93_hostdata *wh = &hdata->wh;
 	struct a3000_scsiregs *regs = hdata->regs;
 	unsigned short cntr = CNTR_PDMD | CNTR_INTEN;
-	unsigned long addr = virt_to_bus(scsi_pointer->ptr);
+	dma_addr_t addr;
+
+	addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+			      len, DMA_DIR(dir_in));
+	if (dma_mapping_error(hdata->dev, addr)) {
+		dev_warn(hdata->dev, "cannot map SCSI data block %p\n",
+			 scsi_pointer->ptr);
+		return 1;
+	}
+	scsi_pointer->dma_handle = addr;
 
 	/*
 	 * if the physical address has the wrong alignment, or if
 	 * physical address is bad, or if it is a write and at the
 	 * end of a physical memory chunk, then allocate a bounce
 	 * buffer
+	 * MSch 20220629 - only wrong alignment tested - bounce
+	 * buffer returned by kmalloc is guaranteed to be aligned
 	 */
 	if (addr & A3000_XFER_MASK) {
+		WARN_ONCE(1, "Invalid alignment for DMA!");
+		/* drop useless mapping */
+		dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+				 scsi_pointer->this_residual,
+				 DMA_DIR(dir_in));
 		wh->dma_bounce_len = (scsi_pointer->this_residual + 511) & ~0x1ff;
 		wh->dma_bounce_buffer = kmalloc(wh->dma_bounce_len,
 						GFP_KERNEL);
@@ -70,6 +92,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 		/* can't allocate memory; use PIO */
 		if (!wh->dma_bounce_buffer) {
 			wh->dma_bounce_len = 0;
+			scsi_pointer->dma_handle = (dma_addr_t) NULL;
 			return 1;
 		}
@@ -79,7 +102,15 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 			       scsi_pointer->this_residual);
 		}
 
-		addr = virt_to_bus(wh->dma_bounce_buffer);
+		addr = dma_map_single(hdata->dev, scsi_pointer->ptr,
+				      len, DMA_DIR(dir_in));
+		if (dma_mapping_error(hdata->dev, addr)) {
+			dev_warn(hdata->dev,
+				 "cannot map SCSI data block %p\n",
+				 scsi_pointer->ptr);
+			return 1;
+		}
+		scsi_pointer->dma_handle = addr;
 	}
 
 	/* setup dma direction */
@@ -94,13 +125,7 @@ static int dma_setup(struct scsi_cmnd *cmd, int dir_in)
 	/* setup DMA *physical* address */
 	regs->ACR = addr;
 
-	if (dir_in) {
-		/* invalidate any cache */
-		cache_clear(addr, scsi_pointer->this_residual);
-	} else {
-		/* push any dirty cache */
-		cache_push(addr, scsi_pointer->this_residual);
-	}
+	/* no more cache flush here - dma_map_single() takes care */
 
 	/* start DMA */
 	mb();			/* make sure setup is completed */
@@ -151,6 +176,10 @@ static void dma_stop(struct Scsi_Host *instance, struct scsi_cmnd *SCpnt,
 		regs->CNTR = CNTR_PDMD | CNTR_INTEN;
 		mb();		/* make sure CNTR is updated before next IO */
 
+		dma_unmap_single(hdata->dev, scsi_pointer->dma_handle,
+				 scsi_pointer->this_residual,
+				 DMA_DIR(wh->dma_dir));
+
 		/* copy from a bounce buffer, if necessary */
 		if (status && wh->dma_bounce_buffer) {
 			if (SCpnt) {
@@ -193,6 +222,11 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
 	wd33c93_regs wdregs;
 	struct a3000_hostdata *hdata;
 
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+		dev_warn(&pdev->dev, "cannot use 32 bit DMA\n");
+		return -ENODEV;
+	}
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;
@@ -216,6 +250,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
 	wdregs.SCMD = &regs->SCMD;
 
 	hdata = shost_priv(instance);
+	hdata->dev = &pdev->dev;
 	hdata->wh.no_sync = 0xff;
 	hdata->wh.fast = 0;
 	hdata->wh.dma_mode = CTRL_DMA;