ide: switch to DMA-mapping API part #2

Follow-up to commit 5c05ff68
("ide: switch to DMA-mapping API"):

* pci_{alloc,free}_consistent() -> dma_{alloc,free}_coherent()
  in ide_{allocate,release}_dma_engine() (see the sketch after
  this list).

* Add ->prd_max_nents and ->prd_ent_size fields to ide_hwif_t
  (+ set default values in ide_allocate_dma_engine(); a usage
  sketch follows the lists below).

* Make ide_{allocate,release}_dma_engine() available also
  for CONFIG_BLK_DEV_IDEDMA_SFF=n.  Then convert au1xxx-ide.c,
  scc_pata.c and sgiioc4.c to use them.

* Add missing ->init_dma method to scc_pata.
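
For reference, a minimal sketch of the first bullet's conversion,
assuming a PCI device (the helper names are hypothetical and not part
of this patch): the pci_* wrappers imply GFP_ATOMIC, while the generic
DMA-mapping calls take a struct device * and an explicit GFP mask.

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* hypothetical helper: old call kept in a comment next to
	 * its DMA-mapping API equivalent
	 */
	static void *prd_table_alloc(struct pci_dev *pdev, size_t size,
				     dma_addr_t *dma_handle)
	{
		/* old: return pci_alloc_consistent(pdev, size, dma_handle); */
		return dma_alloc_coherent(&pdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}

	static void prd_table_free(struct pci_dev *pdev, size_t size,
				   void *cpu_addr, dma_addr_t dma_handle)
	{
		/* old: pci_free_consistent(pdev, size, cpu_addr, dma_handle); */
		dma_free_coherent(&pdev->dev, size, cpu_addr, dma_handle);
	}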

This patch also fixes:
- ->dmatable_cpu leak for au1xxx-ide
- too early release of ->dmatable_cpu for scc_pata
- wrong amount of ->dmatable_cpu memory being freed for sgiioc4

While at it:
- remove superfluous ->dma_base check from ide_unregister()
- return -ENOMEM on error in ide_allocate_dma_engine()
- beautify error message in ide_allocate_dma_engine()
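
As a usage sketch of the new fields (it mirrors the sgiioc4 hunk below;
the error label is hypothetical), a host driver with non-default PRD
geometry sets both fields before the core allocates the table, while
fields left at zero fall back to the PRD_ENTRIES/PRD_BYTES defaults:

	hwif->prd_max_nents = IOC4_PRD_ENTRIES;	/* PRD table entries */
	hwif->prd_ent_size = IOC4_PRD_BYTES;	/* bytes per entry */

	if (ide_allocate_dma_engine(hwif))	/* -ENOMEM on failure */
		goto dma_alloc_failure;		/* hypothetical label */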
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
@@ -844,36 +844,43 @@ void ide_dma_timeout(ide_drive_t *drive)
 }
 EXPORT_SYMBOL_GPL(ide_dma_timeout);
 
-#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 void ide_release_dma_engine(ide_hwif_t *hwif)
 {
 	if (hwif->dmatable_cpu) {
-		struct pci_dev *pdev = to_pci_dev(hwif->dev);
+		int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
-				    hwif->dmatable_cpu, hwif->dmatable_dma);
+		dma_free_coherent(hwif->dev, prd_size,
+				  hwif->dmatable_cpu, hwif->dmatable_dma);
 		hwif->dmatable_cpu = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
 
 int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
-	struct pci_dev *pdev = to_pci_dev(hwif->dev);
+	int prd_size;
 
-	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
-						  PRD_ENTRIES * PRD_BYTES,
-						  &hwif->dmatable_dma);
+	if (hwif->prd_max_nents == 0)
+		hwif->prd_max_nents = PRD_ENTRIES;
+	if (hwif->prd_ent_size == 0)
+		hwif->prd_ent_size = PRD_BYTES;
 
-	if (hwif->dmatable_cpu)
-		return 0;
+	prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
 
-	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
-	       hwif->name);
+	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+						&hwif->dmatable_dma,
+						GFP_ATOMIC);
+	if (hwif->dmatable_cpu == NULL) {
+		printk(KERN_ERR "%s: unable to allocate PRD table\n",
+		       hwif->name);
+		return -ENOMEM;
+	}
 
-	return 1;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
 
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 const struct ide_dma_ops sff_dma_ops = {
 	.dma_host_set		= ide_dma_host_set,
 	.dma_setup		= ide_dma_setup,
...
@@ -227,7 +227,6 @@ void ide_unregister(ide_hwif_t *hwif)
 	kfree(hwif->sg_table);
 	unregister_blkdev(hwif->major, hwif->name);
 
-	if (hwif->dma_base)
-		ide_release_dma_engine(hwif);
+	ide_release_dma_engine(hwif);
 
 	mutex_unlock(&ide_cfg_mtx);
...
@@ -428,9 +428,8 @@ static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
 	auide->rx_desc_head = (void*)au1xxx_dbdma_ring_alloc(auide->rx_chan,
 					     NUM_DESCRIPTORS);
 
-	hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
-						PRD_ENTRIES * PRD_BYTES,	/* 1 Page */
-						&hwif->dmatable_dma, GFP_KERNEL);
+	/* FIXME: check return value */
+	(void)ide_allocate_dma_engine(hwif);
 
 	au1xxx_dbdma_start( auide->tx_chan );
 	au1xxx_dbdma_start( auide->rx_chan );
...
@@ -821,6 +821,12 @@ static void __devinit init_iops_scc(ide_hwif_t *hwif)
 	init_mmio_iops_scc(hwif);
 }
 
+static int __devinit scc_init_dma(ide_hwif_t *hwif,
+				  const struct ide_port_info *d)
+{
+	return ide_allocate_dma_engine(hwif);
+}
+
 static u8 scc_cable_detect(ide_hwif_t *hwif)
 {
 	return ATA_CBL_PATA80;
@@ -885,6 +891,7 @@ static const struct ide_dma_ops scc_dma_ops = {
   {							\
 	.name		= name_str,			\
 	.init_iops	= init_iops_scc,		\
+	.init_dma	= scc_init_dma,			\
 	.init_hwif	= init_hwif_scc,		\
 	.tp_ops		= &scc_tp_ops,			\
 	.port_ops	= &scc_port_ops,		\
@@ -922,13 +929,6 @@ static void __devexit scc_remove(struct pci_dev *dev)
 {
 	struct scc_ports *ports = pci_get_drvdata(dev);
 	struct ide_host *host = ports->host;
-	ide_hwif_t *hwif = host->ports[0];
-
-	if (hwif->dmatable_cpu) {
-		pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
-				    hwif->dmatable_cpu, hwif->dmatable_dma);
-		hwif->dmatable_cpu = NULL;
-	}
 
 	ide_host_remove(host);
...
@@ -357,14 +357,13 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
 	}
 
 	hwif->dma_base = (unsigned long) virt_dma_base;
 
-	hwif->dmatable_cpu = pci_alloc_consistent(dev,
-					  IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
-					  &hwif->dmatable_dma);
-
-	if (!hwif->dmatable_cpu)
-		goto dma_pci_alloc_failure;
-
 	hwif->sg_max_nents = IOC4_PRD_ENTRIES;
 
+	hwif->prd_max_nents = IOC4_PRD_ENTRIES;
+	hwif->prd_ent_size = IOC4_PRD_BYTES;
+
+	if (ide_allocate_dma_engine(hwif))
+		goto dma_pci_alloc_failure;
+
 	pad = pci_alloc_consistent(dev, IOC4_IDE_CACHELINE_SIZE,
 				   (dma_addr_t *)&hwif->extra_base);
@@ -373,8 +372,8 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
 		return 0;
 	}
 
-	pci_free_consistent(dev, IOC4_PRD_ENTRIES * IOC4_PRD_BYTES,
-			    hwif->dmatable_cpu, hwif->dmatable_dma);
+	ide_release_dma_engine(hwif);
+
 	printk(KERN_ERR "%s(%s) -- ERROR: Unable to allocate DMA maps\n",
 	       __func__, hwif->name);
 	printk(KERN_INFO "%s: changing from DMA to PIO mode", hwif->name);
...
@@ -788,6 +788,12 @@ typedef struct hwif_s {
 	unsigned int	*dmatable_cpu;
 	/* dma physical region descriptor table (dma view) */
 	dma_addr_t	dmatable_dma;
+
+	/* maximum number of PRD table entries */
+	int prd_max_nents;
+	/* PRD entry size in bytes */
+	int prd_ent_size;
+
 	/* Scatter-gather list used to build the above */
 	struct scatterlist *sg_table;
 	int sg_max_nents;		/* Maximum number of entries in it */
@@ -1423,14 +1429,14 @@ int ide_set_dma(ide_drive_t *);
 void ide_check_dma_crc(ide_drive_t *);
 ide_startstop_t ide_dma_intr(ide_drive_t *);
 
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+
 int ide_build_sglist(ide_drive_t *, struct request *);
 void ide_destroy_dmatable(ide_drive_t *);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 extern int ide_build_dmatable(ide_drive_t *, struct request *);
-int ide_allocate_dma_engine(ide_hwif_t *);
-void ide_release_dma_engine(ide_hwif_t *);
 void ide_dma_host_set(ide_drive_t *, int);
 extern int ide_dma_setup(ide_drive_t *);
 void ide_dma_exec_cmd(ide_drive_t *, u8);
@@ -1453,11 +1459,8 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; }
 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
-#endif /* CONFIG_BLK_DEV_IDEDMA */
 
-#ifndef CONFIG_BLK_DEV_IDEDMA_SFF
 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
-#endif
+#endif /* CONFIG_BLK_DEV_IDEDMA */
 
 #ifdef CONFIG_BLK_DEV_IDEACPI
 extern int ide_acpi_exec_tfs(ide_drive_t *drive);
...