Commit 5657933d authored by Bart Van Assche, committed by Doug Ledford

treewide: Move dma_ops from struct dev_archdata into struct device

Some but not all architectures provide set_dma_ops(). Move dma_ops
from struct dev_archdata into struct device such that it becomes
possible on all architectures to configure dma_ops per device.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Cc: Russell King <linux@armlinux.org.uk>
Cc: x86@kernel.org
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5299709d
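Every hunk below is the same mechanical substitution. As a reader's aid, here is a minimal sketch of the accessor pattern the move enables on every architecture. It is illustrative only, not verbatim kernel code; arch_default_dma_ops is a hypothetical stand-in for each port's global default (arm_dma_ops, mips_dma_map_ops, dma_noop_ops, and so on):

/* Sketch only, not verbatim kernel code. */
struct dma_map_ops;			/* real definition: <linux/dma-mapping.h> */

extern const struct dma_map_ops *arch_default_dma_ops;	/* hypothetical name */

static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
{
	dev->dma_ops = ops;		/* was: dev->archdata.dma_ops = ops */
}

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)	/* per-device override, if any */
		return dev->dma_ops;
	return arch_default_dma_ops;	/* arch-wide fallback otherwise */
}

With a field like this in struct device, bus and IOMMU code can install per-device DMA operations on any architecture instead of poking dev->archdata, which is exactly what the driver hunks below reduce to.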
@@ -7,7 +7,6 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
-	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
@@ -18,8 +18,8 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	return &arm_dma_ops;
 }
 
@@ -34,7 +34,7 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
 	BUG_ON(!dev);
-	dev->archdata.dma_ops = ops;
+	dev->dma_ops = ops;
 }
 
 #define HAVE_ARCH_DMA_SUPPORTED 1
@@ -17,7 +17,6 @@
 #define __ASM_DEVICE_H
 
 struct dev_archdata {
-	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
 	void *iommu;			/* private IOMMU data */
 #endif
@@ -29,8 +29,8 @@ extern const struct dma_map_ops dummy_dma_ops;
 static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 
 	/*
 	 * We expect no ISA devices, and all other DMA masters are expected to
@@ -837,7 +837,7 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
 		return false;
 	}
 
-	dev->archdata.dma_ops = &iommu_dma_ops;
+	dev->dma_ops = &iommu_dma_ops;
 	return true;
 }
 
@@ -941,7 +941,7 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_teardown_dma_ops(struct device *dev)
 {
-	dev->archdata.dma_ops = NULL;
+	dev->dma_ops = NULL;
 }
 
 #else
 
@@ -955,8 +955,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	if (!dev->archdata.dma_ops)
-		dev->archdata.dma_ops = &swiotlb_dma_ops;
+	if (!dev->dma_ops)
+		dev->dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
@@ -4,7 +4,6 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-	const struct dma_map_ops *dma_ops;
 };
 
 struct pdev_archdata {
@@ -12,8 +12,8 @@
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	return &dma_noop_ops;
 }
@@ -6,12 +6,7 @@
 #ifndef _ASM_MIPS_DEVICE_H
 #define _ASM_MIPS_DEVICE_H
 
-struct dma_map_ops;
-
 struct dev_archdata {
-	/* DMA operations on that device */
-	const struct dma_map_ops *dma_ops;
-
 #ifdef CONFIG_DMA_PERDEV_COHERENT
 	/* Non-zero if DMA is coherent with CPU caches */
 	bool dma_coherent;
@@ -13,8 +13,8 @@ extern const struct dma_map_ops *mips_dma_map_ops;
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	else
 		return mips_dma_map_ops;
 }
@@ -167,7 +167,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
 	}
 
-	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+	dev->dev.dma_ops = octeon_pci_dma_map_ops;
 
 	return 0;
 }
@@ -6,7 +6,6 @@
 #ifndef _ASM_POWERPC_DEVICE_H
 #define _ASM_POWERPC_DEVICE_H
 
-struct dma_map_ops;
 struct device_node;
 #ifdef CONFIG_PPC64
 struct pci_dn;
 
@@ -20,9 +19,6 @@ struct iommu_table;
  * drivers/macintosh/macio_asic.c
  */
 struct dev_archdata {
-	/* DMA operations on that device */
-	const struct dma_map_ops *dma_ops;
-
 	/*
 	 * These two used to be a union. However, with the hybrid ops we need
 	 * both so here we store both a DMA offset for direct mappings and
@@ -88,12 +88,12 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 	if (unlikely(dev == NULL))
 		return NULL;
 
-	return dev->archdata.dma_ops;
+	return dev->dma_ops;
 }
 
 static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
-	dev->archdata.dma_ops = ops;
+	dev->dma_ops = ops;
 }
 
 /*
@@ -33,7 +33,7 @@ static u64 __maybe_unused get_pfn_limit(struct device *dev)
 	struct dev_archdata __maybe_unused *sd = &dev->archdata;
 
 #ifdef CONFIG_SWIOTLB
-	if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
+	if (sd->max_direct_dma_addr && dev->dma_ops == &swiotlb_dma_ops)
 		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
 #endif
@@ -692,7 +692,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
 		return 0;
 
 	/* We use the PCI DMA ops */
-	dev->archdata.dma_ops = get_pci_dma_ops();
+	dev->dma_ops = get_pci_dma_ops();
 
 	cell_dma_dev_setup(dev);
@@ -186,7 +186,7 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
 	 */
 	if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
 	    !firmware_has_feature(FW_FEATURE_LPAR)) {
-		dev->dev.archdata.dma_ops = &dma_direct_ops;
+		dev->dev.dma_ops = &dma_direct_ops;
 		/*
 		 * Set the coherent DMA mask to prevent the iommu
 		 * being used unnecessarily
@@ -363,7 +363,7 @@ static int pcmcia_notify(struct notifier_block *nb, unsigned long action,
 		return 0;
 
 	/* We use the direct ops for localbus */
-	dev->archdata.dma_ops = &dma_direct_ops;
+	dev->dma_ops = &dma_direct_ops;
 
 	return 0;
 }
@@ -756,11 +756,11 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
 
 	switch (dev->dev_type) {
 	case PS3_DEVICE_TYPE_IOC0:
-		dev->core.archdata.dma_ops = &ps3_ioc0_dma_ops;
+		dev->core.dma_ops = &ps3_ioc0_dma_ops;
 		dev_set_name(&dev->core, "ioc0_%02x", ++dev_ioc0_count);
 		break;
 	case PS3_DEVICE_TYPE_SB:
-		dev->core.archdata.dma_ops = &ps3_sb_dma_ops;
+		dev->core.dma_ops = &ps3_sb_dma_ops;
 		dev_set_name(&dev->core, "sb_%02x", ++dev_sb_count);
 		break;
@@ -169,7 +169,7 @@ static int ibmebus_create_device(struct device_node *dn)
 		return -ENOMEM;
 
 	dev->dev.bus = &ibmebus_bus_type;
-	dev->dev.archdata.dma_ops = &ibmebus_dma_ops;
+	dev->dev.dma_ops = &ibmebus_dma_ops;
 
 	ret = of_device_add(dev);
 	if (ret)
@@ -4,7 +4,6 @@
  * This file is released under the GPLv2
  */
 struct dev_archdata {
-	const struct dma_map_ops *dma_ops;
 };
 
 struct pdev_archdata {
@@ -14,8 +14,8 @@ extern const struct dma_map_ops s390_pci_dma_ops;
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	return &dma_noop_ops;
 }
@@ -641,7 +641,7 @@ int pcibios_add_device(struct pci_dev *pdev)
 	int i;
 
 	pdev->dev.groups = zpci_attr_groups;
-	pdev->dev.archdata.dma_ops = &s390_pci_dma_ops;
+	pdev->dev.dma_ops = &s390_pci_dma_ops;
 	zpci_map_resources(pdev);
 
 	for (i = 0; i < PCI_BAR_COUNT; i++) {
@@ -17,9 +17,6 @@
 #define _ASM_TILE_DEVICE_H
 
 struct dev_archdata {
-	/* DMA operations on that device */
-	const struct dma_map_ops *dma_ops;
-
 	/* Offset of the DMA address from the PA. */
 	dma_addr_t		dma_offset;
@@ -31,8 +31,8 @@ extern const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	else
 		return tile_dma_map_ops;
 }
 
@@ -61,7 +61,7 @@ static inline void dma_mark_clean(void *addr, size_t size) {}
 static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
 {
-	dev->archdata.dma_ops = ops;
+	dev->dma_ops = ops;
 }
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
@@ -2,9 +2,6 @@
 #define _ASM_X86_DEVICE_H
 
 struct dev_archdata {
-#ifdef CONFIG_X86_DEV_DMA_OPS
-	const struct dma_map_ops *dma_ops;
-#endif
 #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU)
 	void *iommu; /* hook for IOMMU specific extension */
 #endif
@@ -32,10 +32,10 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 #ifndef CONFIG_X86_DEV_DMA_OPS
 	return dma_ops;
 #else
-	if (unlikely(!dev) || !dev->archdata.dma_ops)
+	if (unlikely(!dev) || !dev->dma_ops)
 		return dma_ops;
 	else
-		return dev->archdata.dma_ops;
+		return dev->dma_ops;
 #endif
 }
@@ -1177,7 +1177,7 @@ static int __init calgary_init(void)
 		tbl = find_iommu_table(&dev->dev);
 
 		if (translation_enabled(tbl))
-			dev->dev.archdata.dma_ops = &calgary_dma_ops;
+			dev->dev.dma_ops = &calgary_dma_ops;
 	}
 
 	return ret;
 
@@ -1201,7 +1201,7 @@ static int __init calgary_init(void)
 		calgary_disable_translation(dev);
 		calgary_free_bus(dev);
 		pci_dev_put(dev); /* Undo calgary_init_one()'s pci_dev_get() */
-		dev->dev.archdata.dma_ops = NULL;
+		dev->dev.dma_ops = NULL;
 	} while (1);
 
 	return ret;
@@ -667,7 +667,7 @@ static void set_dma_domain_ops(struct pci_dev *pdev)
 	spin_lock(&dma_domain_list_lock);
 	list_for_each_entry(domain, &dma_domain_list, node) {
 		if (pci_domain_nr(pdev->bus) == domain->domain_nr) {
-			pdev->dev.archdata.dma_ops = domain->dma_ops;
+			pdev->dev.dma_ops = domain->dma_ops;
 			break;
 		}
 	}
@@ -203,7 +203,7 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev)
 		return;
 	pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
 	pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1);
-	pdev->dev.archdata.dma_ops = &sta2x11_dma_ops;
+	pdev->dev.dma_ops = &sta2x11_dma_ops;
 
 	/* We must enable all devices as master, for audio DMA to work */
 	pci_set_master(pdev);
 
@@ -223,7 +223,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	struct sta2x11_mapping *map;
 
-	if (dev->archdata.dma_ops != &sta2x11_dma_ops) {
+	if (dev->dma_ops != &sta2x11_dma_ops) {
 		if (!dev->dma_mask)
 			return false;
 		return addr + size - 1 <= *dev->dma_mask;
 
@@ -247,7 +247,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
  */
 dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-	if (dev->archdata.dma_ops != &sta2x11_dma_ops)
+	if (dev->dma_ops != &sta2x11_dma_ops)
 		return paddr;
 	return p2a(paddr, to_pci_dev(dev));
 }
 
@@ -259,7 +259,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
  */
 phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 {
-	if (dev->archdata.dma_ops != &sta2x11_dma_ops)
+	if (dev->dma_ops != &sta2x11_dma_ops)
 		return daddr;
 	return a2p(daddr, to_pci_dev(dev));
 }
@@ -6,11 +6,7 @@
 #ifndef _ASM_XTENSA_DEVICE_H
 #define _ASM_XTENSA_DEVICE_H
 
-struct dma_map_ops;
-
 struct dev_archdata {
-	/* DMA operations on that device */
-	const struct dma_map_ops *dma_ops;
 };
 
 struct pdev_archdata {
@@ -22,8 +22,8 @@ extern const struct dma_map_ops xtensa_dma_map_ops;
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->archdata.dma_ops)
-		return dev->archdata.dma_ops;
+	if (dev && dev->dma_ops)
+		return dev->dma_ops;
 	else
 		return &xtensa_dma_map_ops;
 }
@@ -2465,7 +2465,7 @@ static void srpt_add_one(struct ib_device *device)
 	int i;
 
 	pr_debug("device = %p, device->dma_ops = %p\n", device,
-		 device->dma_ops);
+		 device->dma_device->dma_ops);
 
 	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
 	if (!sdev)
@@ -515,7 +515,7 @@ static void iommu_uninit_device(struct device *dev)
 	iommu_group_remove_device(dev);
 
 	/* Remove dma-ops */
-	dev->archdata.dma_ops = NULL;
+	dev->dma_ops = NULL;
 
 	/*
 	 * We keep dev_data around for unplugged devices and reuse it when the
 
@@ -2164,7 +2164,7 @@ static int amd_iommu_add_device(struct device *dev)
 			dev_name(dev));
 		iommu_ignore_device(dev);
-		dev->archdata.dma_ops = &nommu_dma_ops;
+		dev->dma_ops = &nommu_dma_ops;
 		goto out;
 	}
 	init_iommu_group(dev);
 
@@ -2181,7 +2181,7 @@ static int amd_iommu_add_device(struct device *dev)
 	if (domain->type == IOMMU_DOMAIN_IDENTITY)
 		dev_data->passthrough = true;
 	else
-		dev->archdata.dma_ops = &amd_iommu_dma_ops;
+		dev->dma_ops = &amd_iommu_dma_ops;
 
 out:
 	iommu_completion_wait(iommu);
@@ -158,7 +158,7 @@ mbus_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_
 	mbdev->dev.parent = pdev;
 	mbdev->id.device = id;
 	mbdev->id.vendor = MBUS_DEV_ANY_ID;
-	mbdev->dev.archdata.dma_ops = dma_ops;
+	mbdev->dev.dma_ops = dma_ops;
 	mbdev->dev.dma_mask = &mbdev->dev.coherent_dma_mask;
 	dma_set_mask(&mbdev->dev, DMA_BIT_MASK(64));
 	mbdev->dev.release = mbus_release_dev;
@@ -154,7 +154,7 @@ scif_register_device(struct device *pdev, int id, const struct dma_map_ops *dma_
 	sdev->dev.parent = pdev;
 	sdev->id.device = id;
 	sdev->id.vendor = SCIF_DEV_ANY_ID;
-	sdev->dev.archdata.dma_ops = dma_ops;
+	sdev->dev.dma_ops = dma_ops;
 	sdev->dev.release = scif_release_dev;
 	sdev->hw_ops = hw_ops;
 	sdev->dnode = dnode;
@@ -154,7 +154,7 @@ vop_register_device(struct device *pdev, int id,
 	vdev->dev.parent = pdev;
 	vdev->id.device = id;
 	vdev->id.vendor = VOP_DEV_ANY_ID;
-	vdev->dev.archdata.dma_ops = dma_ops;
+	vdev->dev.dma_ops = dma_ops;
 	vdev->dev.dma_mask = &vdev->dev.coherent_dma_mask;
 	dma_set_mask(&vdev->dev, DMA_BIT_MASK(64));
 	vdev->dev.release = vop_release_dev;
@@ -921,6 +921,7 @@ struct device {
 #ifdef CONFIG_NUMA
 	int		numa_node;	/* NUMA node this device is close to */
 #endif
+	const struct dma_map_ops *dma_ops;
 	u64		*dma_mask;	/* dma mask (if dma'able device) */
 	u64		coherent_dma_mask;/* Like dma_mask, but for
 					     alloc_coherent mappings as