Commit d7e02a93 authored by Christoph Hellwig

dma-mapping: remove leftover NULL device support

Most dma_map_ops implementations already had some issues with a NULL
device, or would simply crash if one was fed to them.  Now that we have
cleaned up all the obvious offenders, we can stop pretending to
support this mode.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 5ab6a91a
@@ -365,13 +365,12 @@ __get_free_pages() (but takes size instead of a page order).  If your
 driver needs regions sized smaller than a page, you may prefer using
 the dma_pool interface, described below.
 
-The consistent DMA mapping interfaces, for non-NULL dev, will by
-default return a DMA address which is 32-bit addressable.  Even if the
-device indicates (via DMA mask) that it may address the upper 32-bits,
-consistent allocation will only return > 32-bit addresses for DMA if
-the consistent DMA mask has been explicitly changed via
-dma_set_coherent_mask().  This is true of the dma_pool interface as
-well.
+The consistent DMA mapping interfaces, will by default return a DMA address
+which is 32-bit addressable.  Even if the device indicates (via the DMA mask)
+that it may address the upper 32-bits, consistent allocation will only
+return > 32-bit addresses for DMA if the consistent DMA mask has been
+explicitly changed via dma_set_coherent_mask().  This is true of the
+dma_pool interface as well.
 
 dma_alloc_coherent() returns two values: the virtual address which you
 can use to access it from the CPU and dma_handle which you pass to the
...
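
The documentation hunk above keeps the core rule while dropping the
now-meaningless "for non-NULL dev" qualifier: coherent allocations default
to 32-bit addressable DMA addresses unless the coherent mask is widened.
A minimal sketch of that opt-in pattern follows; mydev_probe() is a
hypothetical driver probe function, not part of this commit.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/* Hypothetical probe function, for illustration only. */
static int mydev_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;
	int ret;

	/* Opt in to coherent allocations above the 32-bit boundary. */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with dma_handle, use cpu_addr ... */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}

Without the dma_set_coherent_mask() call, dma_alloc_coherent() would only
hand back DMA addresses below the 4 GB boundary, exactly as the text says.
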
@@ -267,9 +267,9 @@ size_t dma_direct_max_mapping_size(struct device *dev);
 
 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
-	if (dev && dev->dma_ops)
+	if (dev->dma_ops)
 		return dev->dma_ops;
-	return get_arch_dma_ops(dev ? dev->bus : NULL);
+	return get_arch_dma_ops(dev->bus);
 }
 
 static inline void set_dma_ops(struct device *dev,
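
To make the behavioral change concrete, here is a small user-space model
of get_dma_ops() before and after the commit, using stand-in struct
definitions rather than real kernel types. The old variant tolerated a
NULL device by falling back to the architecture default; the new one
dereferences dev unconditionally, so a NULL device now crashes.

#include <stddef.h>
#include <stdio.h>

struct dma_map_ops { const char *name; };
struct bus_type { int unused; };
struct device {
	const struct dma_map_ops *dma_ops;
	struct bus_type *bus;
};

static const struct dma_map_ops arch_ops = { .name = "arch-default" };

static const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
	(void)bus;	/* the arch fallback ignores the bus in this model */
	return &arch_ops;
}

/* Pre-commit behaviour: a NULL dev fell back to the arch default. */
static const struct dma_map_ops *get_dma_ops_old(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

/* Post-commit behaviour: dev must be valid; NULL is a crash. */
static const struct dma_map_ops *get_dma_ops_new(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

int main(void)
{
	struct device dev = { .dma_ops = NULL, .bus = NULL };

	printf("old, NULL dev: %s\n", get_dma_ops_old(NULL)->name);
	printf("new, valid dev: %s\n", get_dma_ops_new(&dev)->name);
	/* get_dma_ops_new(NULL) would dereference a NULL pointer. */
	return 0;
}
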
@@ -650,7 +650,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 
 static inline u64 dma_get_mask(struct device *dev)
 {
-	if (dev && dev->dma_mask && *dev->dma_mask)
+	if (dev->dma_mask && *dev->dma_mask)
 		return *dev->dma_mask;
 	return DMA_BIT_MASK(32);
 }
...
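
dma_get_mask() loses its NULL check the same way: code that relied on
dma_get_mask(NULL) quietly returning DMA_BIT_MASK(32) must now pass the
real device. A sketch of the intended usage, assuming a hypothetical
mydev_setup_dma() helper that is not part of this commit:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper, for illustration only. */
static int mydev_setup_dma(struct device *dev)
{
	int ret;

	/* Set the streaming and coherent masks together. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	/* Safe only because dev is a real device; NULL would now oops. */
	dev_info(dev, "DMA mask: %llx\n", dma_get_mask(dev));
	return 0;
}
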
@@ -311,7 +311,7 @@ static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
 		size_t size)
 {
 	return swiotlb_force != SWIOTLB_FORCE &&
-		(!dev || dma_capable(dev, dma_addr, size));
+		dma_capable(dev, dma_addr, size);
 }
 
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
...
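
The final hunk removes the "!dev ||" escape hatch from
dma_direct_possible(): direct mapping is now allowed only when swiotlb is
not forced and dma_capable() confirms the device can reach the buffer.
The user-space model below is a simplified illustration of that
addressability test (the real dma_capable() also accounts for bus
limits), not the kernel implementation.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool dma_capable_model(uint64_t dma_mask, uint64_t dma_addr,
			      size_t size)
{
	/* The buffer is reachable only if its last byte fits under the mask. */
	return dma_addr + size - 1 <= dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* models DMA_BIT_MASK(32) */

	/* Buffer entirely below 4 GiB: direct mapping is possible. */
	printf("below 4G: %d\n", dma_capable_model(mask32, 0x80000000ULL, 4096));
	/* Buffer above 4 GiB: not capable, would bounce through swiotlb. */
	printf("above 4G: %d\n", dma_capable_model(mask32, 0x180000000ULL, 4096));
	return 0;
}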