Commit 515dcc2e authored by Linus Torvalds

Merge tag 'dma-mapping-5.15-2' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix more dma-debug fallout (Gerald Schaefer, Hamza Mahfooz)

 - fix a kerneldoc warning (Logan Gunthorpe)

* tag 'dma-mapping-5.15-2' of git://git.infradead.org/users/hch/dma-mapping:
  dma-debug: teach add_dma_entry() about DMA_ATTR_SKIP_CPU_SYNC
  dma-debug: fix sg checks in debug_dma_map_sg()
  dma-mapping: fix the kerneldoc for dma_map_sgtable()
parents 8e37395c c2bbf9d1
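
For context on the first fix: a driver that passes DMA_ATTR_SKIP_CPU_SYNC takes over CPU cache maintenance itself and may legitimately create overlapping mappings of the same cacheline, which dma-debug used to report as an error. The fragment below is a hypothetical illustration of such a caller, not code from this merge; only dma_map_page_attrs()/dma_unmap_page_attrs(), dma_mapping_error() and the attribute itself are real kernel API.

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: the second mapping of the same page skips
 * the automatic CPU sync, so with this series dma-debug no longer reports
 * "cacheline tracking EEXIST, overlapping mappings aren't supported". */
static int example_double_map(struct device *dev, struct page *page)
{
        dma_addr_t first, second;

        first = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE, 0);
        if (dma_mapping_error(dev, first))
                return -ENOMEM;

        second = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE,
                                    DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(dev, second)) {
                dma_unmap_page_attrs(dev, first, PAGE_SIZE, DMA_TO_DEVICE, 0);
                return -ENOMEM;
        }

        /* ... hand both addresses to the hardware, sync caches by hand ... */

        dma_unmap_page_attrs(dev, second, PAGE_SIZE, DMA_TO_DEVICE,
                             DMA_ATTR_SKIP_CPU_SYNC);
        dma_unmap_page_attrs(dev, first, PAGE_SIZE, DMA_TO_DEVICE, 0);
        return 0;
}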
kernel/dma/debug.c

@@ -552,7 +552,7 @@ static void active_cacheline_remove(struct dma_debug_entry *entry)
  * Wrapper function for adding an entry to the hash.
  * This function takes care of locking itself.
  */
-static void add_dma_entry(struct dma_debug_entry *entry)
+static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
 {
         struct hash_bucket *bucket;
         unsigned long flags;
@@ -566,7 +566,7 @@ static void add_dma_entry(struct dma_debug_entry *entry)
         if (rc == -ENOMEM) {
                 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
                 global_disable = true;
-        } else if (rc == -EEXIST) {
+        } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
                 err_printk(entry->dev, entry,
                         "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
         }
@@ -1191,7 +1191,8 @@ void debug_dma_map_single(struct device *dev, const void *addr,
 EXPORT_SYMBOL(debug_dma_map_single);

 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
-                        size_t size, int direction, dma_addr_t dma_addr)
+                        size_t size, int direction, dma_addr_t dma_addr,
+                        unsigned long attrs)
 {
         struct dma_debug_entry *entry;
@@ -1222,7 +1223,7 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
                 check_for_illegal_area(dev, addr, size);
         }

-        add_dma_entry(entry);
+        add_dma_entry(entry, attrs);
 }

 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -1280,7 +1281,8 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }

 void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                      int nents, int mapped_ents, int direction)
+                      int nents, int mapped_ents, int direction,
+                      unsigned long attrs)
 {
         struct dma_debug_entry *entry;
         struct scatterlist *s;
@@ -1289,6 +1291,12 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
         if (unlikely(dma_debug_disabled()))
                 return;

+        for_each_sg(sg, s, nents, i) {
+                check_for_stack(dev, sg_page(s), s->offset);
+                if (!PageHighMem(sg_page(s)))
+                        check_for_illegal_area(dev, sg_virt(s), s->length);
+        }
+
         for_each_sg(sg, s, mapped_ents, i) {
                 entry = dma_entry_alloc();
                 if (!entry)
@@ -1304,15 +1312,9 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
                 entry->sg_call_ents   = nents;
                 entry->sg_mapped_ents = mapped_ents;

-                check_for_stack(dev, sg_page(s), s->offset);
-                if (!PageHighMem(sg_page(s))) {
-                        check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
-                }
-
                 check_sg_segment(dev, s);

-                add_dma_entry(entry);
+                add_dma_entry(entry, attrs);
         }
 }
@@ -1368,7 +1370,8 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 }

 void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                              dma_addr_t dma_addr, void *virt)
+                              dma_addr_t dma_addr, void *virt,
+                              unsigned long attrs)
 {
         struct dma_debug_entry *entry;
@@ -1398,7 +1401,7 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size,
         else
                 entry->pfn = page_to_pfn(virt_to_page(virt));

-        add_dma_entry(entry);
+        add_dma_entry(entry, attrs);
 }

 void debug_dma_free_coherent(struct device *dev, size_t size,
@@ -1429,7 +1432,8 @@ void debug_dma_free_coherent(struct device *dev, size_t size,
 }

 void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
-                            int direction, dma_addr_t dma_addr)
+                            int direction, dma_addr_t dma_addr,
+                            unsigned long attrs)
 {
         struct dma_debug_entry *entry;
@@ -1449,7 +1453,7 @@ void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
         entry->direction    = direction;
         entry->map_err_type = MAP_ERR_NOT_CHECKED;

-        add_dma_entry(entry);
+        add_dma_entry(entry, attrs);
 }

 void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
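
The two debug_dma_map_sg() hunks above carry the sg-checks fix: the stack and vmalloc checks operate on the CPU view of the scatterlist, so they now iterate over all nents entries and use s->length, instead of iterating over the mapped_ents entries (which can be fewer once an IOMMU coalesces segments) and using the DMA-side sg_dma_len(). The sketch below is a hypothetical caller, not part of the patch, illustrating why the two counts must not be mixed up; only dma_map_sg()/dma_unmap_sg() and the sg_* accessors are real kernel API.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: nents CPU-side entries may come back as fewer
 * DMA segments, so the device is programmed with the return value of
 * dma_map_sg() while CPU-side bookkeeping keeps using nents. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents)
{
        int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

        if (mapped == 0)
                return -EIO;

        /* program the hardware with 'mapped' segments via sg_dma_address()
         * and sg_dma_len(); keep using 'nents', sg_virt() and s->length for
         * anything done from the CPU, and pass 'nents' to dma_unmap_sg() */

        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        return 0;
}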
kernel/dma/debug.h

@@ -11,26 +11,30 @@
 #ifdef CONFIG_DMA_API_DEBUG
 extern void debug_dma_map_page(struct device *dev, struct page *page,
                                size_t offset, size_t size,
-                               int direction, dma_addr_t dma_addr);
+                               int direction, dma_addr_t dma_addr,
+                               unsigned long attrs);

 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction);

 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                             int nents, int mapped_ents, int direction);
+                             int nents, int mapped_ents, int direction,
+                             unsigned long attrs);

 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, int dir);

 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                     dma_addr_t dma_addr, void *virt);
+                                     dma_addr_t dma_addr, void *virt,
+                                     unsigned long attrs);

 extern void debug_dma_free_coherent(struct device *dev, size_t size,
                                     void *virt, dma_addr_t addr);

 extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                    size_t size, int direction,
-                                   dma_addr_t dma_addr);
+                                   dma_addr_t dma_addr,
+                                   unsigned long attrs);

 extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
                                      size_t size, int direction);
@@ -53,7 +57,8 @@ extern void debug_dma_sync_sg_for_device(struct device *dev,
 #else /* CONFIG_DMA_API_DEBUG */
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
                                       size_t offset, size_t size,
-                                      int direction, dma_addr_t dma_addr)
+                                      int direction, dma_addr_t dma_addr,
+                                      unsigned long attrs)
 {
 }
@@ -63,7 +68,8 @@ static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
 }

 static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                                    int nents, int mapped_ents, int direction)
+                                    int nents, int mapped_ents, int direction,
+                                    unsigned long attrs)
 {
 }
@@ -74,7 +80,8 @@ static inline void debug_dma_unmap_sg(struct device *dev,
 }

 static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
-                                            dma_addr_t dma_addr, void *virt)
+                                            dma_addr_t dma_addr, void *virt,
+                                            unsigned long attrs)
 {
 }
@@ -85,7 +92,8 @@ static inline void debug_dma_free_coherent(struct device *dev, size_t size,
 static inline void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
                                           size_t size, int direction,
-                                          dma_addr_t dma_addr)
+                                          dma_addr_t dma_addr,
+                                          unsigned long attrs)
 {
 }
kernel/dma/mapping.c

@@ -156,7 +156,7 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
         else
                 addr = ops->map_page(dev, page, offset, size, dir, attrs);
-        debug_dma_map_page(dev, page, offset, size, dir, addr);
+        debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

         return addr;
 }
@@ -195,7 +195,7 @@ static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                 ents = ops->map_sg(dev, sg, nents, dir, attrs);

         if (ents > 0)
-                debug_dma_map_sg(dev, sg, nents, ents, dir);
+                debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
         else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                               ents != -EIO))
                 return -EIO;
@@ -249,12 +249,12 @@ EXPORT_SYMBOL(dma_map_sg_attrs);
  * Returns 0 on success or a negative error code on error. The following
  * error codes are supported with the given meaning:
  *
- *   -EINVAL - An invalid argument, unaligned access or other error
- *             in usage. Will not succeed if retried.
- *   -ENOMEM - Insufficient resources (like memory or IOVA space) to
- *             complete the mapping. Should succeed if retried later.
- *   -EIO    - Legacy error code with an unknown meaning. eg. this is
- *             returned if a lower level call returned DMA_MAPPING_ERROR.
+ *   -EINVAL   An invalid argument, unaligned access or other error
+ *             in usage. Will not succeed if retried.
+ *   -ENOMEM   Insufficient resources (like memory or IOVA space) to
+ *             complete the mapping. Should succeed if retried later.
+ *   -EIO      Legacy error code with an unknown meaning. eg. this is
+ *             returned if a lower level call returned DMA_MAPPING_ERROR.
  */
 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                 enum dma_data_direction dir, unsigned long attrs)
@@ -305,7 +305,7 @@ dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
         else if (ops->map_resource)
                 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

-        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
+        debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
         return addr;
 }
 EXPORT_SYMBOL(dma_map_resource);
@@ -510,7 +510,7 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
         else
                 return NULL;

-        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
         return cpu_addr;
 }
 EXPORT_SYMBOL(dma_alloc_attrs);
@@ -566,7 +566,7 @@ struct page *dma_alloc_pages(struct device *dev, size_t size,
         struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

         if (page)
-                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
+                debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
         return page;
 }
 EXPORT_SYMBOL_GPL(dma_alloc_pages);
@@ -644,7 +644,7 @@ struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
         if (sgt) {
                 sgt->nents = 1;
-                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir);
+                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
         }
         return sgt;
 }
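
The kerneldoc hunk above documents dma_map_sgtable()'s error contract, which the third fix in this pull cleans up. The sketch below is a hypothetical caller acting on that contract, not code from this merge; only dma_map_sgtable()/dma_unmap_sgtable() are real kernel API, and the retry-policy comments simply restate the kerneldoc.

#include <linux/dma-mapping.h>

/* Hypothetical caller acting on the documented return codes of
 * dma_map_sgtable(): 0 on success, negative errno on failure. */
static int example_map_sgtable(struct device *dev, struct sg_table *sgt)
{
        int ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);

        if (ret == -ENOMEM)
                return ret;     /* transient; may succeed if retried later */
        if (ret)
                return ret;     /* -EINVAL or -EIO; retrying will not help */

        /* ... program the device using sgt->sgl and sgt->nents ... */

        dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        return 0;
}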