Commit 474197a4 authored by Linus Torvalds

Merge tag 'dma-mapping-6.6-2023-09-09' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - move a dma-debug call that prints a message out from under a lock
   that was causing lock-ordering problems with serial drivers (Sergey
   Senozhatsky); a minimal sketch of the resulting pattern follows this
   list

 - fix the CONFIG_DMA_NUMA_CMA Kconfig entry to have the right
   dependency and not default to y (Christoph Hellwig)

 - move an ifdef a bit to remove a __maybe_unused that seems to trip up
   some sensitivities (Christoph Hellwig)

 - revert a bogus check in the CMA allocator (Zhenhua Huang)
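
The dma-debug change boils down to "record the event under the lock,
report it after dropping the lock". The sketch below is only a userspace
illustration of that pattern (a pthread mutex standing in for
free_entries_lock and printf() for pr_info()); it is not the kernel code:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int nr_total_entries;	/* grows when the pool is expanded */

	/* Called with pool_lock NOT held, so the print path can never recurse
	 * into code that takes pool_lock again. */
	static void report_pool_growth(unsigned int nr_entries)
	{
		printf("entry pool grown to %u\n", nr_entries);
	}

	static void alloc_entry(bool need_expand)
	{
		bool grew = false;
		unsigned int snapshot = 0;

		pthread_mutex_lock(&pool_lock);
		if (need_expand) {
			nr_total_entries += 256;	/* pretend the pool was expanded */
			grew = true;
			snapshot = nr_total_entries;	/* remember what to report */
		}
		pthread_mutex_unlock(&pool_lock);

		if (grew)				/* report only after unlocking */
			report_pool_growth(snapshot);
	}

	int main(void)
	{
		alloc_entry(true);
		return 0;
	}

The debug.c hunk below does the same thing with free_entries_lock: it
latches alloc_check_leak and nr_total_entries inside the spinlock and only
calls __dma_entry_alloc_check_leak() after spin_unlock_irqrestore().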

* tag 'dma-mapping-6.6-2023-09-09' of git://git.infradead.org/users/hch/dma-mapping:
  Revert "dma-contiguous: check for memory region overlap"
  dma-pool: remove a __maybe_unused label in atomic_pool_expand
  dma-contiguous: fix the Kconfig entry for CONFIG_DMA_NUMA_CMA
  dma-debug: don't call __dma_entry_alloc_check_leak() under free_entries_lock
parents 060249b5 f875db4f
@@ -160,7 +160,7 @@ if DMA_CMA
 config DMA_NUMA_CMA
 	bool "Enable separate DMA Contiguous Memory Area for NUMA Node"
-	default NUMA
+	depends on NUMA
 	help
 	  Enable this option to get numa CMA areas so that NUMA devices
 	  can get local memory by DMA coherent APIs.
@@ -473,11 +473,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
 		return -EBUSY;
 	}
 
-	if (memblock_is_region_reserved(rmem->base, rmem->size)) {
-		pr_info("Reserved memory: overlap with other memblock reserved region\n");
-		return -EBUSY;
-	}
-
 	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
 	    of_get_flat_dt_prop(node, "no-map", NULL))
 		return -EINVAL;
@@ -637,15 +637,19 @@ static struct dma_debug_entry *__dma_entry_alloc(void)
 	return entry;
 }
 
-static void __dma_entry_alloc_check_leak(void)
+/*
+ * This should be called outside of free_entries_lock scope to avoid potential
+ * deadlocks with serial consoles that use DMA.
+ */
+static void __dma_entry_alloc_check_leak(u32 nr_entries)
 {
-	u32 tmp = nr_total_entries % nr_prealloc_entries;
+	u32 tmp = nr_entries % nr_prealloc_entries;
 
 	/* Shout each time we tick over some multiple of the initial pool */
 	if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
 		pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
-			nr_total_entries,
-			(nr_total_entries / nr_prealloc_entries));
+			nr_entries,
+			(nr_entries / nr_prealloc_entries));
 	}
 }
 
@@ -656,8 +660,10 @@ static void __dma_entry_alloc_check_leak(void)
  */
 static struct dma_debug_entry *dma_entry_alloc(void)
 {
+	bool alloc_check_leak = false;
 	struct dma_debug_entry *entry;
 	unsigned long flags;
+	u32 nr_entries;
 
 	spin_lock_irqsave(&free_entries_lock, flags);
 	if (num_free_entries == 0) {
@@ -667,13 +673,17 @@ static struct dma_debug_entry *dma_entry_alloc(void)
 			pr_err("debugging out of memory - disabling\n");
 			return NULL;
 		}
-		__dma_entry_alloc_check_leak();
+		alloc_check_leak = true;
+		nr_entries = nr_total_entries;
 	}
 
 	entry = __dma_entry_alloc();
 
 	spin_unlock_irqrestore(&free_entries_lock, flags);
 
+	if (alloc_check_leak)
+		__dma_entry_alloc_check_leak(nr_entries);
+
 #ifdef CONFIG_STACKTRACE
 	entry->stack_len = stack_trace_save(entry->stack_entries,
 					    ARRAY_SIZE(entry->stack_entries),
@@ -135,9 +135,9 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 remove_mapping:
 #ifdef CONFIG_DMA_DIRECT_REMAP
 	dma_common_free_remap(addr, pool_size);
-#endif
-free_page: __maybe_unused
+free_page:
 	__free_pages(page, order);
+#endif
 out:
 	return ret;
 }