Commit e83b009c authored by Linus Torvalds

Merge tag 'dma-mapping-5.3-4' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix the handling of the bus_dma_mask in dma_get_required_mask, which
   caused a regression in this merge window (Lucas Stach)

 - fix a regression in the handling of DMA_ATTR_NO_KERNEL_MAPPING (me)

 - fix dma_mmap_coherent to not cause page attribute mismatches on
   coherent architectures like x86 (me)
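For the first fix, dma_get_required_mask() is meant to report the smallest mask that still covers every physical address the device may be asked to reach, typically so a driver can decide whether enabling 64-bit addressing is worthwhile; clamping it to the bus mask under-reported that value. Below is a minimal user-space sketch (not kernel code) of the rounding done in dma_direct_get_required_mask() further down, where max_dma is a made-up example value and fls64_like() stands in for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

static int fls64_like(uint64_t x)	/* 1-based index of the highest set bit */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	/* Hypothetical highest DMA address: top of RAM just above 8 GiB. */
	uint64_t max_dma = 0x23fffffffULL;

	/* Same rounding as dma_direct_get_required_mask(). */
	uint64_t mask = (1ULL << (fls64_like(max_dma) - 1)) * 2 - 1;

	/* Prints 0x3ffffffff, i.e. 34 address bits are required. */
	printf("required mask: %#llx\n", (unsigned long long)mask);
	return 0;
}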

* tag 'dma-mapping-5.3-4' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: fix page attributes for dma_mmap_*
  dma-direct: don't truncate dma_required_mask to bus addressing capabilities
  dma-direct: fix DMA_ATTR_NO_KERNEL_MAPPING
parents b5e33e44 33dcb37c

arch/arm/mm/dma-mapping.c
@@ -2405,9 +2405,7 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
-		return __get_dma_pgprot(attrs, prot);
-	return prot;
+	return __get_dma_pgprot(attrs, prot);
 }
 
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,

arch/arm64/mm/dma-mapping.c
@@ -14,9 +14,7 @@
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
-		return pgprot_writecombine(prot);
-	return prot;
+	return pgprot_writecombine(prot);
 }
 
 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,

arch/powerpc/Kconfig
@@ -121,7 +121,6 @@ config PPC
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_MMAP_PGPROT
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL

arch/powerpc/kernel/Makefile
@@ -49,8 +49,7 @@ obj-y				:= cputable.o ptrace.o syscalls.o \
 				   signal.o sysfs.o cacheinfo.o time.o \
 				   prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o misc_$(BITS).o \
-				   of_platform.o prom_parse.o \
-				   dma-common.o
+				   of_platform.o prom_parse.o
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o

arch/powerpc/kernel/dma-common.c (deleted)
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Contains common dma routines for all powerpc platforms.
- *
- * Copyright (C) 2019 Shawn Anastasio.
- */
-
-#include <linux/mm.h>
-#include <linux/dma-noncoherent.h>
-
-pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
-		unsigned long attrs)
-{
-	if (!dev_is_dma_coherent(dev))
-		return pgprot_noncached(prot);
-	return prot;
-}

drivers/iommu/dma-iommu.c
@@ -572,7 +572,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	struct iova_domain *iovad = &cookie->iovad;
 	bool coherent = dev_is_dma_coherent(dev);
 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
-	pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
 	struct page **pages;
 	struct sg_table sgt;
@@ -973,7 +973,7 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 		return NULL;
 
 	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
-		pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
+		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
 
 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
 				VM_USERMAP, prot, __builtin_return_address(0));
@@ -1033,7 +1033,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn, off = vma->vm_pgoff;
 	int ret;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;

include/linux/dma-noncoherent.h
@@ -42,13 +42,18 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 		dma_addr_t dma_addr);
 
-#ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT
 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 		unsigned long attrs);
+#ifdef CONFIG_MMU
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
 #else
-# define arch_dma_mmap_pgprot(dev, prot, attrs)	pgprot_noncached(prot)
-#endif
+static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+		unsigned long attrs)
+{
+	return prot;	/* no protection bits supported without page tables */
+}
+#endif /* CONFIG_MMU */
 
 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,

kernel/dma/direct.c
@@ -47,9 +47,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
-	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
-		max_dma = dev->bus_dma_mask;
-
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
@@ -130,10 +127,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
 		/* remove any dirty cache lines on the kernel alias */
 		if (!PageHighMem(page))
 			arch_dma_prep_coherent(page, size);
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		/* return the page pointer as the opaque cookie */
 		return page;
 	}
@@ -178,7 +177,8 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
 		__dma_direct_free_pages(dev, size, cpu_addr);
 		return;
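Context for the two DMA_ATTR_NO_KERNEL_MAPPING hunks above: with that attribute the value returned by dma_alloc_attrs() is only an opaque cookie to hand back to dma_free_attrs(), never a kernel address to dereference, and the added line now also fills in the DMA handle on that path. A hedged sketch of the driver-side pattern (the device pointer, size, and the example_* function names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical driver state: the buffer is only ever accessed by the device. */
static void *cookie;
static dma_addr_t bus_addr;

static int example_alloc(struct device *dev, size_t size)
{
	cookie = dma_alloc_attrs(dev, size, &bus_addr, GFP_KERNEL,
				 DMA_ATTR_NO_KERNEL_MAPPING);
	if (!cookie)
		return -ENOMEM;
	/* Program bus_addr into the device; never dereference cookie. */
	return 0;
}

static void example_free(struct device *dev, size_t size)
{
	dma_free_attrs(dev, size, cookie, bus_addr, DMA_ATTR_NO_KERNEL_MAPPING);
}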

kernel/dma/mapping.c
@@ -150,6 +150,23 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_get_sgtable_attrs);
 
+#ifdef CONFIG_MMU
+/*
+ * Return the page attributes used for mapping dma_alloc_* memory, either in
+ * kernel space if remapping is needed, or to userspace through dma_mmap_*.
+ */
+pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
+{
+	if (dev_is_dma_coherent(dev) ||
+	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
+	     (attrs & DMA_ATTR_NON_CONSISTENT)))
+		return prot;
+	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT))
+		return arch_dma_mmap_pgprot(dev, prot, attrs);
+	return pgprot_noncached(prot);
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
@@ -164,7 +181,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long pfn;
 	int ret = -ENXIO;
 
-	vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;

kernel/dma/remap.c
@@ -218,7 +218,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 
 	/* create a coherent mapping */
 	ret = dma_common_contiguous_remap(page, size, VM_USERMAP,
-			arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs),
+			dma_pgprot(dev, PAGE_KERNEL, attrs),
 			__builtin_return_address(0));
 	if (!ret) {
 		__dma_direct_free_pages(dev, size, page);
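The dma_pgprot() change matters to any driver that exposes dma_alloc_coherent() memory to user space through dma_mmap_coherent(): on a cache-coherent architecture such as x86 the user mapping now keeps the same cacheable attributes as the kernel mapping, avoiding the attribute mismatch named in the commit message. A hedged sketch of the usual mmap pattern (struct mydev, its fields, and mydev_mmap() are hypothetical; dma_alloc_coherent() and dma_mmap_coherent() are the real APIs):

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

struct mydev {				/* made-up driver state */
	struct device	*dev;
	void		*cpu_addr;	/* from dma_alloc_coherent() */
	dma_addr_t	dma_handle;
	size_t		buf_size;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;

	/*
	 * Ends up in dma_common_mmap()/iommu_dma_mmap(), which now derive
	 * vma->vm_page_prot via dma_pgprot().
	 */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle, md->buf_size);
}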