Commit e1171aca authored by Linus Torvalds

Merge tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa

Pull Xtensa fixes from Max Filippov:
 "Two fixes for reserved memory/DMA buffers allocation in high memory on
  xtensa architecture

   - fix memory accounting when reserved memory is in high memory region

   - fix DMA allocation from high memory"

* tag 'xtensa-20180225' of git://github.com/jcmvbkbc/linux-xtensa:
  xtensa: support DMA buffers in high memory
  xtensa: fix high memory/reserved memory collision
parents c23a7575 6137e416
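
For context, the code path touched here is the one behind dma_alloc_coherent(): before this merge, xtensa coherent buffers had to come from KSEG-mapped low memory, and with it they may also be backed by high memory. A minimal sketch of the affected API from a driver's point of view (the example function names are ours, not part of this merge):

	#include <linux/dma-mapping.h>

	/* Allocate a coherent DMA buffer. On xtensa the backing pages may
	 * now come from high memory; the arch code remaps them uncached.
	 */
	static void *example_buf_alloc(struct device *dev, size_t size,
				       dma_addr_t *dma)
	{
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}

	static void example_buf_free(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma)
	{
		dma_free_coherent(dev, size, cpu_addr, dma);
	}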
--- a/arch/xtensa/kernel/pci-dma.c
+++ b/arch/xtensa/kernel/pci-dma.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/dma-contiguous.h>
+#include <linux/dma-direct.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
@@ -123,7 +124,7 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 			      unsigned long attrs)
 {
 	unsigned long ret;
-	unsigned long uncached = 0;
+	unsigned long uncached;
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
@@ -144,15 +145,27 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 	if (!page)
 		return NULL;
 
-	ret = (unsigned long)page_address(page);
+	*handle = phys_to_dma(dev, page_to_phys(page));
 
-	/* We currently don't support coherent memory outside KSEG */
+#ifdef CONFIG_MMU
+	if (PageHighMem(page)) {
+		void *p;
 
+		p = dma_common_contiguous_remap(page, size, VM_MAP,
+						pgprot_noncached(PAGE_KERNEL),
+						__builtin_return_address(0));
+		if (!p) {
+			if (!dma_release_from_contiguous(dev, page, count))
+				__free_pages(page, get_order(size));
+		}
+		return p;
+	}
+#endif
+	ret = (unsigned long)page_address(page);
 	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
 	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
 
 	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
-	*handle = virt_to_bus((void *)ret);
 	__invalidate_dcache_range(ret, size);
 
 	return (void *)uncached;
@@ -161,13 +174,20 @@ static void *xtensa_dma_alloc(struct device *dev, size_t size,
 static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
 			    dma_addr_t dma_handle, unsigned long attrs)
 {
-	unsigned long addr = (unsigned long)vaddr +
-		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
-	struct page *page = virt_to_page(addr);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
-	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);
+	unsigned long addr = (unsigned long)vaddr;
+	struct page *page;
+
+	if (addr >= XCHAL_KSEG_BYPASS_VADDR &&
+	    addr - XCHAL_KSEG_BYPASS_VADDR < XCHAL_KSEG_SIZE) {
+		addr += XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
+		page = virt_to_page(addr);
+	} else {
+#ifdef CONFIG_MMU
+		dma_common_free_remap(vaddr, size, VM_MAP);
+#endif
+		page = pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_handle)));
+	}
 
 	if (!dma_release_from_contiguous(dev, page, count))
 		__free_pages(page, get_order(size));
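
A note on the address arithmetic in xtensa_dma_alloc()/xtensa_dma_free(): the KSEG cached and bypass (uncached) windows map the same physical memory at fixed virtual addresses, so switching between the cached and uncached view of a low-memory buffer is a constant offset; high-memory buffers have no such alias, which is why the free path above must first test whether vaddr falls inside the bypass window. A small illustrative sketch (the helper names are ours, not from this diff):

	/* Illustrative only: convert between the two KSEG aliases of the
	 * same physical memory by shifting the window base address.
	 */
	static unsigned long kseg_cached_to_bypass(unsigned long vaddr)
	{
		return vaddr - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_BYPASS_VADDR;
	}

	static unsigned long kseg_bypass_to_cached(unsigned long vaddr)
	{
		return vaddr - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_CACHED_VADDR;
	}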
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -79,19 +79,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
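
The substance of the reserved-memory fix is the clamping loop in free_highpages(): reserved ranges are punched out of each memory region so that only genuinely free page frames are handed to the buddy allocator (memblock keeps both region lists sorted by base address, which the loop relies on). A standalone sketch of the same interval logic, with hypothetical types and a stub free_span() standing in for free_area_high(), for illustration only:

	/* Punch reserved spans out of the region [start, end) and report
	 * only the remaining free sub-ranges. Assumes the reserved list
	 * is sorted by base address, as memblock guarantees.
	 */
	struct span { unsigned long start, end; };

	static void free_span(unsigned long start, unsigned long end)
	{
		/* stand-in for free_area_high() */
	}

	static void free_region(unsigned long start, unsigned long end,
				const struct span *res, int nr_res)
	{
		int i;

		for (i = 0; i < nr_res; i++) {
			unsigned long rs = res[i].start, re = res[i].end;

			if (re < start)		/* entirely below the region */
				continue;
			if (rs < start)		/* clamp to the region */
				rs = start;
			if (rs > end)
				rs = end;
			if (re > end)
				re = end;
			if (rs != start)	/* free the gap before the span */
				free_span(start, rs);
			start = re;		/* resume after the span */
			if (start == end)
				break;
		}
		if (start < end)		/* free whatever remains */
			free_span(start, end);
	}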