Commit ee1acbfa authored by Paul Mundt

sh: Handle shm_align_mask also for HAVE_ARCH_UNMAPPED_AREA_TOPDOWN.

Presently shm_align_mask is only honoured in the bottom-up case, but we
still want it applied for proper colouring constraints in the top-down case.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 40c8bca7
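For reference, the colour-alignment arithmetic at the heart of this change rounds a candidate address to a cache-alias boundary and then adds the page offset's "colour", so every mapping of the same file page lands on the same cache lines. The standalone userspace sketch below mirrors that arithmetic; the helper names, the 16K alias period, the 4K page size, and the sample values are illustrative assumptions (in the kernel, shm_align_mask is a variable set from the probed cache at boot) and are not part of the patch.

/*
 * Minimal sketch of the colouring arithmetic -- illustrative only, not
 * kernel code.  SHM_ALIGN_MASK and PAGE_SHIFT are assumed values (16K
 * alias period, 4K pages, typical for aliasing SH-4 dcaches).
 */
#include <stdio.h>

#define PAGE_SHIFT	12		/* assumed 4K pages */
#define SHM_ALIGN_MASK	0x3fffUL	/* assumed 16K alias period - 1 */

/* Round addr up to an alias boundary, then add pgoff's colour. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = (addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK;
	unsigned long off = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	return base + off;
}

/*
 * Round down instead, which a top-down search needs so a candidate
 * address is never pushed above the hole being probed.
 */
static unsigned long colour_align_down(unsigned long addr,
				       unsigned long pgoff)
{
	unsigned long base = addr & ~SHM_ALIGN_MASK;
	unsigned long off = (pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK;

	return (base + off <= addr) ? base + off : base - off;
}

int main(void)
{
	unsigned long pgoff = 3;		/* arbitrary file page offset */
	unsigned long addr = 0x2000b500UL;	/* arbitrary candidate address */
	unsigned long up = colour_align(addr, pgoff);
	unsigned long down = colour_align_down(addr, pgoff);

	/*
	 * The MAP_FIXED path in the patch rejects a shared mapping unless
	 * (addr - (pgoff << PAGE_SHIFT)) & shm_align_mask == 0; both
	 * aligned results satisfy that test for these sample values.
	 */
	printf("up   = %#lx, colour ok = %d\n", up,
	       ((up - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK) == 0);
	printf("down = %#lx, colour ok = %d\n", down,
	       ((down - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK) == 0);

	return 0;
}

Both lines print colour ok = 1 for these inputs, matching the alias constraint the patch now enforces in the top-down path as well.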
arch/sh/include/asm/cacheflush.h
@@ -75,7 +75,5 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 #define flush_cache_vmap(start, end)		flush_cache_all()
 #define flush_cache_vunmap(start, end)		flush_cache_all()
 
-#define HAVE_ARCH_UNMAPPED_AREA
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
arch/sh/include/asm/pgtable.h
@@ -154,6 +154,10 @@ extern void kmap_coherent_init(void);
 #define kmap_coherent_init()	do { } while (0)
 #endif
 
+/* arch/sh/mm/mmap.c */
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
 #include <asm-generic/pgtable.h>
 
 #endif /* __ASM_SH_PGTABLE_H */
arch/sh/mm/mmap.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/mm/mmap.c
  *
- * Copyright (C) 2008  Paul Mundt
+ * Copyright (C) 2008 - 2009  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -21,9 +21,26 @@ EXPORT_SYMBOL(shm_align_mask);
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
-#define COLOUR_ALIGN(addr, pgoff)				\
-	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
-	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
+static inline unsigned long COLOUR_ALIGN(unsigned long addr,
+					 unsigned long pgoff)
+{
+	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	return base + off;
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+					      unsigned long pgoff)
+{
+	unsigned long base = addr & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
 
 unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	unsigned long len, unsigned long pgoff, unsigned long flags)
@@ -103,6 +120,117 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			addr = COLOUR_ALIGN(addr, pgoff);
 		}
 	}
+
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+			       const unsigned long len, const unsigned long pgoff,
+			       const unsigned long flags)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	int do_colour_align;
+
+	if (flags & MAP_FIXED) {
+		/* We do not accept a shared mapping if it would violate
+		 * cache aliasing constraints.
+		 */
+		if ((flags & MAP_SHARED) &&
+		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (unlikely(len > TASK_SIZE))
+		return -ENOMEM;
+
+	do_colour_align = 0;
+	if (filp || (flags & MAP_SHARED))
+		do_colour_align = 1;
+
+	/* requesting a specific address */
+	if (addr) {
+		if (do_colour_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	/* check if free_area_cache is useful for us */
+	if (len <= mm->cached_hole_size) {
+		mm->cached_hole_size = 0;
+		mm->free_area_cache = mm->mmap_base;
+	}
+
+	/* either no address requested or can't fit in requested address hole */
+	addr = mm->free_area_cache;
+	if (do_colour_align) {
+		unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);
+
+		addr = base + len;
+	}
+
+	/* make sure it can fit in the remaining address space */
+	if (likely(addr > len)) {
+		vma = find_vma(mm, addr-len);
+		if (!vma || addr <= vma->vm_start) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr-len);
+		}
+	}
+
+	if (unlikely(mm->mmap_base < len))
+		goto bottomup;
+
+	addr = mm->mmap_base-len;
+	if (do_colour_align)
+		addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+	do {
+		/*
+		 * Lookup failure means no vma is above this address,
+		 * else if new region fits below vma->vm_start,
+		 * return with success:
+		 */
+		vma = find_vma(mm, addr);
+		if (likely(!vma || addr+len <= vma->vm_start)) {
+			/* remember the address as a hint for next time */
+			return (mm->free_area_cache = addr);
+		}
+
+		/* remember the largest hole we saw so far */
+		if (addr + mm->cached_hole_size < vma->vm_start)
+			mm->cached_hole_size = vma->vm_start - addr;
+
+		/* try just below the current vma->vm_start */
+		addr = vma->vm_start-len;
+		if (do_colour_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+	} while (likely(len < vma->vm_start));
+
+bottomup:
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	mm->cached_hole_size = ~0UL;
+	mm->free_area_cache = TASK_UNMAPPED_BASE;
+	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+
+	/*
+	 * Restore the topdown base:
+	 */
+	mm->free_area_cache = mm->mmap_base;
+	mm->cached_hole_size = ~0UL;
+
+	return addr;
+}
 #endif /* CONFIG_MMU */
 
 /*
...