Commit b554cb42 authored by Graf Yang, committed by David Howells

NOMMU: support SMP dynamic percpu_alloc

The percpu code requires more functions to be implemented in the mm core
than nommu currently provides.  So add inline implementations, since these
operations are largely meaningless on nommu systems.
Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Greg Ungerer <gerg@uclinux.org>
parent eac522ef
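For context, the hunks below (from include/linux/vmalloc.h) wrap the MMU-only kernel mapping helpers in #ifdef CONFIG_MMU and supply inline stubs for !CONFIG_MMU. On nommu hardware the kernel runs in a flat, permanently mapped address space, so "mapping" a range of pages is already done by construction: the stub for map_kernel_range_noflush() simply reports that every page was mapped (size >> PAGE_SHIFT), and the unmap helpers do nothing. The sketch below shows the kind of caller this unblocks; the function names and error handling are illustrative only, not the actual mm/percpu.c code.

#include <linux/mm.h>		/* struct page, PAGE_SHIFT, PAGE_KERNEL */
#include <linux/vmalloc.h>	/* map_kernel_range_noflush() and friends */

/* Illustrative sketch (hypothetical helpers, not real kernel functions):
 * map a chunk's pages into a kernel address range.  With the new
 * !CONFIG_MMU stubs, map_kernel_range_noflush() reports success and the
 * unmap helper is a no-op, so the same code builds and runs on nommu
 * SMP systems without touching page tables. */
static int example_map_chunk_pages(unsigned long addr, struct page **pages,
				   int nr_pages)
{
	int ret = map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
					   PAGE_KERNEL, pages);

	if (ret < 0)
		return ret;	/* MMU kernels return -errno on failure;
				 * never reached with the nommu stub */
	return 0;
}

static void example_unmap_chunk_pages(unsigned long addr, int nr_pages)
{
	/* No-op on nommu: there are no kernel page tables to tear down. */
	unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}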
@@ -95,10 +95,27 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
+#ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
 				    pgprot_t prot, struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+#else
+static inline int
+map_kernel_range_noflush(unsigned long start, unsigned long size,
+			 pgprot_t prot, struct page **pages)
+{
+	return size >> PAGE_SHIFT;
+}
+static inline void
+unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+}
+static inline void
+unmap_kernel_range(unsigned long addr, unsigned long size)
+{
+}
+#endif
 
 /* Allocate/destroy a 'vmalloc' VM area. */
 extern struct vm_struct *alloc_vm_area(size_t size);
@@ -116,11 +133,26 @@ extern struct vm_struct *vmlist;
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #ifdef CONFIG_SMP
+# ifdef CONFIG_MMU
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 				     const size_t *sizes, int nr_vms,
 				     size_t align);
 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+# else
+static inline struct vm_struct **
+pcpu_get_vm_areas(const unsigned long *offsets,
+		  const size_t *sizes, int nr_vms,
+		  size_t align)
+{
+	return NULL;
+}
+static inline void
+pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+{
+}
+# endif
 #endif
 
 #endif	/* _LINUX_VMALLOC_H */
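The second hunk gives the SMP-only per-CPU area helpers the same treatment: on !CONFIG_MMU, pcpu_get_vm_areas() is stubbed to return NULL and pcpu_free_vm_areas() becomes a no-op, so generic SMP code that references them still compiles and links, and any path that really needs vmalloc-area reservations simply sees an ordinary allocation failure. A minimal caller sketch follows; example_reserve_percpu_areas() is a hypothetical function, not taken from mm/percpu.c.

#include <linux/vmalloc.h>	/* pcpu_get_vm_areas()/pcpu_free_vm_areas() */

/* Hypothetical caller: reserve one vm area per per-CPU group.  On MMU
 * kernels this carves areas out of the vmalloc space at the requested
 * offsets; with the !CONFIG_MMU stub it always returns NULL, which the
 * caller treats as an allocation failure. */
static struct vm_struct **
example_reserve_percpu_areas(const unsigned long *offsets, const size_t *sizes,
			     int nr_vms, size_t align)
{
	struct vm_struct **vms = pcpu_get_vm_areas(offsets, sizes,
						   nr_vms, align);

	if (!vms)
		return NULL;	/* always the case on nommu */

	/* ... use vms[0 .. nr_vms - 1]; release later with
	 * pcpu_free_vm_areas(vms, nr_vms); */
	return vms;
}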