Commit 0fc0531e authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu:
  percpu: update comments to reflect that percpu allocations are always zero-filled
  percpu: Optimize __get_cpu_var()
  x86, percpu: Optimize this_cpu_ptr
  percpu: clear memory allocated with the km allocator
  percpu: fix build breakage on s390 and cleanup build configuration tests
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP

Fixed up trivial conflicts in include/linux/percpu.h
parents 91b74501 9329ba97
@@ -47,6 +47,20 @@
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
 #define __my_cpu_offset		percpu_read(this_cpu_off)
+
+/*
+ * Compared to the generic __my_cpu_offset version, the following
+ * saves one instruction and avoids clobbering a temp register.
+ */
+#define __this_cpu_ptr(ptr)				\
+({							\
+	unsigned long tcp_ptr__;			\
+	__verify_pcpu_ptr(ptr);				\
+	asm volatile("add " __percpu_arg(1) ", %0"	\
+		     : "=r" (tcp_ptr__)			\
+		     : "m" (this_cpu_off), "0" (ptr));	\
+	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
+})
 #else
 #define __percpu_arg(x)		"%P" #x
 #endif
......
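For context, a minimal usage sketch of the accessor the asm above backs; the struct and variable names are hypothetical, for illustration only. The optimization changes only how the current CPU's address is computed, not how callers use it:

#include <linux/percpu.h>

/* hypothetical per-cpu statistics object, for illustration only */
struct my_stats {
	unsigned long packets;
};
static DEFINE_PER_CPU(struct my_stats, my_stats);

static void my_count_packet(void)
{
	/* caller is assumed to have preemption disabled, e.g. via get_cpu() */
	struct my_stats *s = __this_cpu_ptr(&my_stats);

	s->packets++;
}

With the x86 definition above, __this_cpu_ptr() here becomes a single add of the %gs-based this_cpu_off value to the pointer, instead of first reading the offset into a temporary register as the generic version does.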
@@ -55,14 +55,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  */
 #define per_cpu(var, cpu) \
 	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
-#define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#ifndef __this_cpu_ptr
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#else
+#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#endif
+
+#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))

 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
......
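A minimal sketch (hypothetical variable name) of what the reshuffle means for users of the generic header: __get_cpu_var() is now shorthand for dereferencing this_cpu_ptr(), and only CONFIG_DEBUG_PREEMPT builds keep routing this_cpu_ptr() through my_cpu_offset so misuse from preemptible context can still be caught:

/* hypothetical per-cpu counter, for illustration only */
static DEFINE_PER_CPU(int, my_counter);

static void my_bump(void)	/* assumed to run with preemption disabled */
{
	int *p;

	/* the classic accessor ... */
	__get_cpu_var(my_counter)++;

	/* ... is now simply shorthand for the this_cpu_ptr() form */
	p = this_cpu_ptr(&my_counter);
	(*p)++;
}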
@@ -48,10 +48,8 @@
 	preempt_enable();				\
 } while (0)

-#ifdef CONFIG_SMP
-
 /* minimum unit size, also is the maximum supported allocation size */
-#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
+#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)

 /*
  * Percpu allocator can serve percpu allocations before slab is
@@ -146,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
  * dynamically allocated. Non-atomic access to the current CPU's
  * version should probably be combined with get_cpu()/put_cpu().
  */
+#ifdef CONFIG_SMP
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+#else
+#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
+#endif

 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
 extern bool is_kernel_percpu_address(unsigned long addr);

-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
+#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
 extern void __init setup_per_cpu_areas(void);
 #endif
 extern void __init percpu_init_late(void);

-#else /* CONFIG_SMP */
-
-#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-
-/* can't distinguish from other static vars, always false */
-static inline bool is_kernel_percpu_address(unsigned long addr)
-{
-	return false;
-}
-
-static inline void __init setup_per_cpu_areas(void) { }
-static inline void __init percpu_init_late(void) { }
-
-static inline void *pcpu_lpage_remapped(void *kaddr)
-{
-	return NULL;
-}
-
-#endif /* CONFIG_SMP */
-
 extern void __percpu *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void __percpu *__pdata);
 extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
......
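With the UP stub gone, dynamically allocated percpu memory uses the same allocator and calling convention on SMP and UP, and, per the first shortlog entry, it is always zero-filled. A minimal sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/percpu.h>

/* hypothetical object type, for illustration only */
struct my_obj {
	long val;
};

static struct my_obj __percpu *my_objs;

static int my_init(void)
{
	my_objs = alloc_percpu(struct my_obj);
	if (!my_objs)
		return -ENOMEM;
	/* no explicit clearing needed: every CPU's copy starts out zeroed */
	return 0;
}

static void my_exit(void)
{
	free_percpu(my_objs);
}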
@@ -117,10 +117,12 @@ extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;

 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

+#ifdef CONFIG_SMP
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align, gfp_t gfp_mask);

 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+#endif

 #endif /* _LINUX_VMALLOC_H */
@@ -301,3 +301,11 @@ config NOMMU_INITIAL_TRIM_EXCESS
 	  of 1 says that all excess pages should be trimmed.

 	  See Documentation/nommu-mmap.txt for more information.
+
+#
+# UP and nommu archs use km based percpu allocator
+#
+config NEED_PER_CPU_KM
+	depends on !SMP
+	bool
+	default y
@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \
 			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
-			   page_isolation.o mm_init.o mmu_context.o \
+			   page_isolation.o mm_init.o mmu_context.o percpu.o \
 			   $(mmu-y)
 obj-y += init-mm.o
@@ -36,11 +36,6 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-ifdef CONFIG_SMP
-obj-y += percpu.o
-else
-obj-y += percpu_up.o
-endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
......
@@ -27,7 +27,7 @@
  * chunk size is not aligned. percpu-km code will whine about it.
  */

-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+#if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
 #error "contiguous percpu allocation is incompatible with paged first chunk"
 #endif
@@ -35,7 +35,11 @@
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 {
-	/* noop */
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
+
 	return 0;
 }
......
This diff is collapsed.
/*
 * mm/percpu_up.c - dummy percpu memory allocator implementation for UP
 */

#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/slab.h>

void __percpu *__alloc_percpu(size_t size, size_t align)
{
	/*
	 * Can't easily make larger alignment work with kmalloc. WARN
	 * on it. Larger alignment should only be used for module
	 * percpu sections on SMP for which this path isn't used.
	 */
	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
	return (void __percpu __force *)kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

void free_percpu(void __percpu *p)
{
	kfree(this_cpu_ptr(p));
}
EXPORT_SYMBOL_GPL(free_percpu);

phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	return __pa(addr);
}
@@ -2065,6 +2065,7 @@ void free_vm_area(struct vm_struct *area)
 }
 EXPORT_SYMBOL_GPL(free_vm_area);

+#ifdef CONFIG_SMP
 static struct vmap_area *node_to_va(struct rb_node *n)
 {
 	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
@@ -2345,6 +2346,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
 		free_vm_area(vms[i]);
 	kfree(vms);
 }
+#endif	/* CONFIG_SMP */

 #ifdef CONFIG_PROC_FS
 static void *s_start(struct seq_file *m, loff_t *pos)
......