Commit 1b59be2a authored by Linus Torvalds

Merge branch 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  Update Pekka's email address in MAINTAINERS
  mm/slab.c: make local symbols static
  slub: Avoid use of slub_lock in show_slab_objects()
  memory hotplug: one more lock on memory hotplug
parents f9ee7f60 2ed1c525
...@@ -3684,7 +3684,7 @@ F: kernel/debug/ ...@@ -3684,7 +3684,7 @@ F: kernel/debug/
KMEMCHECK KMEMCHECK
M: Vegard Nossum <vegardno@ifi.uio.no> M: Vegard Nossum <vegardno@ifi.uio.no>
M: Pekka Enberg <penberg@cs.helsinki.fi> M: Pekka Enberg <penberg@kernel.org>
S: Maintained S: Maintained
F: Documentation/kmemcheck.txt F: Documentation/kmemcheck.txt
F: arch/x86/include/asm/kmemcheck.h F: arch/x86/include/asm/kmemcheck.h
...@@ -5646,7 +5646,7 @@ F: drivers/net/sky2.* ...@@ -5646,7 +5646,7 @@ F: drivers/net/sky2.*
SLAB ALLOCATOR SLAB ALLOCATOR
M: Christoph Lameter <cl@linux-foundation.org> M: Christoph Lameter <cl@linux-foundation.org>
M: Pekka Enberg <penberg@cs.helsinki.fi> M: Pekka Enberg <penberg@kernel.org>
M: Matt Mackall <mpm@selenic.com> M: Matt Mackall <mpm@selenic.com>
L: linux-mm@kvack.org L: linux-mm@kvack.org
S: Maintained S: Maintained
......
...@@ -165,6 +165,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat); ...@@ -165,6 +165,12 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
extern void put_page_bootmem(struct page *page); extern void put_page_bootmem(struct page *page);
#endif #endif
/*
* Lock for memory hotplug guarantees 1) all callbacks for memory hotplug
* notifier will be called under this. 2) offline/online/add/remove memory
* will not run simultaneously.
*/
void lock_memory_hotplug(void); void lock_memory_hotplug(void);
void unlock_memory_hotplug(void); void unlock_memory_hotplug(void);
......
...@@ -409,6 +409,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) ...@@ -409,6 +409,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
int ret; int ret;
struct memory_notify arg; struct memory_notify arg;
lock_memory_hotplug();
arg.start_pfn = pfn; arg.start_pfn = pfn;
arg.nr_pages = nr_pages; arg.nr_pages = nr_pages;
arg.status_change_nid = -1; arg.status_change_nid = -1;
...@@ -421,6 +422,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) ...@@ -421,6 +422,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
ret = notifier_to_errno(ret); ret = notifier_to_errno(ret);
if (ret) { if (ret) {
memory_notify(MEM_CANCEL_ONLINE, &arg); memory_notify(MEM_CANCEL_ONLINE, &arg);
unlock_memory_hotplug();
return ret; return ret;
} }
/* /*
...@@ -445,6 +447,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) ...@@ -445,6 +447,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
printk(KERN_DEBUG "online_pages %lx at %lx failed\n", printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
nr_pages, pfn); nr_pages, pfn);
memory_notify(MEM_CANCEL_ONLINE, &arg); memory_notify(MEM_CANCEL_ONLINE, &arg);
unlock_memory_hotplug();
return ret; return ret;
} }
...@@ -469,6 +472,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages) ...@@ -469,6 +472,7 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
if (onlined_pages) if (onlined_pages)
memory_notify(MEM_ONLINE, &arg); memory_notify(MEM_ONLINE, &arg);
unlock_memory_hotplug();
return 0; return 0;
} }
......
...@@ -284,7 +284,7 @@ struct kmem_list3 { ...@@ -284,7 +284,7 @@ struct kmem_list3 {
* Need this for bootstrapping a per node allocator. * Need this for bootstrapping a per node allocator.
*/ */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES) #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define CACHE_CACHE 0 #define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES #define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES) #define SIZE_L3 (2 * MAX_NUMNODES)
...@@ -4053,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) ...@@ -4053,7 +4053,7 @@ static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
* necessary. Note that the l3 listlock also protects the array_cache * necessary. Note that the l3 listlock also protects the array_cache
* if drain_array() is used on the shared array. * if drain_array() is used on the shared array.
*/ */
void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
struct array_cache *ac, int force, int node) struct array_cache *ac, int force, int node)
{ {
int tofree; int tofree;
...@@ -4317,7 +4317,7 @@ static const struct seq_operations slabinfo_op = { ...@@ -4317,7 +4317,7 @@ static const struct seq_operations slabinfo_op = {
* @count: data length * @count: data length
* @ppos: unused * @ppos: unused
*/ */
ssize_t slabinfo_write(struct file *file, const char __user * buffer, static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp; char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
......
...@@ -3797,7 +3797,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, ...@@ -3797,7 +3797,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
} }
} }
down_read(&slub_lock); lock_memory_hotplug();
#ifdef CONFIG_SLUB_DEBUG #ifdef CONFIG_SLUB_DEBUG
if (flags & SO_ALL) { if (flags & SO_ALL) {
for_each_node_state(node, N_NORMAL_MEMORY) { for_each_node_state(node, N_NORMAL_MEMORY) {
...@@ -3838,7 +3838,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s, ...@@ -3838,7 +3838,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
x += sprintf(buf + x, " N%d=%lu", x += sprintf(buf + x, " N%d=%lu",
node, nodes[node]); node, nodes[node]);
#endif #endif
up_read(&slub_lock); unlock_memory_hotplug();
kfree(nodes); kfree(nodes);
return x + sprintf(buf + x, "\n"); return x + sprintf(buf + x, "\n");
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment