Commit d7ec4bfe authored by Vaibhav Nagarnaik, committed by Steven Rostedt

ring-buffer: Set __GFP_NORETRY flag for ring buffer allocating process

The tracing ring buffer is allocated from kernel memory. While
allocating a large chunk of memory, the kernel may invoke the
OOM killer, which destabilizes the system: unrelated processes can be
killed to satisfy the allocation.

This patch adds the __GFP_NORETRY flag to the ring buffer allocation
calls so that they fail gracefully when the system cannot satisfy the
allocation request.
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vaibhav Nagarnaik <vnagarnaik@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Michael Rubin <mrubin@google.com>
Cc: David Sharp <dhsharp@google.com>
Link: http://lkml.kernel.org/r/1307491302-9236-1-git-send-email-vnagarnaik@google.com
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 22fe9b54
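
For context, here is a minimal standalone sketch of the allocation pattern this
patch applies (the function name is illustrative, not part of the patch). With
__GFP_NORETRY, the page allocator gives up after its initial reclaim attempt
instead of retrying and eventually invoking the oom-killer, so the caller
simply sees NULL and can unwind:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: allocate one buffer page without risking an OOM kill. */
static void *alloc_buffer_page_sketch(int node)
{
	struct page *page;

	/*
	 * At most one reclaim pass; on failure, return NULL to the
	 * caller instead of triggering the oom-killer.
	 */
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	return page_address(page);
}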
@@ -1004,9 +1004,14 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	for (i = 0; i < nr_pages; i++) {
 		struct page *page;
+		/*
+		 * __GFP_NORETRY flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is
+		 * not destabilized.
+		 */
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+				    GFP_KERNEL | __GFP_NORETRY,
+				    cpu_to_node(cpu_buffer->cpu));
 		if (!bpage)
 			goto free_pages;
@@ -1015,7 +1020,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add(&bpage->list, &pages);
 		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
-					GFP_KERNEL, 0);
+					GFP_KERNEL | __GFP_NORETRY, 0);
 		if (!page)
 			goto free_pages;
 		bpage->page = page_address(page);
@@ -1377,13 +1382,20 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
 			struct page *page;
+			/*
+			 * __GFP_NORETRY flag makes sure that the allocation
+			 * fails gracefully without invoking oom-killer and
+			 * the system is not destabilized.
+			 */
 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
 						   cache_line_size()),
-					    GFP_KERNEL, cpu_to_node(cpu));
+					    GFP_KERNEL | __GFP_NORETRY,
+					    cpu_to_node(cpu));
 			if (!bpage)
 				goto free_pages;
 			list_add(&bpage->list, &pages);
-			page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+			page = alloc_pages_node(cpu_to_node(cpu),
+						GFP_KERNEL | __GFP_NORETRY, 0);
 			if (!page)
 				goto free_pages;
 			bpage->page = page_address(page);
@@ -3737,7 +3749,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 	struct buffer_data_page *bpage;
 	struct page *page;
-	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	page = alloc_pages_node(cpu_to_node(cpu),
+				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
 		return NULL;
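
The caller-visible effect, sketched below (the wrapper name is hypothetical,
not from the patch): a failed resize now surfaces as -ENOMEM, which can be
propagated to user space, rather than destabilizing the system:

#include <linux/ring_buffer.h>

/*
 * Hypothetical wrapper: with __GFP_NORETRY in place, a failed resize
 * surfaces as -ENOMEM here rather than as an OOM kill elsewhere.
 */
static int resize_trace_buffer_sketch(struct ring_buffer *buffer,
				      unsigned long size)
{
	int ret = ring_buffer_resize(buffer, size);

	if (ret < 0)
		return ret;	/* e.g. reported to a writer of buffer_size_kb */
	return 0;
}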