Commit 08cd2a69 authored by Ingo Molnar

Merge branch 'tip/perf/urgent-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/urgent

Pull ftrace fixes from Steve Rostedt.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents fd6da696 9366c1ba
@@ -2437,7 +2437,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
 {
        iter->pos = 0;
        iter->func_pos = 0;
-       iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+       iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
 }

 static void *t_start(struct seq_file *m, loff_t *pos)
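The ftrace hunk above fixes a flag-clearing bug: masking with ~(A & B) only clears the bits the two flags have in common (often none), whereas ~(A | B) clears both. A minimal standalone sketch of the difference, using hypothetical single-bit values rather than the real FTRACE_ITER_* definitions:

#include <stdio.h>

/* Hypothetical single-bit flags for illustration; the real FTRACE_ITER_* values differ. */
#define ITER_PRINTALL 0x01
#define ITER_HASH     0x02

int main(void)
{
        unsigned int flags = ITER_PRINTALL | ITER_HASH;         /* both set: 0x03 */

        /* Buggy form: ITER_PRINTALL & ITER_HASH == 0, so the mask ~0 keeps every bit. */
        unsigned int buggy = flags & ~(ITER_PRINTALL & ITER_HASH);

        /* Fixed form: ITER_PRINTALL | ITER_HASH == 0x03, so both bits are cleared. */
        unsigned int fixed = flags & ~(ITER_PRINTALL | ITER_HASH);

        printf("buggy: 0x%x, fixed: 0x%x\n", buggy, fixed);     /* prints buggy: 0x3, fixed: 0x0 */
        return 0;
}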
@@ -1396,6 +1396,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
                struct list_head *head_page_with_bit;

                head_page = &rb_set_head_page(cpu_buffer)->list;
+               if (!head_page)
+                       break;
                prev_page = head_page->prev;

                first_page = pages->next;
@@ -2934,7 +2936,7 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
        unsigned long flags;
        struct ring_buffer_per_cpu *cpu_buffer;
        struct buffer_page *bpage;
-       unsigned long ret;
+       unsigned long ret = 0;

        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return 0;
@@ -2949,7 +2951,8 @@ unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
                bpage = cpu_buffer->reader_page;
        else
                bpage = rb_set_head_page(cpu_buffer);
-       ret = bpage->page->time_stamp;
+       if (bpage)
+               ret = bpage->page->time_stamp;
        raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        return ret;
@@ -3260,6 +3263,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        /*
         * Splice the empty reader page into the list around the head.
         */
        reader = rb_set_head_page(cpu_buffer);
+       if (!reader)
+               goto out;
        cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
        cpu_buffer->reader_page->list.prev = reader->list.prev;
@@ -3778,12 +3783,17 @@ void
 ring_buffer_read_finish(struct ring_buffer_iter *iter)
 {
        struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+       unsigned long flags;

        /*
         * Ring buffer is disabled from recording, here's a good place
         * to check the integrity of the ring buffer.
+        * Must prevent readers from trying to read, as the check
+        * clears the HEAD page and readers require it.
         */
+       raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
        rb_check_pages(cpu_buffer);
+       raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

        atomic_dec(&cpu_buffer->record_disabled);
        atomic_dec(&cpu_buffer->buffer->resize_disabled);
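The last hunk runs rb_check_pages() under reader_lock because the integrity check temporarily clears the HEAD-page marker that readers rely on, so an unsynchronized reader could race with it. A rough userspace analogy of that pattern, with pthreads standing in for the kernel's raw spinlock and toy types throughout (not the real ring-buffer API): do the invariant-breaking check under the same lock readers take.

#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for a per-CPU buffer; not the kernel's structure. */
struct toy_buffer {
        pthread_mutex_t reader_lock;
        int head_valid;                 /* plays the role of the HEAD-page marker */
};

/* Invasive consistency check: briefly breaks the invariant readers rely on. */
static void toy_check_pages(struct toy_buffer *b)
{
        b->head_valid = 0;              /* like clearing the HEAD page while walking the list */
        /* ... verify the page list here ... */
        b->head_valid = 1;              /* restore the invariant */
}

/* The fix's shape: perform the check under the lock readers also take,
 * so no reader can observe the broken invariant. */
static void toy_read_finish(struct toy_buffer *b)
{
        pthread_mutex_lock(&b->reader_lock);
        toy_check_pages(b);
        pthread_mutex_unlock(&b->reader_lock);
}

int main(void)
{
        struct toy_buffer b = { PTHREAD_MUTEX_INITIALIZER, 1 };

        toy_read_finish(&b);
        printf("head_valid = %d\n", b.head_valid);      /* 1: invariant holds again */
        return 0;
}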