Commit b4b55dfd authored by Uros Bizjak, committed by Steven Rostedt (Google)

ring_buffer: Change some static functions to void

The results of some static functions are not used. Change the
type of these functions to void and remove the unnecessary returns.

No functional change intended.

Link: https://lkml.kernel.org/r/20230305155532.5549-2-ubizjak@gmail.com
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent fee86a4e
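
The pattern the patch below applies, shown as a minimal standalone sketch in plain C (not the kernel code; the helper names here are made up for illustration): a static function whose return value no caller ever checks can be changed to return void, dropping the now-pointless return statements, without any change in behaviour.

    /* Standalone illustration only -- not kernel code. */
    #include <stdio.h>

    /* Before: the helper reports a result, but callers ignore it. */
    static int check_flags_old(unsigned long val, unsigned long mask)
    {
            if (val & mask) {
                    fprintf(stderr, "warning: unexpected flag bits\n");
                    return 1;
            }
            return 0;
    }

    /* After: same side effect (the warning), no unused return value. */
    static void check_flags_new(unsigned long val, unsigned long mask)
    {
            if (val & mask)
                    fprintf(stderr, "warning: unexpected flag bits\n");
    }

    int main(void)
    {
            /*
             * Both calls behave identically from the caller's point of
             * view, because the old return value was never examined.
             */
            check_flags_old(0x3, 0x3);
            check_flags_new(0x3, 0x3);
            return 0;
    }
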
@@ -1565,15 +1565,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 	}
 }
 
-static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 			  struct buffer_page *bpage)
 {
 	unsigned long val = (unsigned long)bpage;
 
-	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
-		return 1;
-
-	return 0;
+	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
 }
 
 /**
@@ -1583,30 +1580,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
  * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
-static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct list_head *head = rb_list_head(cpu_buffer->pages);
 	struct list_head *tmp;
 
 	if (RB_WARN_ON(cpu_buffer,
 			rb_list_head(rb_list_head(head->next)->prev) != head))
-		return -1;
+		return;
 
 	if (RB_WARN_ON(cpu_buffer,
 			rb_list_head(rb_list_head(head->prev)->next) != head))
-		return -1;
+		return;
 
 	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
 		if (RB_WARN_ON(cpu_buffer,
 				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
-			return -1;
+			return;
 
 		if (RB_WARN_ON(cpu_buffer,
 				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
-			return -1;
+			return;
 	}
-
-	return 0;
 }
 
 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
@@ -4496,7 +4491,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
 	default:
 		RB_WARN_ON(cpu_buffer, 1);
 	}
-	return;
 }
 
 static void
@@ -4527,7 +4521,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
 	default:
 		RB_WARN_ON(iter->cpu_buffer, 1);
 	}
-	return;
 }
 
 static struct buffer_page *
@@ -4942,7 +4935,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
 {
 	if (likely(locked))
 		raw_spin_unlock(&cpu_buffer->reader_lock);
-	return;
 }
 
 /**