Commit 211117ff authored by Robert Richter

oprofile: fix lost sample counter

The number of lost samples could be greater than the number of
received samples. This patch fixes this. The implementation
introduces return values for add_sample() and add_code().
Signed-off-by: Robert Richter <robert.richter@amd.com>
parent 1d7503b5
...@@ -145,32 +145,31 @@ void end_cpu_work(void) ...@@ -145,32 +145,31 @@ void end_cpu_work(void)
flush_scheduled_work(); flush_scheduled_work();
} }
static inline void static inline int
add_sample(struct oprofile_cpu_buffer *cpu_buf, add_sample(struct oprofile_cpu_buffer *cpu_buf,
unsigned long pc, unsigned long event) unsigned long pc, unsigned long event)
{ {
struct op_entry entry; struct op_entry entry;
int ret;
if (cpu_buffer_write_entry(&entry)) ret = cpu_buffer_write_entry(&entry);
goto Error; if (ret)
return ret;
entry.sample->eip = pc; entry.sample->eip = pc;
entry.sample->event = event; entry.sample->event = event;
if (cpu_buffer_write_commit(&entry)) ret = cpu_buffer_write_commit(&entry);
goto Error; if (ret)
return ret;
return; return 0;
Error:
cpu_buf->sample_lost_overflow++;
return;
} }
static inline void static inline int
add_code(struct oprofile_cpu_buffer *buffer, unsigned long value) add_code(struct oprofile_cpu_buffer *buffer, unsigned long value)
{ {
add_sample(buffer, ESCAPE_CODE, value); return add_sample(buffer, ESCAPE_CODE, value);
} }
/* This must be safe from any context. It's safe writing here /* This must be safe from any context. It's safe writing here
...@@ -201,17 +200,25 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc, ...@@ -201,17 +200,25 @@ static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
/* notice a switch from user->kernel or vice versa */ /* notice a switch from user->kernel or vice versa */
if (cpu_buf->last_is_kernel != is_kernel) { if (cpu_buf->last_is_kernel != is_kernel) {
cpu_buf->last_is_kernel = is_kernel; cpu_buf->last_is_kernel = is_kernel;
add_code(cpu_buf, is_kernel); if (add_code(cpu_buf, is_kernel))
goto fail;
} }
/* notice a task switch */ /* notice a task switch */
if (cpu_buf->last_task != task) { if (cpu_buf->last_task != task) {
cpu_buf->last_task = task; cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task); if (add_code(cpu_buf, (unsigned long)task))
goto fail;
} }
add_sample(cpu_buf, pc, event); if (add_sample(cpu_buf, pc, event))
goto fail;
return 1; return 1;
fail:
cpu_buf->sample_lost_overflow++;
return 0;
} }
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf) static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
...@@ -266,37 +273,49 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs, ...@@ -266,37 +273,49 @@ void oprofile_add_ibs_sample(struct pt_regs * const regs,
int is_kernel = !user_mode(regs); int is_kernel = !user_mode(regs);
struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer); struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
struct task_struct *task; struct task_struct *task;
int fail = 0;
cpu_buf->sample_received++; cpu_buf->sample_received++;
/* notice a switch from user->kernel or vice versa */ /* notice a switch from user->kernel or vice versa */
if (cpu_buf->last_is_kernel != is_kernel) { if (cpu_buf->last_is_kernel != is_kernel) {
if (add_code(cpu_buf, is_kernel))
goto fail;
cpu_buf->last_is_kernel = is_kernel; cpu_buf->last_is_kernel = is_kernel;
add_code(cpu_buf, is_kernel);
} }
/* notice a task switch */ /* notice a task switch */
if (!is_kernel) { if (!is_kernel) {
task = current; task = current;
if (cpu_buf->last_task != task) { if (cpu_buf->last_task != task) {
if (add_code(cpu_buf, (unsigned long)task))
goto fail;
cpu_buf->last_task = task; cpu_buf->last_task = task;
add_code(cpu_buf, (unsigned long)task);
} }
} }
add_code(cpu_buf, ibs_code); fail = fail || add_code(cpu_buf, ibs_code);
add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]); fail = fail || add_sample(cpu_buf, ibs_sample[0], ibs_sample[1]);
add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]); fail = fail || add_sample(cpu_buf, ibs_sample[2], ibs_sample[3]);
add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]); fail = fail || add_sample(cpu_buf, ibs_sample[4], ibs_sample[5]);
if (ibs_code == IBS_OP_BEGIN) { if (ibs_code == IBS_OP_BEGIN) {
add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]); fail = fail || add_sample(cpu_buf, ibs_sample[6], ibs_sample[7]);
add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]); fail = fail || add_sample(cpu_buf, ibs_sample[8], ibs_sample[9]);
add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]); fail = fail || add_sample(cpu_buf, ibs_sample[10], ibs_sample[11]);
} }
if (fail)
goto fail;
if (backtrace_depth) if (backtrace_depth)
oprofile_ops.backtrace(regs, backtrace_depth); oprofile_ops.backtrace(regs, backtrace_depth);
return;
fail:
cpu_buf->sample_lost_overflow++;
return;
} }
#endif #endif
...@@ -318,13 +337,17 @@ void oprofile_add_trace(unsigned long pc) ...@@ -318,13 +337,17 @@ void oprofile_add_trace(unsigned long pc)
* broken frame can give an eip with the same value as an * broken frame can give an eip with the same value as an
* escape code, abort the trace if we get it * escape code, abort the trace if we get it
*/ */
if (pc == ESCAPE_CODE) { if (pc == ESCAPE_CODE)
cpu_buf->tracing = 0; goto fail;
cpu_buf->backtrace_aborted++;
return; if (add_sample(cpu_buf, pc, 0))
} goto fail;
add_sample(cpu_buf, pc, 0); return;
fail:
cpu_buf->tracing = 0;
cpu_buf->backtrace_aborted++;
return;
} }
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment