Commit f9ee7f60 authored by Linus Torvalds

Merge branches 'core-fixes-for-linus', 'x86-fixes-for-linus', 'timers-fixes-for-linus' and 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  rcu: avoid pointless blocked-task warnings
  rcu: demote SRCU_SYNCHRONIZE_DELAY from kernel-parameter status
  rtmutex: Fix comment about why new_owner can be NULL in wake_futex_pi()

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, olpc: Add missing Kconfig dependencies
  x86, mrst: Set correct APB timer IRQ affinity for secondary cpu
  x86: tsc: Fix calibration refinement conditionals to avoid divide by zero
  x86, ia64, acpi: Clean up x86-ism in drivers/acpi/numa.c

* 'timers-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  timekeeping: Make local variables static
  time: Rename misnamed minsec argument of clocks_calc_mult_shift()

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing: Remove syscall_exit_fields
  tracing: Only process module tracepoints once
  perf record: Add "nodelay" mode, disabled by default
  perf sched: Fix list of events, dropping unsupported ':r' modifier
  Revert "perf tools: Emit clearer message for sys_perf_event_open ENOENT return"
  perf top: Fix annotate segv
  perf evsel: Fix order of event list deletion
@@ -2068,6 +2068,7 @@ config OLPC
 	bool "One Laptop Per Child support"
 	select GPIOLIB
 	select OLPC_OPENFIRMWARE
+	depends on !X86_64 && !X86_PAE
 	---help---
 	  Add support for detecting the unique features of the OLPC
 	  XO hardware.
...
@@ -313,13 +313,15 @@ static void apbt_setup_irq(struct apbt_dev *adev)
 	if (adev->irq == 0)
 		return;

-	if (system_state == SYSTEM_BOOTING) {
-		irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
-		irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
-		/* APB timer irqs are set up as mp_irqs, timer is edge type */
-		__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+	irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
+	irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
+	/* APB timer irqs are set up as mp_irqs, timer is edge type */
+	__set_irq_handler(adev->irq, handle_edge_irq, 0, "edge");
+
+	if (system_state == SYSTEM_BOOTING) {
 		if (request_irq(adev->irq, apbt_interrupt_handler,
-				IRQF_TIMER | IRQF_DISABLED | IRQF_NOBALANCING,
+				IRQF_TIMER | IRQF_DISABLED |
+				IRQF_NOBALANCING,
 				adev->name, adev)) {
 			printk(KERN_ERR "Failed request IRQ for APBT%d\n",
 			       adev->num);
...
@@ -464,7 +464,7 @@ unsigned long native_calibrate_tsc(void)
 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);

 		/* hpet or pmtimer available ? */
-		if (!hpet && !ref1 && !ref2)
+		if (ref1 == ref2)
 			continue;

 		/* Check, whether the sampling was disturbed by an SMI */
@@ -935,7 +935,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 	tsc_stop = tsc_read_refs(&ref_stop, hpet);

 	/* hpet or pmtimer available ? */
-	if (!hpet && !ref_start && !ref_stop)
+	if (ref_start == ref_stop)
 		goto out;

 	/* Check, whether the sampling was disturbed by an SMI */
...
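The new conditional is easiest to see next to the arithmetic it protects: the reference delta later serves as a divisor, so any pair of equal readings, whether both zero because no HPET/pmtimer exists or nonzero because the reference never advanced, has to abort the sample. A stand-alone toy sketch with made-up numbers and simplified scaling, not the kernel's actual calibration math:

#include <stdint.h>
#include <stdio.h>

/* Toy model: the reference delta is a divisor, so equal readings must
 * reject the sample.  "ref1 == ref2" covers both the old "no HPET or
 * pmtimer, both reads are 0" case and the newly handled "reference
 * present but did not advance" case. */
static int calc_ref_khz(uint64_t tsc1, uint64_t tsc2,
			uint64_t ref1, uint64_t ref2, uint64_t *khz)
{
	if (ref1 == ref2)		/* would divide by zero below */
		return -1;
	*khz = (tsc2 - tsc1) / (ref2 - ref1);	/* simplified scaling */
	return 0;
}

int main(void)
{
	uint64_t khz;

	if (calc_ref_khz(1000, 2000, 42, 42, &khz) < 0)
		printf("calibration sample rejected\n");
	return 0;
}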
@@ -275,23 +275,19 @@ acpi_table_parse_srat(enum acpi_srat_type id,
 int __init acpi_numa_init(void)
 {
 	int ret = 0;
-	int nr_cpu_entries = nr_cpu_ids;

-#ifdef CONFIG_X86
 	/*
 	 * Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
 	 * SRAT cpu entries could have different order with that in MADT.
 	 * So go over all cpu entries in SRAT to get apicid to node mapping.
 	 */
-	nr_cpu_entries = MAX_LOCAL_APIC;
-#endif

 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				      acpi_parse_x2apic_affinity, nr_cpu_entries);
+				      acpi_parse_x2apic_affinity, 0);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				      acpi_parse_processor_affinity, nr_cpu_entries);
+				      acpi_parse_processor_affinity, 0);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
...
+/*
+ * Because linux/module.h has tracepoints in the header, and ftrace.h
+ * eventually includes this file, define_trace.h includes linux/module.h
+ * But we do not want the module.h to override the TRACE_SYSTEM macro
+ * variable that define_trace.h is processing, so we only set it
+ * when module events are being processed, which would happen when
+ * CREATE_TRACE_POINTS is defined.
+ */
+#ifdef CREATE_TRACE_POINTS
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM module
+#endif

 #if !defined(_TRACE_MODULE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_MODULE_H
...
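For readers outside the tracing code, here is a stand-alone illustration of the hazard the new comment describes, with hypothetical macro names rather than anything from ftrace: an unconditional redefinition inside a nested include clobbers the value the outer header is still using, while gating it on the "actually creating events" flag keeps nested includes inert.

#include <stdio.h>

/* Hypothetical stand-ins for TRACE_SYSTEM / CREATE_TRACE_POINTS. */
#define CREATE_EVENTS	/* comment this out to simulate a nested include */

#ifdef CREATE_EVENTS	/* guarded, as in the fix */
#undef EVENT_SYSTEM
#define EVENT_SYSTEM "module"
#endif

int main(void)
{
#ifdef EVENT_SYSTEM
	printf("EVENT_SYSTEM set to %s\n", EVENT_SYSTEM);
#else
	printf("nested include left EVENT_SYSTEM alone\n");
#endif
	return 0;
}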
@@ -515,21 +515,6 @@ config RCU_BOOST_DELAY
 	  Accept the default if unsure.

-config SRCU_SYNCHRONIZE_DELAY
-	int "Microseconds to delay before waiting for readers"
-	range 0 20
-	default 10
-	help
-	  This option controls how long SRCU delays before entering its
-	  loop waiting on SRCU readers.  The purpose of this loop is
-	  to avoid the unconditional context-switch penalty that would
-	  otherwise be incurred if there was an active SRCU reader,
-	  in a manner similar to adaptive locking schemes.  This should
-	  be set to be a bit longer than the common-case SRCU read-side
-	  critical-section overhead.
-
-	  Accept the default if unsure.
-
 endmenu # "RCU Subsystem"

 config IKCONFIG
...
@@ -826,10 +826,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

 	/*
-	 * This happens when we have stolen the lock and the original
-	 * pending owner did not enqueue itself back on the rt_mutex.
-	 * Thats not a tragedy. We know that way, that a lock waiter
-	 * is on the fly. We make the futex_q waiter the pending owner.
+	 * It is possible that the next waiter (the one that brought
+	 * this owner to the kernel) timed out and is no longer
+	 * waiting on the lock.
 	 */
 	if (!new_owner)
 		new_owner = this->task;
...
@@ -189,7 +189,8 @@ static int rcu_kthread(void *arg)
 	unsigned long flags;

 	for (;;) {
-		wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
 		morework = rcu_boost();
 		local_irq_save(flags);
 		work = have_rcu_kthread_work;
...
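The one-liner matters because the hung-task watchdog only looks at TASK_UNINTERRUPTIBLE sleepers, and wait_event() parks the thread in exactly that state; an idle kthread waiting for work then draws "blocked task" complaints after the default 120 seconds. A minimal kthread skeleton using the interruptible variant, a hypothetical demo module for illustration rather than the RCU code itself:

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_work;
static struct task_struct *demo_task;

static int demo_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* TASK_INTERRUPTIBLE sleep: ignored by the hung-task
		 * detector, unlike plain wait_event(). */
		wait_event_interruptible(demo_wq,
					 demo_work != 0 ||
					 kthread_should_stop());
		demo_work = 0;
		/* ... process the queued work here ... */
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread, NULL, "demo_kthread");
	return IS_ERR(demo_task) ? PTR_ERR(demo_task) : 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");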
@@ -155,6 +155,16 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);

+/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited().  We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections.  If there are still some readers after 10 microseconds,
+ * we repeatedly block for 1-millisecond time periods.  This approach
+ * has done well in testing, so there is no need for a config parameter.
+ */
+#define SYNCHRONIZE_SRCU_READER_DELAY 10
+
 /*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
@@ -207,11 +217,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
 	 * will have finished executing.  We initially give readers
 	 * an arbitrarily chosen 10 microseconds to get out of their
 	 * SRCU read-side critical sections, then loop waiting 1/HZ
-	 * seconds per iteration.
+	 * seconds per iteration.  The 10-microsecond value has done
+	 * very well in testing.
 	 */
 	if (srcu_readers_active_idx(sp, idx))
-		udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY);
+		udelay(SYNCHRONIZE_SRCU_READER_DELAY);
 	while (srcu_readers_active_idx(sp, idx))
 		schedule_timeout_interruptible(1);
...
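The strategy itself is generic enough to sketch outside the kernel. Below is a user-space analogue, illustrative only and not the SRCU implementation (the kernel version uses udelay() and schedule_timeout_interruptible(), and the reader side is not shown): spin briefly on the cheap chance that readers are already on their way out, then fall back to coarse sleeps so a long-running reader does not burn a CPU.

#include <stdatomic.h>
#include <time.h>

static atomic_int active_readers;	/* maintained by reader threads */

static void wait_for_readers(void)
{
	struct timespec start, now;
	const struct timespec one_ms = { 0, 1000000 };

	/* Phase 1: spin for ~10us, mirroring SYNCHRONIZE_SRCU_READER_DELAY,
	 * to dodge the context-switch penalty when readers are short. */
	clock_gettime(CLOCK_MONOTONIC, &start);
	while (atomic_load(&active_readers)) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000000L +
		    (now.tv_nsec - start.tv_nsec) > 10000)
			break;
	}

	/* Phase 2: readers still present; block in 1ms chunks instead. */
	while (atomic_load(&active_readers))
		nanosleep(&one_ms, NULL);
}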
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * @shift:	pointer to shift variable
  * @from:	frequency to convert from
  * @to:		frequency to convert to
- * @minsec:	guaranteed runtime conversion range in seconds
+ * @maxsec:	guaranteed runtime conversion range in seconds
  *
  * The function evaluates the shift/mult pair for the scaled math
  * operations of clocksources and clockevents.
@@ -122,7 +122,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * NSEC_PER_SEC == 1GHz and @from is the counter frequency. For clock
  * event @to is the counter frequency and @from is NSEC_PER_SEC.
  *
- * The @minsec conversion range argument controls the time frame in
+ * The @maxsec conversion range argument controls the time frame in
  * seconds which must be covered by the runtime conversion with the
 * calculated mult and shift factors. This guarantees that no 64bit
 * overflow happens when the input value of the conversion is
@@ -131,7 +131,7 @@ EXPORT_SYMBOL_GPL(timecounter_cyc2time);
  * factors.
  */
 void
-clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
+clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 maxsec)
 {
 	u64 tmp;
 	u32 sft, sftacc= 32;
@@ -140,7 +140,7 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec)
 	 * Calculate the shift factor which is limiting the conversion
 	 * range:
 	 */
-	tmp = ((u64)minsec * from) >> 32;
+	tmp = ((u64)maxsec * from) >> 32;
 	while (tmp) {
 		tmp >>=1;
 		sftacc--;
...
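For anyone not living in kernel/time/, the pair being computed is consumed as ns = (cycles * mult) >> shift, and @maxsec caps how many seconds of input the 64-bit intermediate product must survive without overflow, hence the rename: it is a maximum, not a minimum. A quick stand-alone check with a hand-picked, hypothetical mult/shift pair for a 10 MHz counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 10 MHz counter: 1 cycle = 100ns.  Encode 100ns/cycle as a
	 * (hypothetical) fixed-point pair with 24 fractional bits. */
	uint32_t mult = 100 << 24;
	uint32_t shift = 24;
	uint64_t cycles = 123456;

	uint64_t ns = ((uint64_t)cycles * mult) >> shift;
	printf("%llu cycles -> %llu ns\n",
	       (unsigned long long)cycles, (unsigned long long)ns);
	/* prints: 123456 cycles -> 12345600 ns, i.e. 12.3456 ms */
	return 0;
}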
@@ -49,7 +49,7 @@ struct timekeeper {
 	u32	mult;
 };

-struct timekeeper timekeeper;
+static struct timekeeper timekeeper;

 /**
  * timekeeper_setup_internals - Set up internals to use clocksource clock.
@@ -164,7 +164,7 @@ static struct timespec total_sleep_time;
 /*
  * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
  */
-struct timespec raw_time;
+static struct timespec raw_time;

 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
...
@@ -23,9 +23,6 @@ static int syscall_exit_register(struct ftrace_event_call *event,
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);

-/* All syscall exit events have the same fields */
-static LIST_HEAD(syscall_exit_fields);
-
 static struct list_head *
 syscall_get_enter_fields(struct ftrace_event_call *call)
 {
@@ -34,12 +31,6 @@ syscall_get_enter_fields(struct ftrace_event_call *call)
 	return &entry->enter_fields;
 }

-static struct list_head *
-syscall_get_exit_fields(struct ftrace_event_call *call)
-{
-	return &syscall_exit_fields;
-}
-
 struct trace_event_functions enter_syscall_print_funcs = {
 	.trace		= print_syscall_enter,
 };
@@ -60,7 +51,7 @@ struct ftrace_event_class event_class_syscall_exit = {
 	.system		= "syscalls",
 	.reg		= syscall_exit_register,
 	.define_fields	= syscall_exit_define_fields,
-	.get_fields	= syscall_get_exit_fields,
+	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
 	.raw_init	= init_syscall_trace,
 };
...
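The replacement initializer leans on the usual self-referencing LIST_HEAD_INIT trick, which is legal because the address of a static object's member is a constant expression. A reduced stand-alone sketch, with list_head trimmed to its two pointers and the names invented:

#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

struct event_class {
	const char *system;
	struct list_head fields;
};

/* A designated initializer may name the object being defined, so the
 * list starts out empty (head pointing at itself) with no runtime
 * setup needed. */
struct event_class demo_class = {
	.system = "demo",
	.fields = LIST_HEAD_INIT(demo_class.fields),
};

int main(void)
{
	/* Empty list: head points at itself, exactly what a runtime
	 * INIT_LIST_HEAD() would have produced. */
	printf("empty: %d\n", demo_class.fields.next == &demo_class.fields);
	return 0;
}

With the list head statically empty, the class no longer needs a get_fields() callback whose only job was to hand back one shared list.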
@@ -61,6 +61,9 @@ OPTIONS
 -r::
 --realtime=::
 	Collect data with this RT SCHED_FIFO priority.
+-D::
+--no-delay::
+	Collect data without buffering.
 -A::
 --append::
 	Append to the output file to do incremental profiling.
...
@@ -49,6 +49,7 @@ static int pipe_output = 0;
 static const char	*output_name		= "perf.data";
 static int		group			= 0;
 static int		realtime_prio		= 0;
+static bool		nodelay			= false;
 static bool		raw_samples		= false;
 static bool		sample_id_all_avail	= true;
 static bool		system_wide		= false;
@@ -307,6 +308,11 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
 		attr->sample_type	|= PERF_SAMPLE_CPU;
 	}

+	if (nodelay) {
+		attr->watermark = 0;
+		attr->wakeup_events = 1;
+	}
+
 	attr->mmap		= track;
 	attr->comm		= track;
 	attr->inherit		= !no_inherit;
@@ -331,9 +337,6 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
 		else if (err == ENODEV && cpu_list) {
 			die("No such device - did you specify"
 				" an out-of-range profile CPU?\n");
-		} else if (err == ENOENT) {
-			die("%s event is not supported. ",
-			    event_name(evsel));
 		} else if (err == EINVAL && sample_id_all_avail) {
 			/*
 			 * Old kernel, no attr->sample_id_type_all field
@@ -480,6 +483,7 @@ static void atexit_header(void)
 		process_buildids();
 		perf_header__write(&session->header, output, true);
 		perf_session__delete(session);
+		perf_evsel_list__delete();
 		symbol__exit();
 	}
 }
@@ -845,6 +849,8 @@ const struct option record_options[] = {
 		    "record events on existing thread id"),
 	OPT_INTEGER('r', "realtime", &realtime_prio,
 		    "collect data with this RT SCHED_FIFO priority"),
+	OPT_BOOLEAN('D', "no-delay", &nodelay,
+		    "collect data without buffering"),
 	OPT_BOOLEAN('R', "raw-samples", &raw_samples,
 		    "collect raw sample records from all opened counters"),
 	OPT_BOOLEAN('a', "all-cpus", &system_wide,
...
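The two attr fields set for nodelay mode belong to the perf_event_open(2) ABI rather than anything perf-specific: with the watermark bit clear, the union field counts events, and a value of 1 asks the kernel to wake readers on every sample, trading throughput for latency. A minimal sketch with a hypothetical helper and error handling elided:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper, not perf's code: open one software counter whose
 * ring buffer wakes the reader on every sample, i.e. what --no-delay
 * arranges for all of perf record's counters. */
static int open_nodelay_counter(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.watermark = 0;	/* union below counts events, not bytes */
	attr.wakeup_events = 1;	/* wake poll()/read() after each event */

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}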
@@ -1843,15 +1843,15 @@ static const char *record_args[] = {
 	"-f",
 	"-m", "1024",
 	"-c", "1",
-	"-e", "sched:sched_switch:r",
-	"-e", "sched:sched_stat_wait:r",
-	"-e", "sched:sched_stat_sleep:r",
-	"-e", "sched:sched_stat_iowait:r",
-	"-e", "sched:sched_stat_runtime:r",
-	"-e", "sched:sched_process_exit:r",
-	"-e", "sched:sched_process_fork:r",
-	"-e", "sched:sched_wakeup:r",
-	"-e", "sched:sched_migrate_task:r",
+	"-e", "sched:sched_switch",
+	"-e", "sched:sched_stat_wait",
+	"-e", "sched:sched_stat_sleep",
+	"-e", "sched:sched_stat_iowait",
+	"-e", "sched:sched_stat_runtime",
+	"-e", "sched:sched_process_exit",
+	"-e", "sched:sched_process_fork",
+	"-e", "sched:sched_wakeup",
+	"-e", "sched:sched_migrate_task",
 };

 static int __cmd_record(int argc, const char **argv)
...
@@ -743,6 +743,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 out_free_fd:
 	list_for_each_entry(pos, &evsel_list, node)
 		perf_evsel__free_stat_priv(pos);
+	perf_evsel_list__delete();
 out:
 	thread_map__delete(threads);
 	threads = NULL;
...
...@@ -1247,8 +1247,6 @@ static void start_counter(int i, struct perf_evsel *evsel) ...@@ -1247,8 +1247,6 @@ static void start_counter(int i, struct perf_evsel *evsel)
die("Permission error - are you root?\n" die("Permission error - are you root?\n"
"\t Consider tweaking" "\t Consider tweaking"
" /proc/sys/kernel/perf_event_paranoid.\n"); " /proc/sys/kernel/perf_event_paranoid.\n");
if (err == ENOENT)
die("%s event is not supported. ", event_name(evsel));
/* /*
* If it's cycles then fall back to hrtimer * If it's cycles then fall back to hrtimer
* based cpu-clock-tick sw counter, which * based cpu-clock-tick sw counter, which
...@@ -1473,6 +1471,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) ...@@ -1473,6 +1471,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
pos->attr.sample_period = default_interval; pos->attr.sample_period = default_interval;
} }
sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
symbol_conf.priv_size = (sizeof(struct sym_entry) + symbol_conf.priv_size = (sizeof(struct sym_entry) +
(nr_counters + 1) * sizeof(unsigned long)); (nr_counters + 1) * sizeof(unsigned long));
...@@ -1490,6 +1490,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) ...@@ -1490,6 +1490,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
out_free_fd: out_free_fd:
list_for_each_entry(pos, &evsel_list, node) list_for_each_entry(pos, &evsel_list, node)
perf_evsel__free_mmap(pos); perf_evsel__free_mmap(pos);
perf_evsel_list__delete();
return status; return status;
} }
@@ -286,8 +286,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 	status = p->fn(argc, argv, prefix);
 	exit_browser(status);

-	perf_evsel_list__delete();
-
 	if (status)
 		return status & 0xff;
...