Commit 31ae9835 authored by Linus Torvalds

Merge branches 'perf-urgent-for-linus', 'x86-urgent-for-linus' and 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf, x86 and scheduler updates from Ingo Molnar.

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tracing: Do not enable function event with enable
  perf stat: handle ENXIO error for perf_event_open
  perf: Turn off compiler warnings for flex and bison generated files
  perf stat: Fix case where guest/host monitoring is not supported by kernel
  perf build-id: Fix filename size calculation

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, kvm: KVM paravirt kernels don't check for CPUID being unavailable
  x86: Fix section annotation of acpi_map_cpu2node()
  x86/microcode: Ensure that module is only loaded on supported Intel CPUs

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix KVM and ia64 boot crash due to sched_groups circular linked list assumption
@@ -170,6 +170,9 @@ static inline int kvm_para_available(void)
         unsigned int eax, ebx, ecx, edx;
         char signature[13];
 
+        if (boot_cpu_data.cpuid_level < 0)
+                return 0;       /* So we don't blow up on old processors */
+
         cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
         memcpy(signature + 0, &ebx, 4);
         memcpy(signature + 4, &ecx, 4);
...
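For context on the KVM change above: the paravirt probe reads the hypervisor CPUID leaf (0x40000000) and assembles a 12-byte vendor signature from EBX/ECX/EDX; the new guard skips this entirely on pre-CPUID processors, where boot_cpu_data.cpuid_level is negative. A minimal user-space sketch of the signature check (illustrative only, not kernel code; uses GCC's <cpuid.h> raw __cpuid macro, and a full probe would first test the hypervisor bit, CPUID.1:ECX[31]):

/* Illustrative sketch: read the hypervisor signature leaf and compare it. */
#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char signature[13];

        __cpuid(0x40000000, eax, ebx, ecx, edx);   /* hypervisor leaf */

        memcpy(signature + 0, &ebx, 4);
        memcpy(signature + 4, &ecx, 4);
        memcpy(signature + 8, &edx, 4);
        signature[12] = '\0';

        printf("hypervisor signature: \"%s\"\n", signature);
        return strcmp(signature, "KVMKVMKVM") != 0;   /* 0 when under KVM */
}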
@@ -593,7 +593,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinitdata acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
         int nid;
...
@@ -147,12 +147,6 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
 
         memset(csig, 0, sizeof(*csig));
 
-        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
-            cpu_has(c, X86_FEATURE_IA64)) {
-                pr_err("CPU%d not a capable Intel processor\n", cpu_num);
-                return -1;
-        }
-
         csig->sig = cpuid_eax(0x00000001);
 
         if ((c->x86_model >= 5) || (c->x86 > 6)) {
@@ -463,6 +457,14 @@ static struct microcode_ops microcode_intel_ops = {
 
 struct microcode_ops * __init init_intel_microcode(void)
 {
+        struct cpuinfo_x86 *c = &cpu_data(0);
+
+        if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
+            cpu_has(c, X86_FEATURE_IA64)) {
+                pr_err("Intel CPU family 0x%x not supported\n", c->x86);
+                return NULL;
+        }
+
         return &microcode_intel_ops;
 }
@@ -179,6 +179,7 @@ enum {
         TRACE_EVENT_FL_RECORDED_CMD_BIT,
         TRACE_EVENT_FL_CAP_ANY_BIT,
         TRACE_EVENT_FL_NO_SET_FILTER_BIT,
+        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
 };
 
 enum {
@@ -187,6 +188,7 @@ enum {
         TRACE_EVENT_FL_RECORDED_CMD     = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
         TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
         TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
+        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
 };
 
 struct ftrace_event_call {
...
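The header follows a common kernel idiom: one enum names bit positions and a second enum derives the masks from them, so a flag can be tested with a simple AND. A small stand-alone illustration of the same pattern (hypothetical flag names, not the tracing code):

#include <stdio.h>

enum {                          /* bit positions */
        MY_FLAG_FOO_BIT,
        MY_FLAG_IGNORE_ENABLE_BIT,
};

enum {                          /* masks derived from the positions */
        MY_FLAG_FOO             = (1 << MY_FLAG_FOO_BIT),
        MY_FLAG_IGNORE_ENABLE   = (1 << MY_FLAG_IGNORE_ENABLE_BIT),
};

int main(void)
{
        unsigned int flags = MY_FLAG_IGNORE_ENABLE;

        if (flags & MY_FLAG_IGNORE_ENABLE)      /* same test the tracer does */
                printf("event ignores 'enable' handling\n");
        return 0;
}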
@@ -6382,6 +6382,8 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                         if (!sg)
                                 return -ENOMEM;
 
+                        sg->next = sg;
+
                         *per_cpu_ptr(sdd->sg, j) = sg;
 
                         sgp = kzalloc_node(sizeof(struct sched_group_power),
...
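For context on the sched fix: the scheduler walks sched_groups as a circular singly linked list with a do/while loop, so even a freshly allocated, lone group must point back at itself; otherwise a traversal over a partially built domain dereferences a NULL ->next, which is what broke KVM and ia64 boot. A minimal user-space sketch of that assumption (illustrative names, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct group {
        int id;
        struct group *next;     /* circular: last node points back to first */
};

static void visit_all(struct group *first)
{
        struct group *g = first;

        do {                    /* assumes a well-formed circular list */
                printf("group %d\n", g->id);
                g = g->next;
        } while (g != first);
}

int main(void)
{
        struct group *sg = calloc(1, sizeof(*sg));

        sg->id = 0;
        sg->next = sg;          /* self-link, as the fix above does */
        visit_all(sg);          /* without the self-link, g->next is NULL */
        free(sg);
        return 0;
}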
@@ -294,6 +294,9 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
                 if (!call->name || !call->class || !call->class->reg)
                         continue;
 
+                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
+                        continue;
+
                 if (match &&
                     strcmp(match, call->name) != 0 &&
                     strcmp(match, call->class->system) != 0)
@@ -1164,7 +1167,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                 return -1;
         }
 
-        if (call->class->reg)
+        if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                 trace_create_file("enable", 0644, call->dir, call,
                                   enable);
...
@@ -180,6 +180,7 @@ struct ftrace_event_call __used event_##call = {        \
         .event.type             = etype,                        \
         .class                  = &event_class_ftrace_##call,   \
         .print_fmt              = print,                        \
+        .flags                  = TRACE_EVENT_FL_IGNORE_ENABLE, \
 };                                                              \
 struct ftrace_event_call __used                                 \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
...
@@ -774,10 +774,10 @@ $(OUTPUT)perf.o perf.spec \
 # over the general rule for .o
 
 $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS
-        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $<
 
 $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS
-        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -Wno-redundant-decls -Wno-switch-default -Wno-unused-function $<
+        $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $<
 
 $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS
         $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $<
...
@@ -283,6 +283,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 {
         struct perf_event_attr *attr = &evsel->attr;
         struct xyarray *group_fd = NULL;
+        bool exclude_guest_missing = false;
+        int ret;
 
         if (group && evsel != first)
                 group_fd = first->fd;
@@ -293,16 +295,39 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
 
         attr->inherit = !no_inherit;
 
-        if (system_wide)
-                return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
-                                                group, group_fd);
+retry:
+        if (exclude_guest_missing)
+                evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+
+        if (system_wide) {
+                ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+                                               group, group_fd);
+                if (ret)
+                        goto check_ret;
+                return 0;
+        }
 
         if (!target_pid && !target_tid && (!group || evsel == first)) {
                 attr->disabled = 1;
                 attr->enable_on_exec = 1;
         }
 
-        return perf_evsel__open_per_thread(evsel, evsel_list->threads,
-                                           group, group_fd);
+        ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
+                                          group, group_fd);
+        if (!ret)
+                return 0;
+        /* fall through */
+check_ret:
+        if (ret && errno == EINVAL) {
+                if (!exclude_guest_missing &&
+                    (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+                        pr_debug("Old kernel, cannot exclude "
+                                 "guest or host samples.\n");
+                        exclude_guest_missing = true;
+                        goto retry;
+                }
+        }
+        return ret;
 }
@@ -463,8 +488,13 @@ static int run_perf_stat(int argc __used, const char **argv)
 
         list_for_each_entry(counter, &evsel_list->entries, node) {
                 if (create_perf_stat_counter(counter, first) < 0) {
+                        /*
+                         * PPC returns ENXIO for HW counters until 2.6.37
+                         * (behavior changed with commit b0a873e).
+                         */
                         if (errno == EINVAL || errno == ENOSYS ||
-                            errno == ENOENT || errno == EOPNOTSUPP) {
+                            errno == ENOENT || errno == EOPNOTSUPP ||
+                            errno == ENXIO) {
                                 if (verbose)
                                         ui__warning("%s event is not supported by the kernel.\n",
                                                     event_name(counter));
...
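For reference, perf stat obtains counters through the perf_event_open() syscall (wrapped by the perf_evsel__open_*() helpers above); when an event is not supported, the syscall fails and errno carries one of the values checked in that hunk. A minimal stand-alone sketch of the call and the errno check (glibc provides no wrapper, so it goes through syscall(2); not perf's own code):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* pid = 0 (this process), cpu = -1 (any CPU), no group, no flags */
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                /* ENOENT/EOPNOTSUPP/ENXIO etc. => event unsupported here */
                fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
                return 1;
        }
        close(fd);
        return 0;
}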
@@ -296,7 +296,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
         if (mkdir_p(filename, 0755))
                 goto out_free;
 
-        snprintf(filename + len, sizeof(filename) - len, "/%s", sbuild_id);
+        snprintf(filename + len, size - len, "/%s", sbuild_id);
 
         if (access(filename, F_OK)) {
                 if (is_kallsyms) {
...
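The build-id fix addresses a classic pitfall: when the buffer is heap-allocated, sizeof() yields the size of the pointer (typically 8 bytes), not the allocation, so the snprintf above was truncating the appended build-id; the fix passes the tracked allocation size instead. A small stand-alone illustration (hypothetical buffer and sizes, not the perf code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        size_t size = 64;                       /* intended buffer size */
        char *filename = malloc(size);

        strcpy(filename, "/tmp/prefix");
        size_t len = strlen(filename);

        /* Wrong: sizeof(filename) is the pointer size (e.g. 8), so at most
         * a handful of characters plus the NUL terminator would be written. */
        printf("sizeof(filename) = %zu\n", sizeof(filename));

        /* Right: use the tracked allocation size, as the fix does. */
        snprintf(filename + len, size - len, "/%s", "0123456789abcdef");
        printf("%s\n", filename);

        free(filename);
        return 0;
}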