Commit 29d14f08 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Thomas Gleixner:
 "This is much bigger than typical fixes, but Peter found a category of
  races that spurred more fixes and more debugging enhancements.  Work
  started before the merge window, but got finished only now.

  Aside from that, this contains the usual small fixes to perf and tools.
  Nothing particularly exciting"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (43 commits)
  perf: Remove/simplify lockdep annotation
  perf: Synchronously clean up child events
  perf: Untangle 'owner' confusion
  perf: Add flags argument to perf_remove_from_context()
  perf: Clean up sync_child_event()
  perf: Robustify event->owner usage and SMP ordering
  perf: Fix STATE_EXIT usage
  perf: Update locking order
  perf: Remove __free_event()
  perf/bpf: Convert perf_event_array to use struct file
  perf: Fix NULL deref
  perf/x86: De-obfuscate code
  perf/x86: Fix uninitialized value usage
  perf: Fix race in perf_event_exit_task_context()
  perf: Fix orphan hole
  perf stat: Do not clean event's private stats
  perf hists: Fix HISTC_MEM_DCACHELINE width setting
  perf annotate browser: Fix behaviour of Shift-Tab with nothing focussed
  perf tests: Remove wrong semicolon in while loop in CQM test
  perf: Synchronously free aux pages in case of allocation failure
  ...
parents bbfb239a 28fb8a5b
......@@ -1960,7 +1960,8 @@ intel_bts_constraints(struct perf_event *event)
static int intel_alt_er(int idx, u64 config)
{
-int alt_idx;
+int alt_idx = idx;
if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
return idx;
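The initialization matters because of how intel_alt_er() continues past the early return: when idx names neither offcore response register, alt_idx could previously reach the valid_mask check unset. A rough sketch of the fixed flow, reconstructed for illustration rather than quoted verbatim from the kernel source (the EXTRA_REG_RSP_* handling is assumed from the surrounding logic):

	static int intel_alt_er(int idx, u64 config)
	{
		int alt_idx = idx;	/* fix: default to the caller's index */

		if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
			return idx;

		if (idx == EXTRA_REG_RSP_0)
			alt_idx = EXTRA_REG_RSP_1;

		if (idx == EXTRA_REG_RSP_1)
			alt_idx = EXTRA_REG_RSP_0;

		/* before the fix, alt_idx could be read uninitialized here
		 * whenever idx was neither EXTRA_REG_RSP_0 nor EXTRA_REG_RSP_1 */
		if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
			return idx;

		return alt_idx;
	}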
......@@ -2897,14 +2898,12 @@ static void intel_pmu_cpu_starting(int cpu)
return;
if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
+void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
struct intel_shared_regs *pc;
pc = per_cpu(cpu_hw_events, i).shared_regs;
if (pc && pc->core_id == core_id) {
-cpuc->kfree_on_online[0] = cpuc->shared_regs;
+*onln = cpuc->shared_regs;
cpuc->shared_regs = pc;
break;
}
......
......@@ -995,6 +995,9 @@ static int __init uncore_pci_init(void)
case 87: /* Knights Landing */
ret = knl_uncore_pci_init();
break;
+case 94: /* SkyLake */
+ret = skl_uncore_pci_init();
+break;
default:
return 0;
}
......
......@@ -336,6 +336,7 @@ int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
+int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
int snb_pci2phy_map_init(int devid);
......
......@@ -8,6 +8,7 @@
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
+#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
......@@ -524,6 +525,14 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
{ /* end: all zeroes */ },
};
+static const struct pci_device_id skl_uncore_pci_ids[] = {
+{ /* IMC */
+PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+},
+{ /* end: all zeroes */ },
+};
static struct pci_driver snb_uncore_pci_driver = {
.name = "snb_uncore",
.id_table = snb_uncore_pci_ids,
......@@ -544,6 +553,11 @@ static struct pci_driver bdw_uncore_pci_driver = {
.id_table = bdw_uncore_pci_ids,
};
+static struct pci_driver skl_uncore_pci_driver = {
+.name = "skl_uncore",
+.id_table = skl_uncore_pci_ids,
+};
struct imc_uncore_pci_dev {
__u32 pci_id;
struct pci_driver *driver;
......@@ -558,6 +572,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
+IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */
{ /* end marker */ }
};
......@@ -610,6 +625,11 @@ int bdw_uncore_pci_init(void)
return imc_uncore_pci_init();
}
+int skl_uncore_pci_init(void)
+{
+return imc_uncore_pci_init();
+}
/* end of Sandy Bridge uncore support */
/* Nehalem uncore support */
......
......@@ -634,9 +634,6 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
-struct delayed_work orphans_remove;
-bool orphans_remove_sched;
};
/*
......@@ -729,7 +726,7 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
-extern struct perf_event *perf_event_get(unsigned int fd);
+extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
......@@ -1044,7 +1041,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
-extern int __perf_event_disable(void *info);
+extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
......@@ -1070,7 +1067,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
-static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);
......
......@@ -291,10 +291,13 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
{
struct perf_event *event;
const struct perf_event_attr *attr;
+struct file *file;
-event = perf_event_get(fd);
-if (IS_ERR(event))
-return event;
+file = perf_event_get(fd);
+if (IS_ERR(file))
+return file;
+event = file->private_data;
attr = perf_event_attrs(event);
if (IS_ERR(attr))
......@@ -304,24 +307,22 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
goto err;
if (attr->type == PERF_TYPE_RAW)
-return event;
+return file;
if (attr->type == PERF_TYPE_HARDWARE)
-return event;
+return file;
if (attr->type == PERF_TYPE_SOFTWARE &&
attr->config == PERF_COUNT_SW_BPF_OUTPUT)
-return event;
+return file;
err:
-perf_event_release_kernel(event);
+fput(file);
return ERR_PTR(-EINVAL);
}
static void perf_event_fd_array_put_ptr(void *ptr)
{
-struct perf_event *event = ptr;
-perf_event_release_kernel(event);
+fput((struct file *)ptr);
}
static const struct bpf_map_ops perf_event_array_ops = {
......
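Taken together, the two hunks above mean the BPF perf event array now stores the struct file and lets the held file reference pin the event. A minimal sketch of the caller pattern implied by the new interface (illustrative only; it assumes the declarations shown above):

	struct file *file;
	struct perf_event *event;

	file = perf_event_get(fd);	/* returns the perf event's file with a reference held */
	if (IS_ERR(file))
		return PTR_ERR(file);

	event = file->private_data;	/* the perf_event lives behind the file */

	/* ... use the event; the held file reference keeps it alive ... */

	fput(file);			/* drop the reference instead of calling
					 * perf_event_release_kernel() */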
......@@ -444,7 +444,7 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
* current task.
*/
if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
-__perf_event_disable(bp);
+perf_event_disable_local(bp);
else
perf_event_disable(bp);
......
......@@ -459,6 +459,25 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
__free_page(page);
}
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+int pg;
+if (rb->aux_priv) {
+rb->free_aux(rb->aux_priv);
+rb->free_aux = NULL;
+rb->aux_priv = NULL;
+}
+if (rb->aux_nr_pages) {
+for (pg = 0; pg < rb->aux_nr_pages; pg++)
+rb_free_aux_page(rb, pg);
+kfree(rb->aux_pages);
+rb->aux_nr_pages = 0;
+}
+}
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
......@@ -547,30 +566,11 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
if (!ret)
rb->aux_pgoff = pgoff;
else
-rb_free_aux(rb);
+__rb_free_aux(rb);
return ret;
}
-static void __rb_free_aux(struct ring_buffer *rb)
-{
-int pg;
-if (rb->aux_priv) {
-rb->free_aux(rb->aux_priv);
-rb->free_aux = NULL;
-rb->aux_priv = NULL;
-}
-if (rb->aux_nr_pages) {
-for (pg = 0; pg < rb->aux_nr_pages; pg++)
-rb_free_aux_page(rb, pg);
-kfree(rb->aux_pages);
-rb->aux_nr_pages = 0;
-}
-}
void rb_free_aux(struct ring_buffer *rb)
{
if (atomic_dec_and_test(&rb->aux_refcount))
......
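Moving __rb_free_aux() above rb_alloc_aux() is what lets the allocation-failure path free the half-built AUX buffer right away: rb_free_aux() only frees once aux_refcount drops to zero, but on a failed allocation no user has taken that reference yet, so the direct call is the appropriate teardown. Schematically (restating the two paths visible in the hunks above):

	/* normal teardown: the last reference frees the AUX buffer */
	void rb_free_aux(struct ring_buffer *rb)
	{
		if (atomic_dec_and_test(&rb->aux_refcount))
			__rb_free_aux(rb);
	}

	/* rb_alloc_aux() error path: nothing holds a reference, free directly */
	if (ret)
		__rb_free_aux(rb);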
......@@ -191,14 +191,17 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct perf_event *event;
+struct file *file;
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
-event = (struct perf_event *)array->ptrs[index];
-if (!event)
+file = (struct file *)array->ptrs[index];
+if (unlikely(!file))
return -ENOENT;
+event = file->private_data;
/* make sure event is local and doesn't have pmu::count */
if (event->oncpu != smp_processor_id() ||
event->pmu->count)
......@@ -228,6 +231,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
void *data = (void *) (long) r4;
struct perf_sample_data sample_data;
struct perf_event *event;
+struct file *file;
struct perf_raw_record raw = {
.size = size,
.data = data,
......@@ -236,10 +240,12 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
if (unlikely(index >= array->map.max_entries))
return -E2BIG;
-event = (struct perf_event *)array->ptrs[index];
-if (unlikely(!event))
+file = (struct file *)array->ptrs[index];
+if (unlikely(!file))
return -ENOENT;
+event = file->private_data;
if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
return -EINVAL;
......
......@@ -77,6 +77,9 @@ include config/utilities.mak
# Define NO_AUXTRACE if you do not want AUX area tracing support
#
# Define NO_LIBBPF if you do not want BPF support
+#
+# Define FEATURES_DUMP to provide features detection dump file
+# and bypass the feature detection
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
......@@ -166,6 +169,15 @@ ifeq ($(config),1)
include config/Makefile
endif
+# The FEATURE_DUMP_EXPORT holds location of the actual
+# FEATURE_DUMP file to be used to bypass feature detection
+# (for bpf or any other subproject)
+ifeq ($(FEATURES_DUMP),)
+FEATURE_DUMP_EXPORT := $(realpath $(OUTPUT)FEATURE-DUMP)
+else
+FEATURE_DUMP_EXPORT := $(FEATURES_DUMP)
+endif
export prefix bindir sharedir sysconfdir DESTDIR
# sparse is architecture-neutral, which means that we need to tell it
......@@ -436,7 +448,7 @@ $(LIBAPI)-clean:
$(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
$(LIBBPF): fixdep FORCE
-$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(realpath $(OUTPUT)FEATURE-DUMP)
+$(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
......@@ -610,6 +622,17 @@ clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean
$(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
$(python-clean)
+#
+# To provide FEATURE-DUMP into $(FEATURE_DUMP_COPY)
+# file if defined, with no further action.
+feature-dump:
+ifdef FEATURE_DUMP_COPY
+@cp $(OUTPUT)FEATURE-DUMP $(FEATURE_DUMP_COPY)
+@echo "FEATURE-DUMP file copied into $(FEATURE_DUMP_COPY)"
+else
+@echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
+endif
#
# Trick: if ../../.git does not exist - we are building out of tree for example,
# then force version regeneration:
......
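The pieces fit together as follows: a build writes its feature detection results to $(OUTPUT)FEATURE-DUMP, the new feature-dump target copies that file to $(FEATURE_DUMP_COPY) when asked, and a later build started with FEATURES_DUMP=<file> includes that file instead of re-running detection, with FEATURE_DUMP_EXPORT handing the same path down to the libbpf sub-make. An assumed example invocation, not taken from the commits themselves, would be 'make feature-dump FEATURE_DUMP_COPY=/tmp/perf-features' followed by 'make FEATURES_DUMP=/tmp/perf-features'.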
......@@ -17,7 +17,7 @@ static pid_t spawn(void)
if (pid)
return pid;
-while(1);
+while(1)
sleep(5);
return 0;
}
......
......@@ -181,7 +181,11 @@ LDFLAGS += -Wl,-z,noexecstack
EXTLIBS = -lpthread -lrt -lm -ldl
+ifeq ($(FEATURES_DUMP),)
include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
ifeq ($(feature-stackprotector-all), 1)
CFLAGS += -fstack-protector-all
......
......@@ -5,7 +5,7 @@ ifeq ($(MAKECMDGOALS),)
# no target specified, trigger the whole suite
all:
@echo "Testing Makefile"; $(MAKE) -sf tests/make MK=Makefile
@echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf
@echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf SET_PARALLEL=1 SET_O=1
else
# run only specific test over 'Makefile'
%:
......@@ -13,6 +13,26 @@ else
endif
else
PERF := .
+PERF_O := $(PERF)
+O_OPT :=
+ifneq ($(O),)
+FULL_O := $(shell readlink -f $(O) || echo $(O))
+PERF_O := $(FULL_O)
+ifeq ($(SET_O),1)
+O_OPT := 'O=$(FULL_O)'
+endif
+K_O_OPT := 'O=$(FULL_O)'
+endif
+PARALLEL_OPT=
+ifeq ($(SET_PARALLEL),1)
+cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ifeq ($(cores),0)
+cores := 1
+endif
+PARALLEL_OPT="-j$(cores)"
+endif
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
......@@ -156,11 +176,11 @@ test_make_doc := $(test_ok)
test_make_help_O := $(test_ok)
test_make_doc_O := $(test_ok)
-test_make_python_perf_so := test -f $(PERF)/python/perf.so
+test_make_python_perf_so := test -f $(PERF_O)/python/perf.so
-test_make_perf_o := test -f $(PERF)/perf.o
-test_make_util_map_o := test -f $(PERF)/util/map.o
-test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o
+test_make_perf_o := test -f $(PERF_O)/perf.o
+test_make_util_map_o := test -f $(PERF_O)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF_O)/util/pmu-bison.o
define test_dest_files
for file in $(1); do \
......@@ -227,7 +247,7 @@ test_make_perf_o_O := test -f $$TMP_O/perf.o
test_make_util_map_o_O := test -f $$TMP_O/util/map.o
test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
-test_default = test -x $(PERF)/perf
+test_default = test -x $(PERF_O)/perf
test = $(if $(test_$1),$(test_$1),$(test_default))
test_default_O = test -x $$TMP_O/perf
......@@ -247,12 +267,12 @@ endif
MAKEFLAGS := --no-print-directory
-clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null)
+clean := @(cd $(PERF); make -s -f $(MK) $(O_OPT) clean >/dev/null)
$(run):
$(call clean)
@TMP_DEST=$$(mktemp -d); \
cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \
cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST $($@)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1; \
echo " test: $(call test,$@)" >> $@ 2>&1; \
......@@ -263,7 +283,7 @@ $(run_O):
$(call clean)
@TMP_O=$$(mktemp -d); \
TMP_DEST=$$(mktemp -d); \
cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
cmd="cd $(PERF) && make -f $(MK) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1 && \
echo " test: $(call test_O,$@)" >> $@ 2>&1; \
......@@ -276,17 +296,22 @@ tarpkg:
( eval $$cmd ) >> $@ 2>&1 && \
rm -f $@
+KERNEL_O := ../..
+ifneq ($(O),)
+KERNEL_O := $(O)
+endif
make_kernelsrc:
@echo "- make -C <kernelsrc> tools/perf"
@echo "- make -C <kernelsrc> $(PARALLEL_OPT) $(K_O_OPT) tools/perf"
$(call clean); \
-(make -C ../.. tools/perf) > $@ 2>&1 && \
-test -x perf && rm -f $@ || (cat $@ ; false)
+(make -C ../.. $(PARALLEL_OPT) $(K_O_OPT) tools/perf) > $@ 2>&1 && \
+test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
make_kernelsrc_tools:
@echo "- make -C <kernelsrc>/tools perf"
@echo "- make -C <kernelsrc>/tools $(PARALLEL_OPT) $(K_O_OPT) perf"
$(call clean); \
-(make -C ../../tools perf) > $@ 2>&1 && \
-test -x perf && rm -f $@ || (cat $@ ; false)
+(make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
+test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
@echo OK
......
......@@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
nd = browser->curr_hot;
break;
case K_UNTAB:
-if (nd != NULL)
+if (nd != NULL) {
nd = rb_next(nd);
if (nd == NULL)
nd = rb_first(&browser->entries);
-else
+} else
nd = browser->curr_hot;
break;
case K_F1:
......
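The annotate browser change is a plain C statement-grouping fix: without the braces, only the rb_next() call is controlled by the outer if, and the else ends up belonging to the following if (nd == NULL) rather than to if (nd != NULL), so the curr_hot fallback fires in the wrong cases. How the unbraced version parses (illustration only):

	/* what the compiler saw before the fix */
	if (nd != NULL)
		nd = rb_next(nd);		/* only this line is guarded */
	if (nd == NULL)				/* separate statement, always evaluated */
		nd = rb_first(&browser->entries);
	else
		nd = browser->curr_hot;		/* the else pairs with the second if */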
......@@ -131,6 +131,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
symlen);
+hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+symlen);
}
if (h->mem_info->iaddr.sym) {
......
......@@ -1149,7 +1149,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
machine = machines__find(machines, pid);
if (!machine)
-machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
+machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
return machine;
}
......
......@@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
int i, ret;
aggr->val = aggr->ena = aggr->run = 0;
-init_stats(ps->res_stats);
if (counter->per_pkg)
zero_per_pkg(counter);
......
......@@ -1466,7 +1466,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
* Read the build id if possible. This is required for
* DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
*/
-if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
+if (filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
dso__set_build_id(dso, build_id);
/*
......