Commit 6751b8d9 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "On the kernel side there's a bunch of ring-buffer ordering fixes for a
  reproducible bug, plus a PEBS constraints regression fix.

  Plus tooling fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tools headers UAPI: Sync kvm.h headers with the kernel sources
  perf record: Fix s390 missing module symbol and warning for non-root users
  perf machine: Read also the end of the kernel
  perf test vmlinux-kallsyms: Ignore aliases to _etext when searching on kallsyms
  perf session: Add missing swap ops for namespace events
  perf namespace: Protect reading thread's namespace
  tools headers UAPI: Sync drm/drm.h with the kernel
  tools headers UAPI: Sync drm/i915_drm.h with the kernel
  tools headers UAPI: Sync linux/fs.h with the kernel
  tools headers UAPI: Sync linux/sched.h with the kernel
  tools arch x86: Sync asm/cpufeatures.h with the kernel
  tools include UAPI: Update copy of files related to new fspick, fsmount, fsconfig, fsopen, move_mount and open_tree syscalls
  perf arm64: Fix mksyscalltbl when system kernel headers are ahead of the kernel
  perf data: Fix 'strncat may truncate' build failure with recent gcc
  perf/ring-buffer: Use regular variables for nesting
  perf/ring-buffer: Always use {READ,WRITE}_ONCE() for rb->user_page data
  perf/ring_buffer: Add ordering to rb->nest increment
  perf/ring_buffer: Fix exposing a temporarily decreased data_head
  perf/x86/intel/ds: Fix EVENT vs. UEVENT PEBS constraints
parents af042452 849e96f3
@@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1),	/* SIMD_INST_RETURED.ANY */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
 	EVENT_CONSTRAINT_END
 };

@@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1),	/* MISPREDICTED_BRANCH_RETIRED */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),	/* MEM_LOAD_RETIRED.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
 	/* Allow all events as PEBS with no flags */
 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END

@@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 struct event_constraint intel_slm_pebs_event_constraints[] = {
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
 	/* Allow all events as PEBS with no flags */
 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END

@@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),	/* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),	/* FP_ASSIST.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };

@@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),	/* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),	/* FP_ASSIST.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };

@@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),	/* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),	/* MEM_TRANS_RETIRED.PRECISE_STORES */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	/* MEM_UOP_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */

@@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),	/* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),	/* MEM_TRANS_RETIRED.PRECISE_STORES */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),	/* MEM_UOP_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),	/* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),	/* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */

@@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),	/* MEM_TRANS_RETIRED.* */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */

@@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),	/* MEM_TRANS_RETIRED.* */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */

@@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
 struct event_constraint intel_skl_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),	/* INST_RETIRED.PREC_DIST */
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),	/* MEM_TRANS_RETIRED.* */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
...
@@ -24,7 +24,7 @@ struct ring_buffer {
 	atomic_t			poll;		/* POLL_ for wakeups */
 	local_t				head;		/* write position */
-	local_t				nest;		/* nested writers */
+	unsigned int			nest;		/* nested writers */
 	local_t				events;		/* event limit */
 	local_t				wakeup;		/* wakeup stamp */
 	local_t				lost;		/* nr records lost */

@@ -41,7 +41,7 @@ struct ring_buffer {
 	/* AUX area */
 	long				aux_head;
-	local_t				aux_nest;
+	unsigned int			aux_nest;
 	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
 	unsigned long			aux_pgoff;
 	int				aux_nr_pages;
...
@@ -38,7 +38,12 @@ static void perf_output_get_handle(struct perf_output_handle *handle)
 	struct ring_buffer *rb = handle->rb;
 
 	preempt_disable();
-	local_inc(&rb->nest);
+
+	/*
+	 * Avoid an explicit LOAD/STORE such that architectures with memops
+	 * can use them.
+	 */
+	(*(volatile unsigned int *)&rb->nest)++;
+
 	handle->wakeup = local_read(&rb->wakeup);
 }

@@ -46,17 +51,35 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 {
 	struct ring_buffer *rb = handle->rb;
 	unsigned long head;
+	unsigned int nest;
+
+	/*
+	 * If this isn't the outermost nesting, we don't have to update
+	 * @rb->user_page->data_head.
+	 */
+	nest = READ_ONCE(rb->nest);
+	if (nest > 1) {
+		WRITE_ONCE(rb->nest, nest - 1);
+		goto out;
+	}
 
 again:
+	/*
+	 * In order to avoid publishing a head value that goes backwards,
+	 * we must ensure the load of @rb->head happens after we've
+	 * incremented @rb->nest.
+	 *
+	 * Otherwise we can observe a @rb->head value before one published
+	 * by an IRQ/NMI happening between the load and the increment.
+	 */
+	barrier();
 	head = local_read(&rb->head);
 
 	/*
-	 * IRQ/NMI can happen here, which means we can miss a head update.
+	 * IRQ/NMI can happen here and advance @rb->head, causing our
+	 * load above to be stale.
 	 */
 
-	if (!local_dec_and_test(&rb->nest))
-		goto out;
-
 	/*
 	 * Since the mmap() consumer (userspace) can run on a different CPU:
 	 *

@@ -84,14 +107,23 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 	 * See perf_output_begin().
 	 */
 	smp_wmb(); /* B, matches C */
-	rb->user_page->data_head = head;
+	WRITE_ONCE(rb->user_page->data_head, head);
 
 	/*
-	 * Now check if we missed an update -- rely on previous implied
-	 * compiler barriers to force a re-read.
+	 * We must publish the head before decrementing the nest count,
+	 * otherwise an IRQ/NMI can publish a more recent head value and our
+	 * write will (temporarily) publish a stale value.
 	 */
+	barrier();
+	WRITE_ONCE(rb->nest, 0);
+
+	/*
+	 * Ensure we decrement @rb->nest before we validate the @rb->head.
+	 * Otherwise we cannot be sure we caught the 'last' nested update.
+	 */
+	barrier();
 	if (unlikely(head != local_read(&rb->head))) {
-		local_inc(&rb->nest);
+		WRITE_ONCE(rb->nest, 1);
 		goto again;
 	}
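
The comments in the hunk above lay out the ordering the fix depends on: the outermost writer loads @rb->head only after @rb->nest is raised, publishes data_head before dropping @rb->nest back to zero, and only then re-checks @rb->head to catch an IRQ/NMI writer that nested in between. Below is a minimal userspace-style sketch of that same pattern, with hypothetical names and simplified types and assuming GCC/Clang builtins; it is an illustration, not the kernel implementation.

/*
 * Hypothetical, simplified model of the nesting scheme above -- not the
 * kernel code. One writer CPU (interruptible by IRQ/NMI writers) updates
 * head/nest; a reader on another CPU only ever looks at data_head.
 */
#include <stdint.h>

#define barrier()	__asm__ __volatile__("" ::: "memory")	/* compiler barrier */
#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))
#define smp_wmb()	__sync_synchronize()	/* stand-in for the real smp_wmb() */

struct sketch_rb {
	unsigned long head;		/* written only by this CPU, incl. IRQ/NMI */
	unsigned int nest;		/* CPU-local nesting depth */
	unsigned long data_head;	/* the value userspace polls */
};

static void sketch_output_begin(struct sketch_rb *rb)
{
	(*(volatile unsigned int *)&rb->nest)++;	/* same idea as in the hunk above */
}

static void sketch_output_end(struct sketch_rb *rb)
{
	unsigned int nest = READ_ONCE(rb->nest);
	unsigned long head;

	if (nest > 1) {			/* nested writer: leave publication to the outermost one */
		WRITE_ONCE(rb->nest, nest - 1);
		return;
	}
again:
	barrier();			/* load head only after nest is known to be raised */
	head = READ_ONCE(rb->head);

	smp_wmb();			/* order the data writes before publishing head */
	WRITE_ONCE(rb->data_head, head);

	barrier();			/* publish head before dropping the nest count */
	WRITE_ONCE(rb->nest, 0);

	barrier();			/* only then re-check for a nested update we may have missed */
	if (head != READ_ONCE(rb->head)) {
		WRITE_ONCE(rb->nest, 1);
		goto again;
	}
}

Because @rb->nest is only ever touched by the local CPU, plain volatile accesses with compiler barriers are enough for it; only data_head crosses CPUs and needs the store barrier.
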
@@ -330,6 +362,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 	struct perf_event *output_event = event;
 	unsigned long aux_head, aux_tail;
 	struct ring_buffer *rb;
+	unsigned int nest;
 
 	if (output_event->parent)
 		output_event = output_event->parent;

@@ -360,13 +393,16 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 	if (!refcount_inc_not_zero(&rb->aux_refcount))
 		goto err;
 
+	nest = READ_ONCE(rb->aux_nest);
 	/*
 	 * Nesting is not supported for AUX area, make sure nested
 	 * writers are caught early
 	 */
-	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
+	if (WARN_ON_ONCE(nest))
 		goto err_put;
 
+	WRITE_ONCE(rb->aux_nest, nest + 1);
 	aux_head = rb->aux_head;
 
 	handle->rb = rb;

@@ -394,7 +430,7 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
 		if (!handle->size) { /* A, matches D */
 			event->pending_disable = smp_processor_id();
 			perf_output_wakeup(handle);
-			local_set(&rb->aux_nest, 0);
+			WRITE_ONCE(rb->aux_nest, 0);
 			goto err_put;
 		}
 	}

@@ -471,7 +507,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 		perf_event_aux_event(handle->event, aux_head, size,
 				     handle->aux_flags);
 
-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
 	if (rb_need_aux_wakeup(rb))
 		wakeup = true;

@@ -483,7 +519,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 	handle->event = NULL;
 
-	local_set(&rb->aux_nest, 0);
+	WRITE_ONCE(rb->aux_nest, 0);
 	/* can't be last */
 	rb_free_aux(rb);
 	ring_buffer_put(rb);

@@ -503,7 +539,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
 	rb->aux_head += size;
 
-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
 	if (rb_need_aux_wakeup(rb)) {
 		perf_output_wakeup(handle);
 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
...
@@ -35,6 +35,7 @@
 #include <linux/psci.h>
 #include <linux/types.h>
 #include <asm/ptrace.h>
+#include <asm/sve_context.h>
 
 #define __KVM_HAVE_GUEST_DEBUG
 #define __KVM_HAVE_IRQ_LINE

@@ -102,6 +103,9 @@ struct kvm_regs {
 #define KVM_ARM_VCPU_EL1_32BIT		1 /* CPU running a 32bit VM */
 #define KVM_ARM_VCPU_PSCI_0_2		2 /* CPU uses PSCI v0.2 */
 #define KVM_ARM_VCPU_PMU_V3		3 /* Support guest PMUv3 */
+#define KVM_ARM_VCPU_SVE		4 /* enable SVE for this CPU */
+#define KVM_ARM_VCPU_PTRAUTH_ADDRESS	5 /* VCPU uses address authentication */
+#define KVM_ARM_VCPU_PTRAUTH_GENERIC	6 /* VCPU uses generic authentication */
 
 struct kvm_vcpu_init {
 	__u32 target;

@@ -226,6 +230,45 @@ struct kvm_vcpu_events {
 					 KVM_REG_ARM_FW | ((r) & 0xffff))
 #define KVM_REG_ARM_PSCI_VERSION	KVM_REG_ARM_FW_REG(0)
 
+/* SVE registers */
+#define KVM_REG_ARM64_SVE		(0x15 << KVM_REG_ARM_COPROC_SHIFT)
+
+/* Z- and P-regs occupy blocks at the following offsets within this range: */
+#define KVM_REG_ARM64_SVE_ZREG_BASE	0
+#define KVM_REG_ARM64_SVE_PREG_BASE	0x400
+#define KVM_REG_ARM64_SVE_FFR_BASE	0x600
+
+#define KVM_ARM64_SVE_NUM_ZREGS		__SVE_NUM_ZREGS
+#define KVM_ARM64_SVE_NUM_PREGS		__SVE_NUM_PREGS
+
+#define KVM_ARM64_SVE_MAX_SLICES	32
+
+#define KVM_REG_ARM64_SVE_ZREG(n, i)	\
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \
+	 KVM_REG_SIZE_U2048 |						\
+	 (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) |			\
+	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_PREG(n, i)	\
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \
+	 KVM_REG_SIZE_U256 |						\
+	 (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) |			\
+	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_REG_ARM64_SVE_FFR(i)	\
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \
+	 KVM_REG_SIZE_U256 |						\
+	 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
+
+#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
+#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
+
+/* Vector lengths pseudo-register: */
+#define KVM_REG_ARM64_SVE_VLS		(KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \
+					 KVM_REG_SIZE_U512 | 0xffff)
+#define KVM_ARM64_SVE_VLS_WORDS	\
+	((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR	0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
...
@@ -482,6 +482,8 @@ struct kvm_ppc_cpu_char {
 #define  KVM_REG_PPC_ICP_PPRI_SHIFT	16	/* pending irq priority */
 #define  KVM_REG_PPC_ICP_PPRI_MASK	0xff
 
+#define KVM_REG_PPC_VP_STATE	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d)
+
 /* Device control API: PPC-specific devices */
 #define KVM_DEV_MPIC_GRP_MISC		1
 #define   KVM_DEV_MPIC_BASE_ADDR	0	/* 64-bit */

@@ -677,4 +679,48 @@ struct kvm_ppc_cpu_char {
 #define  KVM_XICS_PRESENTED		(1ULL << 43)
 #define  KVM_XICS_QUEUED		(1ULL << 44)
 
+/* POWER9 XIVE Native Interrupt Controller */
+#define KVM_DEV_XIVE_GRP_CTRL		1
+#define   KVM_DEV_XIVE_RESET		1
+#define   KVM_DEV_XIVE_EQ_SYNC		2
+#define KVM_DEV_XIVE_GRP_SOURCE		2	/* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_CONFIG	3	/* 64-bit source identifier */
+#define KVM_DEV_XIVE_GRP_EQ_CONFIG	4	/* 64-bit EQ identifier */
+#define KVM_DEV_XIVE_GRP_SOURCE_SYNC	5	/* 64-bit source identifier */
+
+/* Layout of 64-bit XIVE source attribute values */
+#define KVM_XIVE_LEVEL_SENSITIVE	(1ULL << 0)
+#define KVM_XIVE_LEVEL_ASSERTED		(1ULL << 1)
+
+/* Layout of 64-bit XIVE source configuration attribute values */
+#define KVM_XIVE_SOURCE_PRIORITY_SHIFT	0
+#define KVM_XIVE_SOURCE_PRIORITY_MASK	0x7
+#define KVM_XIVE_SOURCE_SERVER_SHIFT	3
+#define KVM_XIVE_SOURCE_SERVER_MASK	0xfffffff8ULL
+#define KVM_XIVE_SOURCE_MASKED_SHIFT	32
+#define KVM_XIVE_SOURCE_MASKED_MASK	0x100000000ULL
+#define KVM_XIVE_SOURCE_EISN_SHIFT	33
+#define KVM_XIVE_SOURCE_EISN_MASK	0xfffffffe00000000ULL
+
+/* Layout of 64-bit EQ identifier */
+#define KVM_XIVE_EQ_PRIORITY_SHIFT	0
+#define KVM_XIVE_EQ_PRIORITY_MASK	0x7
+#define KVM_XIVE_EQ_SERVER_SHIFT	3
+#define KVM_XIVE_EQ_SERVER_MASK		0xfffffff8ULL
+
+/* Layout of EQ configuration values (64 bytes) */
+struct kvm_ppc_xive_eq {
+	__u32 flags;
+	__u32 qshift;
+	__u64 qaddr;
+	__u32 qtoggle;
+	__u32 qindex;
+	__u8  pad[40];
+};
+
+#define KVM_XIVE_EQ_ALWAYS_NOTIFY	0x00000001
+
+#define KVM_XIVE_TIMA_PAGE_OFFSET	0
+#define KVM_XIVE_ESB_PAGE_OFFSET	4
+
 #endif /* __LINUX_KVM_POWERPC_H */
@@ -153,7 +153,9 @@ struct kvm_s390_vm_cpu_subfunc {
 	__u8 ppno[16];		/* with MSA5 */
 	__u8 kma[16];		/* with MSA8 */
 	__u8 kdsa[16];		/* with MSA9 */
-	__u8 reserved[1792];
+	__u8 sortl[32];		/* with STFLE.150 */
+	__u8 dfltcc[32];	/* with STFLE.151 */
+	__u8 reserved[1728];
 };
 
 /* kvm attributes for crypto */
...
@@ -344,6 +344,7 @@
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
 #define X86_FEATURE_AVX512_4VNNIW	(18*32+ 2) /* AVX-512 Neural Network Instructions */
 #define X86_FEATURE_AVX512_4FMAPS	(18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_MD_CLEAR		(18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT	(18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG		(18*32+18) /* Intel PCONFIG */
 #define X86_FEATURE_SPEC_CTRL		(18*32+26) /* "" Speculation Control (IBRS + IBPB) */

@@ -382,5 +383,7 @@
 #define X86_BUG_SPECTRE_V2		X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
 #define X86_BUG_SPEC_STORE_BYPASS	X86_BUG(17) /* CPU is affected by speculative store bypass attack */
 #define X86_BUG_L1TF			X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_MDS			X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MSBDS_ONLY		X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
@@ -832,9 +832,21 @@ __SYSCALL(__NR_io_uring_setup, sys_io_uring_setup)
 __SYSCALL(__NR_io_uring_enter, sys_io_uring_enter)
 #define __NR_io_uring_register 427
 __SYSCALL(__NR_io_uring_register, sys_io_uring_register)
+#define __NR_open_tree 428
+__SYSCALL(__NR_open_tree, sys_open_tree)
+#define __NR_move_mount 429
+__SYSCALL(__NR_move_mount, sys_move_mount)
+#define __NR_fsopen 430
+__SYSCALL(__NR_fsopen, sys_fsopen)
+#define __NR_fsconfig 431
+__SYSCALL(__NR_fsconfig, sys_fsconfig)
+#define __NR_fsmount 432
+__SYSCALL(__NR_fsmount, sys_fsmount)
+#define __NR_fspick 433
+__SYSCALL(__NR_fspick, sys_fspick)
 
 #undef __NR_syscalls
-#define __NR_syscalls 428
+#define __NR_syscalls 434
 
 /*
  * 32 bit systems traditionally used different
...
@@ -649,6 +649,7 @@ struct drm_gem_open {
 #define DRM_CAP_PAGE_FLIP_TARGET	0x11
 #define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
 #define DRM_CAP_SYNCOBJ		0x13
+#define DRM_CAP_SYNCOBJ_TIMELINE	0x14
 
 /** DRM_IOCTL_GET_CAP ioctl argument type */
 struct drm_get_cap {

@@ -735,8 +736,18 @@ struct drm_syncobj_handle {
 	__u32 pad;
 };
 
+struct drm_syncobj_transfer {
+	__u32 src_handle;
+	__u32 dst_handle;
+	__u64 src_point;
+	__u64 dst_point;
+	__u32 flags;
+	__u32 pad;
+};
+
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
 struct drm_syncobj_wait {
 	__u64 handles;
 	/* absolute timeout */

@@ -747,12 +758,33 @@ struct drm_syncobj_wait {
 	__u32 pad;
 };
 
+struct drm_syncobj_timeline_wait {
+	__u64 handles;
+	/* wait on specific timeline point for every handles*/
+	__u64 points;
+	/* absolute timeout */
+	__s64 timeout_nsec;
+	__u32 count_handles;
+	__u32 flags;
+	__u32 first_signaled; /* only valid when not waiting all */
+	__u32 pad;
+};
+
 struct drm_syncobj_array {
 	__u64 handles;
 	__u32 count_handles;
 	__u32 pad;
 };
 
+struct drm_syncobj_timeline_array {
+	__u64 handles;
+	__u64 points;
+	__u32 count_handles;
+	__u32 pad;
+};
+
 /* Query current scanout sequence number */
 struct drm_crtc_get_sequence {
 	__u32 crtc_id;		/* requested crtc_id */

@@ -909,6 +941,11 @@ extern "C" {
 #define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
 #define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
 
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT		DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY			DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER		DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
 /**
  * Device specific ioctls should only be in their respective headers
  * The device specific ioctl range is from 0x40 to 0x9f.
...
@@ -62,6 +62,28 @@ extern "C" {
 #define I915_ERROR_UEVENT		"ERROR"
 #define I915_RESET_UEVENT		"RESET"
 
+/*
+ * i915_user_extension: Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ */
+struct i915_user_extension {
+	__u64 next_extension;
+	__u32 name;
+	__u32 flags; /* All undefined bits must be zero. */
+	__u32 rsvd[4]; /* Reserved for future use; must be zero. */
+};
+
 /*
  * MOCS indexes used for GPU surfaces, defining the cacheability of the
  * surface data and the coherency for this data wrt. CPU vs. GPU accesses.

@@ -99,9 +121,23 @@ enum drm_i915_gem_engine_class {
 	I915_ENGINE_CLASS_VIDEO		= 2,
 	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
 
+	/* should be kept compact */
+
 	I915_ENGINE_CLASS_INVALID	= -1
 };
 
+/*
+ * There may be more than one engine fulfilling any role within the system.
+ * Each engine of a class is given a unique instance number and therefore
+ * any engine can be specified by its class:instance tuplet. APIs that allow
+ * access to any engine in the system will use struct i915_engine_class_instance
+ * for this identification.
+ */
+struct i915_engine_class_instance {
+	__u16 engine_class; /* see enum drm_i915_gem_engine_class */
+	__u16 engine_instance;
+};
+
 /**
  * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
  *

@@ -319,6 +355,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_ADD_CONFIG	0x37
 #define DRM_I915_PERF_REMOVE_CONFIG	0x38
 #define DRM_I915_QUERY			0x39
+/* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
 #define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)

@@ -367,6 +404,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
 #define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
 #define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
 #define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)

@@ -476,6 +514,7 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
 #define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
 #define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
+#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
 
 #define I915_PARAM_HUC_STATUS		 42

@@ -559,6 +598,8 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_MMAP_GTT_COHERENT	52
 
+/* Must be kept compact -- no holes and well documented */
+
 typedef struct drm_i915_getparam {
 	__s32 param;
 	/*

@@ -574,6 +615,7 @@ typedef struct drm_i915_getparam {
 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
 #define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
 #define I915_SETPARAM_NUM_USED_FENCES                     4
+/* Must be kept compact -- no holes */
 
 typedef struct drm_i915_setparam {
 	int param;

@@ -972,7 +1014,7 @@ struct drm_i915_gem_execbuffer2 {
 	 * struct drm_i915_gem_exec_fence *fences.
 	 */
 	__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK              (7<<0)
+#define I915_EXEC_RING_MASK              (0x3f)
 #define I915_EXEC_DEFAULT                (0<<0)
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)

@@ -1120,32 +1162,34 @@ struct drm_i915_gem_busy {
 	 * as busy may become idle before the ioctl is completed.
 	 *
 	 * Furthermore, if the object is busy, which engine is busy is only
-	 * provided as a guide. There are race conditions which prevent the
-	 * report of which engines are busy from being always accurate.
-	 * However, the converse is not true. If the object is idle, the
-	 * result of the ioctl, that all engines are idle, is accurate.
+	 * provided as a guide and only indirectly by reporting its class
+	 * (there may be more than one engine in each class). There are race
+	 * conditions which prevent the report of which engines are busy from
+	 * being always accurate. However, the converse is not true. If the
+	 * object is idle, the result of the ioctl, that all engines are idle,
+	 * is accurate.
 	 *
 	 * The returned dword is split into two fields to indicate both
-	 * the engines on which the object is being read, and the
-	 * engine on which it is currently being written (if any).
+	 * the engine classess on which the object is being read, and the
+	 * engine class on which it is currently being written (if any).
 	 *
 	 * The low word (bits 0:15) indicate if the object is being written
 	 * to by any engine (there can only be one, as the GEM implicit
 	 * synchronisation rules force writes to be serialised). Only the
-	 * engine for the last write is reported.
+	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
+	 * 1 not 0 etc) for the last write is reported.
 	 *
-	 * The high word (bits 16:31) are a bitmask of which engines are
-	 * currently reading from the object. Multiple engines may be
+	 * The high word (bits 16:31) are a bitmask of which engines classes
+	 * are currently reading from the object. Multiple engines may be
 	 * reading from the object simultaneously.
 	 *
-	 * The value of each engine is the same as specified in the
-	 * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
-	 * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
-	 * the I915_EXEC_RENDER engine for execution, and so it is never
+	 * The value of each engine class is the same as specified in the
+	 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
 	 * reported as active itself. Some hardware may have parallel
 	 * execution engines, e.g. multiple media engines, which are
-	 * mapped to the same identifier in the EXECBUFFER2 ioctl and
-	 * so are not separately reported for busyness.
+	 * mapped to the same class identifier and so are not separately
+	 * reported for busyness.
 	 *
 	 * Caveat emptor:
 	 * Only the boolean result of this query is reliable; that is whether

@@ -1412,65 +1456,17 @@ struct drm_i915_gem_wait {
 };
 
 struct drm_i915_gem_context_create {
-	/* output: id of new context*/
-	__u32 ctx_id;
+	__u32 ctx_id; /* output: id of new context*/
 	__u32 pad;
 };
 
-struct drm_i915_gem_context_destroy {
-	__u32 ctx_id;
-	__u32 pad;
-};
-
-struct drm_i915_reg_read {
-	/*
-	 * Register offset.
-	 * For 64bit wide registers where the upper 32bits don't immediately
-	 * follow the lower 32bits, the offset of the lower 32bits must
-	 * be specified
-	 */
-	__u64 offset;
-#define I915_REG_READ_8B_WA (1ul << 0)
-
-	__u64 val; /* Return value */
-};
-
-/* Known registers:
- *
- * Render engine timestamp - 0x2358 + 64bit - gen7+
- * - Note this register returns an invalid value if using the default
- *   single instruction 8byte read, in order to workaround that pass
- *   flag I915_REG_READ_8B_WA in offset field.
- *
- */
-
-struct drm_i915_reset_stats {
-	__u32 ctx_id;
+struct drm_i915_gem_context_create_ext {
+	__u32 ctx_id; /* output: id of new context*/
 	__u32 flags;
-
-	/* All resets since boot/module reload, for all contexts */
-	__u32 reset_count;
-
-	/* Number of batches lost when active in GPU, for this context */
-	__u32 batch_active;
-
-	/* Number of batches lost pending for execution, for this context */
-	__u32 batch_pending;
-
-	__u32 pad;
-};
-
-struct drm_i915_gem_userptr {
-	__u64 user_ptr;
-	__u64 user_size;
-	__u32 flags;
-#define I915_USERPTR_READ_ONLY 0x1
-#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
-	/**
-	 * Returned handle for the object.
-	 *
-	 * Object handles are nonzero.
-	 */
-	__u32 handle;
+#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
+	(-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+	__u64 extensions;
 };
 
 struct drm_i915_gem_context_param {

@@ -1491,6 +1487,28 @@ struct drm_i915_gem_context_param {
 	 * drm_i915_gem_context_param_sseu.
 	 */
 #define I915_CONTEXT_PARAM_SSEU		0x7
+
+/*
+ * Not all clients may want to attempt automatic recover of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
+/* Must be kept compact -- no holes and well documented */
+
 	__u64 value;
 };

@@ -1519,8 +1537,7 @@ struct drm_i915_gem_context_param_sseu {
 	/*
 	 * Engine class & instance to be configured or queried.
 	 */
-	__u16 engine_class;
-	__u16 engine_instance;
+	struct i915_engine_class_instance engine;
 
 	/*
 	 * Unused for now. Must be cleared to zero.

@@ -1553,6 +1570,96 @@ struct drm_i915_gem_context_param_sseu {
 	__u32 rsvd;
 };
 
+struct drm_i915_gem_context_create_ext_setparam {
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+	struct i915_user_extension base;
+	struct drm_i915_gem_context_param param;
+};
+
+struct drm_i915_gem_context_destroy {
+	__u32 ctx_id;
+	__u32 pad;
+};
+
+/*
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is setup upon creation.
+ *
+ * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @id.
+ *
+ * No flags are defined, with all bits reserved and must be zero.
+ *
+ * An extension chain maybe provided, starting with @extensions, and terminated
+ * by the @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @id.
+ *
+ * No extensions or flags are allowed currently, and so must be zero.
+ */
+struct drm_i915_gem_vm_control {
+	__u64 extensions;
+	__u32 flags;
+	__u32 vm_id;
+};
+
+struct drm_i915_reg_read {
+	/*
+	 * Register offset.
+	 * For 64bit wide registers where the upper 32bits don't immediately
+	 * follow the lower 32bits, the offset of the lower 32bits must
+	 * be specified
+	 */
+	__u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
+	__u64 val; /* Return value */
+};
+
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ *   single instruction 8byte read, in order to workaround that pass
+ *   flag I915_REG_READ_8B_WA in offset field.
+ *
+ */
+
+struct drm_i915_reset_stats {
+	__u32 ctx_id;
+	__u32 flags;
+
+	/* All resets since boot/module reload, for all contexts */
+	__u32 reset_count;
+
+	/* Number of batches lost when active in GPU, for this context */
+	__u32 batch_active;
+
+	/* Number of batches lost pending for execution, for this context */
+	__u32 batch_pending;
+
+	__u32 pad;
+};
+
+struct drm_i915_gem_userptr {
+	__u64 user_ptr;
+	__u64 user_size;
+	__u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+	/**
+	 * Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+};
+
 enum drm_i915_oa_format {
 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
 	I915_OA_FORMAT_A29,	    /* HSW only */

@@ -1714,6 +1821,7 @@ struct drm_i915_perf_oa_config {
 struct drm_i915_query_item {
 	__u64 query_id;
 #define DRM_I915_QUERY_TOPOLOGY_INFO    1
+/* Must be kept compact -- no holes and well documented */
 
 	/*
 	 * When set to zero by userspace, this is filled with the size of the
...
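
The i915_user_extension comment and the drm_i915_gem_context_create_ext / _ext_setparam structures synced above describe an extension chain for context creation. A hedged usage sketch follows, assuming a uapi <drm/i915_drm.h> recent enough to carry these definitions and an already-open DRM fd; it is an illustration of the mechanism, not code from this commit.

/* Illustrative only: chain one SETPARAM extension onto context creation. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumes a header that already has the new uapi */

static uint32_t create_non_recoverable_context(int drm_fd)
{
	struct drm_i915_gem_context_create_ext_setparam setparam = {
		.base = {
			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
			.next_extension = 0,	/* end of the chain */
		},
		.param = {
			.param = I915_CONTEXT_PARAM_RECOVERABLE,
			.value = 0,		/* ban the context after a hang */
		},
	};
	struct drm_i915_gem_context_create_ext create = {
		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
		.extensions = (uintptr_t)&setparam,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create) < 0)
		return 0;	/* the default context is 0, new ones are non-zero */

	return create.ctx_id;
}
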
@@ -91,5 +91,7 @@
 #define AT_STATX_FORCE_SYNC	0x2000	/* - Force the attributes to be sync'd with the server */
 #define AT_STATX_DONT_SYNC	0x4000	/* - Don't sync attributes with the server */
 
+#define AT_RECURSIVE		0x8000	/* Apply to the entire subtree */
+
 #endif /* _UAPI_LINUX_FCNTL_H */

@@ -320,6 +320,9 @@ struct fscrypt_key {
 #define SYNC_FILE_RANGE_WAIT_BEFORE	1
 #define SYNC_FILE_RANGE_WRITE		2
 #define SYNC_FILE_RANGE_WAIT_AFTER	4
+#define SYNC_FILE_RANGE_WRITE_AND_WAIT	(SYNC_FILE_RANGE_WRITE | \
+					 SYNC_FILE_RANGE_WAIT_BEFORE | \
+					 SYNC_FILE_RANGE_WAIT_AFTER)
 
 /*
  * Flags for preadv2/pwritev2:
...
@@ -986,8 +986,13 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
 #define KVM_CAP_EXCEPTION_PAYLOAD 164
 #define KVM_CAP_ARM_VM_IPA_SIZE 165
-#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 /* Obsolete */
 #define KVM_CAP_HYPERV_CPUID 167
+#define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168
+#define KVM_CAP_PPC_IRQ_XIVE 169
+#define KVM_CAP_ARM_SVE 170
+#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
+#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
 
 #ifdef KVM_CAP_IRQ_ROUTING

@@ -1145,6 +1150,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_SIZE_U256	0x0050000000000000ULL
 #define KVM_REG_SIZE_U512	0x0060000000000000ULL
 #define KVM_REG_SIZE_U1024	0x0070000000000000ULL
+#define KVM_REG_SIZE_U2048	0x0080000000000000ULL
 
 struct kvm_reg_list {
 	__u64 n; /* number of regs */

@@ -1211,6 +1217,8 @@ enum kvm_device_type {
 #define KVM_DEV_TYPE_ARM_VGIC_V3	KVM_DEV_TYPE_ARM_VGIC_V3
 	KVM_DEV_TYPE_ARM_VGIC_ITS,
 #define KVM_DEV_TYPE_ARM_VGIC_ITS	KVM_DEV_TYPE_ARM_VGIC_ITS
+	KVM_DEV_TYPE_XIVE,
+#define KVM_DEV_TYPE_XIVE		KVM_DEV_TYPE_XIVE
 	KVM_DEV_TYPE_MAX,
 };

@@ -1434,12 +1442,15 @@ struct kvm_enc_region {
 #define KVM_GET_NESTED_STATE         _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
 #define KVM_SET_NESTED_STATE         _IOW(KVMIO,  0xbf, struct kvm_nested_state)
 
-/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */
+/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */
 #define KVM_CLEAR_DIRTY_LOG          _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
 
 /* Available with KVM_CAP_HYPERV_CPUID */
 #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
 
+/* Available with KVM_CAP_ARM_SVE */
+#define KVM_ARM_VCPU_FINALIZE	  _IOW(KVMIO,  0xc2, int)
+
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
 	/* Guest initialization commands */
...
@@ -55,4 +55,66 @@
 #define MS_MGC_VAL 0xC0ED0000
 #define MS_MGC_MSK 0xffff0000
 
+/*
+ * open_tree() flags.
+ */
+#define OPEN_TREE_CLONE		1		/* Clone the target tree and attach the clone */
+#define OPEN_TREE_CLOEXEC	O_CLOEXEC	/* Close the file on execve() */
+
+/*
+ * move_mount() flags.
+ */
+#define MOVE_MOUNT_F_SYMLINKS		0x00000001 /* Follow symlinks on from path */
+#define MOVE_MOUNT_F_AUTOMOUNTS		0x00000002 /* Follow automounts on from path */
+#define MOVE_MOUNT_F_EMPTY_PATH		0x00000004 /* Empty from path permitted */
+#define MOVE_MOUNT_T_SYMLINKS		0x00000010 /* Follow symlinks on to path */
+#define MOVE_MOUNT_T_AUTOMOUNTS		0x00000020 /* Follow automounts on to path */
+#define MOVE_MOUNT_T_EMPTY_PATH		0x00000040 /* Empty to path permitted */
+#define MOVE_MOUNT__MASK		0x00000077
+
+/*
+ * fsopen() flags.
+ */
+#define FSOPEN_CLOEXEC		0x00000001
+
+/*
+ * fspick() flags.
+ */
+#define FSPICK_CLOEXEC		0x00000001
+#define FSPICK_SYMLINK_NOFOLLOW	0x00000002
+#define FSPICK_NO_AUTOMOUNT	0x00000004
+#define FSPICK_EMPTY_PATH	0x00000008
+
+/*
+ * The type of fsconfig() call made.
+ */
+enum fsconfig_command {
+	FSCONFIG_SET_FLAG	= 0,	/* Set parameter, supplying no value */
+	FSCONFIG_SET_STRING	= 1,	/* Set parameter, supplying a string value */
+	FSCONFIG_SET_BINARY	= 2,	/* Set parameter, supplying a binary blob value */
+	FSCONFIG_SET_PATH	= 3,	/* Set parameter, supplying an object by path */
+	FSCONFIG_SET_PATH_EMPTY	= 4,	/* Set parameter, supplying an object by (empty) path */
+	FSCONFIG_SET_FD		= 5,	/* Set parameter, supplying an object by fd */
+	FSCONFIG_CMD_CREATE	= 6,	/* Invoke superblock creation */
+	FSCONFIG_CMD_RECONFIGURE = 7,	/* Invoke superblock reconfiguration */
+};
+
+/*
+ * fsmount() flags.
+ */
+#define FSMOUNT_CLOEXEC		0x00000001
+
+/*
+ * Mount attributes.
+ */
+#define MOUNT_ATTR_RDONLY	0x00000001 /* Mount read-only */
+#define MOUNT_ATTR_NOSUID	0x00000002 /* Ignore suid and sgid bits */
+#define MOUNT_ATTR_NODEV	0x00000004 /* Disallow access to device special files */
+#define MOUNT_ATTR_NOEXEC	0x00000008 /* Disallow program execution */
+#define MOUNT_ATTR__ATIME	0x00000070 /* Setting on how atime should be updated */
+#define MOUNT_ATTR_RELATIME	0x00000000 /* - Update atime relative to mtime/ctime. */
+#define MOUNT_ATTR_NOATIME	0x00000010 /* - Do not update access times. */
+#define MOUNT_ATTR_STRICTATIME	0x00000020 /* - Always perform atime updates */
+#define MOUNT_ATTR_NODIRATIME	0x00000080 /* Do not update directory access times */
+
 #endif /* _UAPI_LINUX_MOUNT_H */
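
These flags and commands back the new open_tree/move_mount/fsopen/fsconfig/fsmount/fspick syscalls whose numbers are synced elsewhere in this pull (428-433 on x86_64). The sketch below shows roughly how the new mount API is driven, calling the syscalls directly via syscall() since libc wrappers may not exist yet; the numbers and flag values are taken from the headers in this diff, while the tmpfs parameters and mount point are made up for illustration.

/* Illustration of the new mount API flow; not part of this commit. */
#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>

#ifndef __NR_fsopen	/* numbers from the x86_64 syscall table in this pull */
#define __NR_move_mount	429
#define __NR_fsopen	430
#define __NR_fsconfig	431
#define __NR_fsmount	432
#endif

#define FSOPEN_CLOEXEC		0x00000001
#define FSMOUNT_CLOEXEC		0x00000001
#define FSCONFIG_SET_STRING	1
#define FSCONFIG_CMD_CREATE	6
#define MOVE_MOUNT_F_EMPTY_PATH	0x00000004

int main(void)
{
	/* Open a filesystem context for tmpfs and configure it. */
	int fsfd = syscall(__NR_fsopen, "tmpfs", FSOPEN_CLOEXEC);
	if (fsfd < 0) { perror("fsopen"); return 1; }

	syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
	syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	/* Turn the configured context into a detached mount ... */
	int mfd = syscall(__NR_fsmount, fsfd, FSMOUNT_CLOEXEC, 0);
	if (mfd < 0) { perror("fsmount"); return 1; }

	/* ... and attach it at /mnt. */
	if (syscall(__NR_move_mount, mfd, "", AT_FDCWD, "/mnt",
		    MOVE_MOUNT_F_EMPTY_PATH) < 0)
		perror("move_mount");

	return 0;
}
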
@@ -10,6 +10,7 @@
 #define CLONE_FS	0x00000200	/* set if fs info shared between processes */
 #define CLONE_FILES	0x00000400	/* set if open files shared between processes */
 #define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
+#define CLONE_PIDFD	0x00001000	/* set if a pidfd should be placed in parent */
 #define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
 #define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
 #define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
...
@@ -56,7 +56,7 @@ create_table()
 	echo "};"
 }
 
-$gcc -E -dM -x c  $input \
+$gcc -E -dM -x c -I $incpath/include/uapi $input \
 	|sed -ne 's/^#define __NR_//p' \
 	|sort -t' ' -k2 -nu \
 	|create_table
@@ -5,16 +5,19 @@
 #include "util.h"
 #include "machine.h"
 #include "api/fs/fs.h"
+#include "debug.h"
 
 int arch__fix_module_text_start(u64 *start, const char *name)
 {
+	u64 m_start = *start;
 	char path[PATH_MAX];
 
 	snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
 		 (int)strlen(name) - 2, name + 1);
-
-	if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
-		return -1;
+	if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+		pr_debug2("Using module %s start:%#lx\n", path, m_start);
+		*start = m_start;
+	}
 
 	return 0;
 }
@@ -349,6 +349,12 @@
 425	common	io_uring_setup		__x64_sys_io_uring_setup
 426	common	io_uring_enter		__x64_sys_io_uring_enter
 427	common	io_uring_register	__x64_sys_io_uring_register
+428	common	open_tree		__x64_sys_open_tree
+429	common	move_mount		__x64_sys_move_mount
+430	common	fsopen			__x64_sys_fsopen
+431	common	fsconfig		__x64_sys_fsconfig
+432	common	fsmount			__x64_sys_fsmount
+433	common	fspick			__x64_sys_fspick
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
...
@@ -161,9 +161,16 @@ int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest
 				continue;
 			}
-		} else
+		} else if (mem_start == kallsyms.vmlinux_map->end) {
+			/*
+			 * Ignore aliases to _etext, i.e. to the end of the kernel text area,
+			 * such as __indirect_thunk_end.
+			 */
+			continue;
+		} else {
 			pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
 				 mem_start, sym->name);
+		}
 
 		err = -1;
 	}
...
@@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
 			if (i > 0)
 				strncpy(buffer, string, i);
 		}
-		strncat(buffer + p, numstr, 4);
+		memcpy(buffer + p, numstr, 4);
 		p += 3;
 	}
 }
...
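Aside on the strncat()/memcpy() change above: a minimal sketch (the buffer names and sizes here are invented, not the converter's) of the pattern that keeps recent gcc quiet. When strncat()'s bound is tied to the source length, gcc's truncation analysis can flag the call ("strncat may truncate"); copying an exact, known byte count with memcpy(), terminator included, expresses the same intent without giving the checker a bound to second-guess:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char numstr[4];			/* three digits plus the NUL */
	char buffer[32] = "byte";
	size_t p = strlen(buffer);

	snprintf(numstr, sizeof(numstr), "%03d", 42);	/* "042" */

	/* strncat(buffer + p, numstr, 4) is what newer gcc may warn about. */
	memcpy(buffer + p, numstr, 4);	/* digits + terminating NUL */

	printf("%s\n", buffer);		/* prints "byte042" */
	return 0;
}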
@@ -924,7 +924,8 @@ const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
  * symbol_name if it's not that important.
  */
 static int machine__get_running_kernel_start(struct machine *machine,
-					     const char **symbol_name, u64 *start)
+					     const char **symbol_name,
+					     u64 *start, u64 *end)
 {
 	char filename[PATH_MAX];
 	int i, err = -1;
@@ -949,6 +950,11 @@ static int machine__get_running_kernel_start(struct machine *machine,
 		*symbol_name = name;
 
 	*start = addr;
+
+	err = kallsyms__get_function_start(filename, "_etext", &addr);
+	if (!err)
+		*end = addr;
+
 	return 0;
 }
@@ -1441,7 +1447,7 @@ int machine__create_kernel_maps(struct machine *machine)
 	struct dso *kernel = machine__get_kernel(machine);
 	const char *name = NULL;
 	struct map *map;
-	u64 addr = 0;
+	u64 start = 0, end = ~0ULL;
 	int ret;
 
 	if (kernel == NULL)
@@ -1460,9 +1466,9 @@ int machine__create_kernel_maps(struct machine *machine)
 			 "continuing anyway...\n", machine->pid);
 	}
 
-	if (!machine__get_running_kernel_start(machine, &name, &addr)) {
+	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
 		if (name &&
-		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) {
+		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
 			machine__destroy_kernel_maps(machine);
 			ret = -1;
 			goto out_put;
@@ -1472,16 +1478,19 @@ int machine__create_kernel_maps(struct machine *machine)
 		 * we have a real start address now, so re-order the kmaps
 		 * assume it's the last in the kmaps
 		 */
-		machine__update_kernel_mmap(machine, addr, ~0ULL);
+		machine__update_kernel_mmap(machine, start, end);
 	}
 
 	if (machine__create_extra_kernel_maps(machine, kernel))
 		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
 
-	/* update end address of the kernel map using adjacent module address */
-	map = map__next(machine__kernel_map(machine));
-	if (map)
-		machine__set_kernel_mmap(machine, addr, map->start);
+	if (end == ~0ULL) {
+		/* update end address of the kernel map using adjacent module address */
+		map = map__next(machine__kernel_map(machine));
+		if (map)
+			machine__set_kernel_mmap(machine, start, map->start);
+	}
 
 out_put:
 	dso__put(kernel);
 	return ret;
...
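For context on the machine.c change above, which derives the kernel map's end from the _etext symbol: a hedged userspace sketch of what such a kallsyms lookup boils down to (this is not the perf helper; the parsing is simplified, and only /proc/kallsyms with its "address type name" line format is assumed). With kptr_restrict set, non-root users will simply see zeroed addresses:

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Return 0 and fill *addr with the first exact match for "name". */
static int kallsyms_lookup(const char *name, uint64_t *addr)
{
	char line[256], sym[128], type;
	uint64_t a;
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%" SCNx64 " %c %127s", &a, &type, sym) == 3 &&
		    !strcmp(sym, name)) {
			*addr = a;
			fclose(f);
			return 0;
		}
	}

	fclose(f);
	return -1;
}

int main(void)
{
	uint64_t etext;

	if (!kallsyms_lookup("_etext", &etext))
		printf("_etext at %#" PRIx64 "\n", etext);
	else
		printf("_etext not found\n");

	return 0;
}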
@@ -647,6 +647,26 @@ static void perf_event__throttle_swap(union perf_event *event,
 		swap_sample_id_all(event, &event->throttle + 1);
 }
 
+static void perf_event__namespaces_swap(union perf_event *event,
+					bool sample_id_all)
+{
+	u64 i;
+
+	event->namespaces.pid		= bswap_32(event->namespaces.pid);
+	event->namespaces.tid		= bswap_32(event->namespaces.tid);
+	event->namespaces.nr_namespaces	= bswap_64(event->namespaces.nr_namespaces);
+
+	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
+		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
+
+		ns->dev = bswap_64(ns->dev);
+		ns->ino = bswap_64(ns->ino);
+	}
+
+	if (sample_id_all)
+		swap_sample_id_all(event, &event->namespaces.link_info[i]);
+}
+
 static u8 revbyte(u8 b)
 {
 	int rev = (b >> 4) | ((b & 0xf) << 4);
@@ -887,6 +907,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
 	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
 	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
 	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
+	[PERF_RECORD_NAMESPACES]	  = perf_event__namespaces_swap,
 	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
 	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
 	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
...
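As a reading aid for the swap handler added above: a small self-contained sketch showing the same shape of conversion, fixed header fields first and then each entry of a variable-length tail. The struct demo_record and demo_record__swap names are invented for the example; only bswap_32()/bswap_64() from <byteswap.h> are the real interfaces the patch relies on:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

/* Invented record layout: a small header plus a variable-length u64 tail. */
struct demo_record {
	uint32_t pid, tid;
	uint64_t nr;
	uint64_t vals[4];
};

static void demo_record__swap(struct demo_record *r)
{
	uint64_t i, nr = bswap_64(r->nr);	/* the count itself needs swapping */

	r->pid = bswap_32(r->pid);
	r->tid = bswap_32(r->tid);
	r->nr  = nr;

	for (i = 0; i < nr; i++)		/* then every tail entry */
		r->vals[i] = bswap_64(r->vals[i]);
}

int main(void)
{
	/* Simulate data written by a machine of the opposite byte order. */
	struct demo_record r = {
		.pid  = bswap_32(1),
		.tid  = bswap_32(1),
		.nr   = bswap_64(2),
		.vals = { bswap_64(10), bswap_64(20) },
	};

	demo_record__swap(&r);
	printf("nr=%llu vals[1]=%llu\n",
	       (unsigned long long)r.nr, (unsigned long long)r.vals[1]);
	return 0;
}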
@@ -133,7 +133,7 @@ void thread__put(struct thread *thread)
 	}
 }
 
-struct namespaces *thread__namespaces(const struct thread *thread)
+static struct namespaces *__thread__namespaces(const struct thread *thread)
 {
 	if (list_empty(&thread->namespaces_list))
 		return NULL;
@@ -141,10 +141,21 @@ struct namespaces *thread__namespaces(const struct thread *thread)
 	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
 }
 
+struct namespaces *thread__namespaces(const struct thread *thread)
+{
+	struct namespaces *ns;
+
+	down_read((struct rw_semaphore *)&thread->namespaces_lock);
+	ns = __thread__namespaces(thread);
+	up_read((struct rw_semaphore *)&thread->namespaces_lock);
+
+	return ns;
+}
+
 static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
 				    struct namespaces_event *event)
 {
-	struct namespaces *new, *curr = thread__namespaces(thread);
+	struct namespaces *new, *curr = __thread__namespaces(thread);
 
 	new = namespaces__new(event);
 	if (!new)
...
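Closing note on the thread.c change above, which takes thread->namespaces_lock around the read as well as the write side: a hedged userspace analogue of that pattern using a plain pthread_rwlock_t (the holder struct and function names are invented; the perf tool itself uses the down_read()/up_read() wrappers shown in the hunk). The point is simply that the lookup runs entirely under the reader lock, so a concurrent update cannot change the entry out from under it. Build with -pthread:

#include <pthread.h>
#include <stdio.h>

/* Invented container standing in for the per-thread namespaces list head. */
struct holder {
	pthread_rwlock_t lock;
	int current;			/* stand-in for the first list entry */
};

static int holder__current(struct holder *h)
{
	int v;

	pthread_rwlock_rdlock(&h->lock);	/* readers may run concurrently */
	v = h->current;
	pthread_rwlock_unlock(&h->lock);

	return v;
}

static void holder__update(struct holder *h, int v)
{
	pthread_rwlock_wrlock(&h->lock);	/* the writer excludes all readers */
	h->current = v;
	pthread_rwlock_unlock(&h->lock);
}

int main(void)
{
	struct holder h = { .lock = PTHREAD_RWLOCK_INITIALIZER, .current = 1 };

	holder__update(&h, 2);
	printf("current=%d\n", holder__current(&h));
	return 0;
}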