Commit 13a2e429 authored by Linus Torvalds

Merge tag 'perf-tools-fixes-for-v6.9-2024-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools

Pull perf tools fixes from Namhyung Kim:
 "A random set of small bug fixes:

   - Fix perf annotate TUI when used with data type profiling

   - Work around a BPF verifier issue with sighand lock checking

  And a set of kernel header synchronizations"

* tag 'perf-tools-fixes-for-v6.9-2024-04-19' of git://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools:
  tools/include: Sync arm64 asm/cputype.h with the kernel sources
  tools/include: Sync asm-generic/bitops/fls.h with the kernel sources
  tools/include: Sync x86 asm/msr-index.h with the kernel sources
  tools/include: Sync x86 asm/irq_vectors.h with the kernel sources
  tools/include: Sync x86 CPU feature headers with the kernel sources
  tools/include: Sync uapi/sound/asound.h with the kernel sources
  tools/include: Sync uapi/linux/kvm.h and asm/kvm.h with the kernel sources
  tools/include: Sync uapi/linux/fs.h with the kernel sources
  tools/include: Sync uapi/drm/i915_drm.h with the kernel sources
  perf lock contention: Add a missing NULL check
  perf annotate: Make sure to call symbol__annotate2() in TUI
parents 2d412262 1cebd7f7
@@ -61,6 +61,7 @@
#define ARM_CPU_IMP_HISI		0x48
#define ARM_CPU_IMP_APPLE		0x61
#define ARM_CPU_IMP_AMPERE		0xC0
#define ARM_CPU_IMP_MICROSOFT 0x6D
#define ARM_CPU_PART_AEM_V8		0xD0F
#define ARM_CPU_PART_FOUNDATION		0xD00
@@ -135,6 +136,8 @@
#define AMPERE_CPU_PART_AMPERE1		0xAC3
#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -193,6 +196,7 @@
#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001		MIDR_FUJITSU_A64FX
...
@@ -37,9 +37,7 @@
#include <asm/ptrace.h>
#include <asm/sve_context.h>
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -76,10 +74,10 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT	0
-#define KVM_ARM_DEVICE_TYPE_MASK	GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
-					KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK	__GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+					KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT		16
-#define KVM_ARM_DEVICE_ID_MASK		GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
-					KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK		__GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+					KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
@@ -162,6 +160,11 @@ struct kvm_sync_regs {
	__u64 device_irq_level;
};
/* Bits for run->s.regs.device_irq_level */
#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2)
/*
 * PMU filter structure. Describe a range of events with a particular
 * action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
...
@@ -28,7 +28,6 @@
#define __KVM_HAVE_PPC_SMT
#define __KVM_HAVE_IRQCHIP
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_GUEST_DEBUG
/* Not always available, but if it is, this is the correct offset. */
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -733,4 +732,48 @@ struct kvm_ppc_xive_eq {
#define KVM_XIVE_TIMA_PAGE_OFFSET	0
#define KVM_XIVE_ESB_PAGE_OFFSET	4
/* for KVM_PPC_GET_PVINFO */
#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
struct kvm_ppc_pvinfo {
/* out */
__u32 flags;
__u32 hcall[4];
__u8 pad[108];
};
/* for KVM_PPC_GET_SMMU_INFO */
#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
struct kvm_ppc_one_page_size {
__u32 page_shift; /* Page shift (or 0) */
__u32 pte_enc; /* Encoding in the HPTE (>>12) */
};
struct kvm_ppc_one_seg_page_size {
__u32 page_shift; /* Base page shift of segment (or 0) */
__u32 slb_enc; /* SLB encoding for BookS */
struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
__u32 slb_size;
__u16 data_keys; /* # storage keys supported for data */
__u16 instr_keys; /* # storage keys supported for instructions */
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
struct kvm_ppc_resize_hpt {
__u64 flags;
__u32 shift;
__u32 pad;
};
#endif /* __LINUX_KVM_POWERPC_H */
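As a usage sketch for the structures moved into this header: a consumer of KVM_PPC_GET_SMMU_INFO walks the sps[] array and, going by the "(or 0)" field comments (an assumption here), treats a zero page_shift as an unused slot:

```c
#include <stdint.h>
#include <stdio.h>

#define KVM_PPC_PAGE_SIZES_MAX_SZ 8

/* mirrors of the UAPI structs, using stdint types for portability */
struct kvm_ppc_one_page_size {
	uint32_t page_shift;	/* Page shift (or 0) */
	uint32_t pte_enc;	/* Encoding in the HPTE (>>12) */
};

struct kvm_ppc_one_seg_page_size {
	uint32_t page_shift;	/* Base page shift of segment (or 0) */
	uint32_t slb_enc;	/* SLB encoding for BookS */
	struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
};

/* dump one segment entry; zero page_shift = unused slot (assumed) */
static void dump_seg(const struct kvm_ppc_one_seg_page_size *sps)
{
	if (!sps->page_shift)
		return;
	printf("segment base page shift %u\n", sps->page_shift);
	for (int i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++)
		if (sps->enc[i].page_shift)
			printf("  page shift %u, pte_enc 0x%x\n",
			       sps->enc[i].page_shift, sps->enc[i].pte_enc);
}

int main(void)
{
	/* sample data standing in for an ioctl result */
	struct kvm_ppc_one_seg_page_size seg = {
		.page_shift = 12,
		.enc = { { .page_shift = 12, .pte_enc = 0 },
			 { .page_shift = 24, .pte_enc = 0x38 } },
	};
	dump_seg(&seg);
	return 0;
}
```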
@@ -12,7 +12,320 @@
#include <linux/types.h>
#define __KVM_S390
#define __KVM_HAVE_GUEST_DEBUG
struct kvm_s390_skeys {
__u64 start_gfn;
__u64 count;
__u64 skeydata_addr;
__u32 flags;
__u32 reserved[9];
};
#define KVM_S390_CMMA_PEEK (1 << 0)
/**
* kvm_s390_cmma_log - Used for CMMA migration.
*
* Used both for input and output.
*
* @start_gfn: Guest page number to start from.
* @count: Size of the result buffer.
* @flags: Control operation mode via KVM_S390_CMMA_* flags
* @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
* pages are still remaining.
* @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
* in the PGSTE.
* @values: Pointer to the values buffer.
*
* Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
*/
struct kvm_s390_cmma_log {
__u64 start_gfn;
__u32 count;
__u32 flags;
union {
__u64 remaining;
__u64 mask;
};
__u64 values;
};
#define KVM_S390_RESET_POR 1
#define KVM_S390_RESET_CLEAR 2
#define KVM_S390_RESET_SUBSYSTEM 4
#define KVM_S390_RESET_CPU_INIT 8
#define KVM_S390_RESET_IPL 16
/* for KVM_S390_MEM_OP */
struct kvm_s390_mem_op {
/* in */
__u64 gaddr; /* the guest address */
__u64 flags; /* flags */
__u32 size; /* amount of bytes */
__u32 op; /* type of operation */
__u64 buf; /* buffer in userspace */
union {
struct {
__u8 ar; /* the access register number */
__u8 key; /* access key, ignored if flag unset */
__u8 pad1[6]; /* ignored */
__u64 old_addr; /* ignored if cmpxchg flag unset */
};
__u32 sida_offset; /* offset into the sida */
__u8 reserved[32]; /* ignored */
};
};
/* types for kvm_s390_mem_op->op */
#define KVM_S390_MEMOP_LOGICAL_READ 0
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
#define KVM_S390_MEMOP_SIDA_READ 2
#define KVM_S390_MEMOP_SIDA_WRITE 3
#define KVM_S390_MEMOP_ABSOLUTE_READ 4
#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
struct kvm_s390_psw {
__u64 mask;
__u64 addr;
};
/* valid values for type in kvm_s390_interrupt */
#define KVM_S390_SIGP_STOP 0xfffe0000u
#define KVM_S390_PROGRAM_INT 0xfffe0001u
#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
#define KVM_S390_RESTART 0xfffe0003u
#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
#define KVM_S390_MCHK 0xfffe1000u
#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
#define KVM_S390_INT_CPU_TIMER 0xffff1005u
#define KVM_S390_INT_VIRTIO 0xffff2603u
#define KVM_S390_INT_SERVICE 0xffff2401u
#define KVM_S390_INT_EMERGENCY 0xffff1201u
#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
/* Anything below 0xfffe0000u is taken by INT_IO */
#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
(((schid)) | \
((ssid) << 16) | \
((cssid) << 18) | \
((ai) << 26))
#define KVM_S390_INT_IO_MIN 0x00000000u
#define KVM_S390_INT_IO_MAX 0xfffdffffu
#define KVM_S390_INT_IO_AI_MASK 0x04000000u
struct kvm_s390_interrupt {
__u32 type;
__u32 parm;
__u64 parm64;
};
struct kvm_s390_io_info {
__u16 subchannel_id;
__u16 subchannel_nr;
__u32 io_int_parm;
__u32 io_int_word;
};
struct kvm_s390_ext_info {
__u32 ext_params;
__u32 pad;
__u64 ext_params2;
};
struct kvm_s390_pgm_info {
__u64 trans_exc_code;
__u64 mon_code;
__u64 per_address;
__u32 data_exc_code;
__u16 code;
__u16 mon_class_nr;
__u8 per_code;
__u8 per_atmid;
__u8 exc_access_id;
__u8 per_access_id;
__u8 op_access_id;
#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
#define KVM_S390_PGM_FLAGS_ILC_0 0x02
#define KVM_S390_PGM_FLAGS_ILC_1 0x04
#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
__u8 flags;
__u8 pad[2];
};
struct kvm_s390_prefix_info {
__u32 address;
};
struct kvm_s390_extcall_info {
__u16 code;
};
struct kvm_s390_emerg_info {
__u16 code;
};
#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
struct kvm_s390_stop_info {
__u32 flags;
};
struct kvm_s390_mchk_info {
__u64 cr14;
__u64 mcic;
__u64 failing_storage_address;
__u32 ext_damage_code;
__u32 pad;
__u8 fixed_logout[16];
};
struct kvm_s390_irq {
__u64 type;
union {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
struct kvm_s390_pgm_info pgm;
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
char reserved[64];
} u;
};
struct kvm_s390_irq_state {
__u64 buf;
__u32 flags; /* will stay unused for compatibility reasons */
__u32 len;
__u32 reserved[4]; /* will stay unused for compatibility reasons */
};
struct kvm_s390_ucas_mapping {
__u64 user_addr;
__u64 vcpu_addr;
__u64 length;
};
struct kvm_s390_pv_sec_parm {
__u64 origin;
__u64 length;
};
struct kvm_s390_pv_unp {
__u64 addr;
__u64 size;
__u64 tweak;
};
enum pv_cmd_dmp_id {
KVM_PV_DUMP_INIT,
KVM_PV_DUMP_CONFIG_STOR_STATE,
KVM_PV_DUMP_COMPLETE,
KVM_PV_DUMP_CPU,
};
struct kvm_s390_pv_dmp {
__u64 subcmd;
__u64 buff_addr;
__u64 buff_len;
__u64 gaddr; /* For dump storage state */
__u64 reserved[4];
};
enum pv_cmd_info_id {
KVM_PV_INFO_VM,
KVM_PV_INFO_DUMP,
};
struct kvm_s390_pv_info_dump {
__u64 dump_cpu_buffer_len;
__u64 dump_config_mem_buffer_per_1m;
__u64 dump_config_finalize_len;
};
struct kvm_s390_pv_info_vm {
__u64 inst_calls_list[4];
__u64 max_cpus;
__u64 max_guests;
__u64 max_guest_addr;
__u64 feature_indication;
};
struct kvm_s390_pv_info_header {
__u32 id;
__u32 len_max;
__u32 len_written;
__u32 reserved;
};
struct kvm_s390_pv_info {
struct kvm_s390_pv_info_header header;
union {
struct kvm_s390_pv_info_dump dump;
struct kvm_s390_pv_info_vm vm;
};
};
enum pv_cmd_id {
KVM_PV_ENABLE,
KVM_PV_DISABLE,
KVM_PV_SET_SEC_PARMS,
KVM_PV_UNPACK,
KVM_PV_VERIFY,
KVM_PV_PREP_RESET,
KVM_PV_UNSHARE_ALL,
KVM_PV_INFO,
KVM_PV_DUMP,
KVM_PV_ASYNC_CLEANUP_PREPARE,
KVM_PV_ASYNC_CLEANUP_PERFORM,
};
struct kvm_pv_cmd {
__u32 cmd; /* Command to be executed */
__u16 rc; /* Ultravisor return code */
__u16 rrc; /* Ultravisor return reason code */
__u64 data; /* Data or address */
__u32 flags; /* flags for future extensions. Must be 0 for now */
__u32 reserved[3];
};
struct kvm_s390_zpci_op {
/* in */
__u32 fh; /* target device */
__u8 op; /* operation to perform */
__u8 pad[3];
union {
/* for KVM_S390_ZPCIOP_REG_AEN */
struct {
__u64 ibv; /* Guest addr of interrupt bit vector */
__u64 sb; /* Guest addr of summary bit */
__u32 flags;
__u32 noi; /* Number of interrupts */
__u8 isc; /* Guest interrupt subclass */
__u8 sbo; /* Offset of guest summary bit vector */
__u16 pad;
} reg_aen;
__u64 reserved[8];
} u;
};
/* types for kvm_s390_zpci_op->op */
#define KVM_S390_ZPCIOP_REG_AEN 0
#define KVM_S390_ZPCIOP_DEREG_AEN 1
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
/* Device control API: s390-specific devices */
#define KVM_DEV_FLIC_GET_ALL_IRQS	1
...
@@ -13,7 +13,7 @@
/*
 * Defines x86 CPU feature bits
 */
-#define NCAPINTS			21	   /* N 32-bit words worth of info */
+#define NCAPINTS			22	   /* N 32-bit words worth of info */
#define NBUGINTS			2	   /* N 32-bit bug flags */
/*
@@ -81,10 +81,8 @@
#define X86_FEATURE_K6_MTRR		( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR		( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR		( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* CPU types for specific tunings: */
#define X86_FEATURE_K8			( 3*32+ 4) /* "" Opteron, Athlon64 */
-/* FREE, was #define X86_FEATURE_K7		( 3*32+ 5) "" Athlon */
+#define X86_FEATURE_ZEN5		( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
#define X86_FEATURE_P3			( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4			( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC	( 3*32+ 8) /* TSC ticks at a constant rate */
@@ -97,7 +95,7 @@
#define X86_FEATURE_SYSENTER32		( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD		( 3*32+16) /* REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2		( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-/* FREE, was #define X86_FEATURE_LFENCE_RDTSC	( 3*32+18) "" LFENCE synchronizes RDTSC */
+#define X86_FEATURE_CLEAR_CPU_BUF	( 3*32+18) /* "" Clear CPU buffers using VERW */
#define X86_FEATURE_ACC_POWER		( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL		( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS		( 3*32+21) /* "" Always-present feature */
@@ -461,6 +459,14 @@
#define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO		(20*32+29) /* "" CPU is not affected by SRSO */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
* CPUID levels like 0x80000022, etc.
*
* Reuse free bits when adding new feature flags!
*/
#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
/*
 * BUG word(s)
 */
@@ -508,4 +514,5 @@
/* BUG word 2 */
#define X86_BUG_SRSO			X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#endif /* _ASM_X86_CPUFEATURES_H */
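The numbering scheme behind this change, shown concretely: each X86_FEATURE_* constant encodes (cpuid word * 32 + bit), so bumping NCAPINTS from 21 to 22 opens up word 21 for the new scattered-leaf flags. A tiny standalone illustration:

```c
#include <stdio.h>

/* same encoding as the header: (cpuid word * 32 + bit) */
#define X86_FEATURE_AMD_LBR_PMC_FREEZE	(21*32 + 0)

int main(void)
{
	unsigned int f = X86_FEATURE_AMD_LBR_PMC_FREEZE;

	printf("word %u, bit %u\n", f / 32, f % 32);	/* word 21, bit 0 */
	return 0;
}
```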
@@ -123,6 +123,12 @@
# define DISABLE_FRED	(1 << (X86_FEATURE_FRED & 31))
#endif
#ifdef CONFIG_KVM_AMD_SEV
#define DISABLE_SEV_SNP 0
#else
#define DISABLE_SEV_SNP (1 << (X86_FEATURE_SEV_SNP & 31))
#endif
/*
 * Make sure to add features to the correct mask
 */
@@ -147,8 +153,9 @@
			 DISABLE_ENQCMD)
#define DISABLED_MASK17	0
#define DISABLED_MASK18	(DISABLE_IBT)
-#define DISABLED_MASK19	0
+#define DISABLED_MASK19	(DISABLE_SEV_SNP)
#define DISABLED_MASK20	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
@@ -84,11 +84,9 @@
#define HYPERVISOR_CALLBACK_VECTOR	0xf3
/* Vector for KVM to deliver posted interrupt IPI */
#if IS_ENABLED(CONFIG_KVM)
#define POSTED_INTR_VECTOR		0xf2
#define POSTED_INTR_WAKEUP_VECTOR	0xf1
#define POSTED_INTR_NESTED_VECTOR	0xf0
#endif
#define MANAGED_IRQ_SHUTDOWN_VECTOR	0xef
...
@@ -176,6 +176,14 @@
						 * CPU is not vulnerable to Gather
						 * Data Sampling (GDS).
						 */
#define ARCH_CAP_RFDS_NO BIT(27) /*
* Not susceptible to Register
* File Data Sampling.
*/
#define ARCH_CAP_RFDS_CLEAR BIT(28) /*
* VERW clears CPU Register
* File.
*/
#define ARCH_CAP_XAPIC_DISABLE		BIT(21)	/*
						 * IA32_XAPIC_DISABLE_STATUS MSR
@@ -605,34 +613,47 @@
#define MSR_AMD64_SEV_ES_GHCB		0xc0010130
#define MSR_AMD64_SEV			0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT	0
#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_ENABLED		BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
#define MSR_AMD64_SEV_ES_ENABLED	BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_SNP_ENABLED	BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
-/* SNP feature bits enabled by the hypervisor */
-#define MSR_AMD64_SNP_VTOM			BIT_ULL(3)
-#define MSR_AMD64_SNP_REFLECT_VC		BIT_ULL(4)
-#define MSR_AMD64_SNP_RESTRICTED_INJ		BIT_ULL(5)
-#define MSR_AMD64_SNP_ALT_INJ			BIT_ULL(6)
-#define MSR_AMD64_SNP_DEBUG_SWAP		BIT_ULL(7)
-#define MSR_AMD64_SNP_PREVENT_HOST_IBS		BIT_ULL(8)
-#define MSR_AMD64_SNP_BTB_ISOLATION		BIT_ULL(9)
-#define MSR_AMD64_SNP_VMPL_SSS			BIT_ULL(10)
-#define MSR_AMD64_SNP_SECURE_TSC		BIT_ULL(11)
-#define MSR_AMD64_SNP_VMGEXIT_PARAM		BIT_ULL(12)
-#define MSR_AMD64_SNP_IBS_VIRT			BIT_ULL(14)
-#define MSR_AMD64_SNP_VMSA_REG_PROTECTION	BIT_ULL(16)
-#define MSR_AMD64_SNP_SMT_PROTECTION		BIT_ULL(17)
-/* SNP feature bits reserved for future use. */
+#define MSR_AMD64_SNP_VTOM_BIT			3
+#define MSR_AMD64_SNP_VTOM			BIT_ULL(MSR_AMD64_SNP_VTOM_BIT)
+#define MSR_AMD64_SNP_REFLECT_VC_BIT		4
+#define MSR_AMD64_SNP_REFLECT_VC		BIT_ULL(MSR_AMD64_SNP_REFLECT_VC_BIT)
+#define MSR_AMD64_SNP_RESTRICTED_INJ_BIT	5
+#define MSR_AMD64_SNP_RESTRICTED_INJ		BIT_ULL(MSR_AMD64_SNP_RESTRICTED_INJ_BIT)
+#define MSR_AMD64_SNP_ALT_INJ_BIT		6
+#define MSR_AMD64_SNP_ALT_INJ			BIT_ULL(MSR_AMD64_SNP_ALT_INJ_BIT)
+#define MSR_AMD64_SNP_DEBUG_SWAP_BIT		7
+#define MSR_AMD64_SNP_DEBUG_SWAP		BIT_ULL(MSR_AMD64_SNP_DEBUG_SWAP_BIT)
+#define MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT	8
+#define MSR_AMD64_SNP_PREVENT_HOST_IBS		BIT_ULL(MSR_AMD64_SNP_PREVENT_HOST_IBS_BIT)
+#define MSR_AMD64_SNP_BTB_ISOLATION_BIT		9
+#define MSR_AMD64_SNP_BTB_ISOLATION		BIT_ULL(MSR_AMD64_SNP_BTB_ISOLATION_BIT)
+#define MSR_AMD64_SNP_VMPL_SSS_BIT		10
+#define MSR_AMD64_SNP_VMPL_SSS			BIT_ULL(MSR_AMD64_SNP_VMPL_SSS_BIT)
+#define MSR_AMD64_SNP_SECURE_TSC_BIT		11
+#define MSR_AMD64_SNP_SECURE_TSC		BIT_ULL(MSR_AMD64_SNP_SECURE_TSC_BIT)
+#define MSR_AMD64_SNP_VMGEXIT_PARAM_BIT		12
+#define MSR_AMD64_SNP_VMGEXIT_PARAM		BIT_ULL(MSR_AMD64_SNP_VMGEXIT_PARAM_BIT)
#define MSR_AMD64_SNP_RESERVED_BIT13		BIT_ULL(13)
+#define MSR_AMD64_SNP_IBS_VIRT_BIT		14
+#define MSR_AMD64_SNP_IBS_VIRT			BIT_ULL(MSR_AMD64_SNP_IBS_VIRT_BIT)
#define MSR_AMD64_SNP_RESERVED_BIT15		BIT_ULL(15)
+#define MSR_AMD64_SNP_VMSA_REG_PROT_BIT		16
+#define MSR_AMD64_SNP_VMSA_REG_PROT		BIT_ULL(MSR_AMD64_SNP_VMSA_REG_PROT_BIT)
+#define MSR_AMD64_SNP_SMT_PROT_BIT		17
+#define MSR_AMD64_SNP_SMT_PROT			BIT_ULL(MSR_AMD64_SNP_SMT_PROT_BIT)
+#define MSR_AMD64_SNP_RESV_BIT			18
+#define MSR_AMD64_SNP_RESERVED_MASK		GENMASK_ULL(63, MSR_AMD64_SNP_RESV_BIT)
#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
#define MSR_AMD64_RMP_BASE 0xc0010132
#define MSR_AMD64_RMP_END 0xc0010133
/* AMD Collaborative Processor Performance Control MSRs */
#define MSR_AMD_CPPC_CAP1		0xc00102b0
#define MSR_AMD_CPPC_ENABLE		0xc00102b1
@@ -721,6 +742,13 @@
#define MSR_AMD64_SYSCFG		0xc0010010
#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT	23
#define MSR_AMD64_SYSCFG_MEM_ENCRYPT	BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_AMD64_SYSCFG_SNP_EN_BIT 24
#define MSR_AMD64_SYSCFG_SNP_EN BIT_ULL(MSR_AMD64_SYSCFG_SNP_EN_BIT)
#define MSR_AMD64_SYSCFG_SNP_VMPL_EN_BIT 25
#define MSR_AMD64_SYSCFG_SNP_VMPL_EN BIT_ULL(MSR_AMD64_SYSCFG_SNP_VMPL_EN_BIT)
#define MSR_AMD64_SYSCFG_MFDM_BIT 19
#define MSR_AMD64_SYSCFG_MFDM BIT_ULL(MSR_AMD64_SYSCFG_MFDM_BIT)
#define MSR_K8_INT_PENDING_MSG		0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK		0x18000000
...
@@ -99,6 +99,7 @@
#define REQUIRED_MASK18	0
#define REQUIRED_MASK19	0
#define REQUIRED_MASK20	0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define REQUIRED_MASK21	0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 22)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
@@ -7,6 +7,8 @@
 *
 */
#include <linux/const.h>
#include <linux/bits.h>
#include <linux/types.h>
#include <linux/ioctl.h>
#include <linux/stddef.h>
@@ -40,7 +42,6 @@
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
@@ -49,7 +50,6 @@
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define __KVM_HAVE_READONLY_MEM
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
@@ -526,9 +526,301 @@ struct kvm_pmu_event_filter {
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
-#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0)
+#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS _BITUL(0)
#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
/* for KVM_CAP_MCE */
struct kvm_x86_mce {
__u64 status;
__u64 addr;
__u64 misc;
__u64 mcg_status;
__u8 bank;
__u8 pad1[7];
__u64 pad2[3];
};
/* for KVM_CAP_XEN_HVM */
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA (1 << 8)
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
__u64 blob_addr_32;
__u64 blob_addr_64;
__u8 blob_size_32;
__u8 blob_size_64;
__u8 pad2[30];
};
struct kvm_xen_hvm_attr {
__u16 type;
__u16 pad[3];
union {
__u8 long_mode;
__u8 vector;
__u8 runstate_update_flag;
union {
__u64 gfn;
#define KVM_XEN_INVALID_GFN ((__u64)-1)
__u64 hva;
} shared_info;
struct {
__u32 send_port;
__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
__u32 flags;
#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
#define KVM_XEN_EVTCHN_RESET (1 << 2)
/*
* Events sent by the guest are either looped back to
* the guest itself (potentially on a different port#)
* or signalled via an eventfd.
*/
union {
struct {
__u32 port;
__u32 vcpu;
__u32 priority;
} port;
struct {
__u32 port; /* Zero for eventfd */
__s32 fd;
} eventfd;
__u32 padding[4];
} deliver;
} evtchn;
__u32 xen_version;
__u64 pad[8];
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
#define KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA 0x6
struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
union {
__u64 gpa;
#define KVM_XEN_INVALID_GPA ((__u64)-1)
__u64 hva;
__u64 pad[8];
struct {
__u64 state;
__u64 state_entry_time;
__u64 time_running;
__u64 time_runnable;
__u64 time_blocked;
__u64 time_offline;
} runstate;
__u32 vcpu_id;
struct {
__u32 port;
__u32 priority;
__u64 expires_ns;
} timer;
__u8 vector;
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO_HVA 0x9
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
KVM_SEV_INIT = 0,
KVM_SEV_ES_INIT,
/* Guest launch commands */
KVM_SEV_LAUNCH_START,
KVM_SEV_LAUNCH_UPDATE_DATA,
KVM_SEV_LAUNCH_UPDATE_VMSA,
KVM_SEV_LAUNCH_SECRET,
KVM_SEV_LAUNCH_MEASURE,
KVM_SEV_LAUNCH_FINISH,
/* Guest migration commands (outgoing) */
KVM_SEV_SEND_START,
KVM_SEV_SEND_UPDATE_DATA,
KVM_SEV_SEND_UPDATE_VMSA,
KVM_SEV_SEND_FINISH,
/* Guest migration commands (incoming) */
KVM_SEV_RECEIVE_START,
KVM_SEV_RECEIVE_UPDATE_DATA,
KVM_SEV_RECEIVE_UPDATE_VMSA,
KVM_SEV_RECEIVE_FINISH,
/* Guest status and debug commands */
KVM_SEV_GUEST_STATUS,
KVM_SEV_DBG_DECRYPT,
KVM_SEV_DBG_ENCRYPT,
/* Guest certificates commands */
KVM_SEV_CERT_EXPORT,
/* Attestation report */
KVM_SEV_GET_ATTESTATION_REPORT,
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,
KVM_SEV_NR_MAX,
};
struct kvm_sev_cmd {
__u32 id;
__u32 pad0;
__u64 data;
__u32 error;
__u32 sev_fd;
};
struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
__u32 pad0;
__u64 session_uaddr;
__u32 session_len;
__u32 pad1;
};
struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
__u32 pad0;
};
struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
__u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
__u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
__u32 pad2;
};
struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
__u32 pad0;
};
struct kvm_sev_guest_status {
__u32 handle;
__u32 policy;
__u32 state;
};
struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
__u32 pad0;
};
struct kvm_sev_attestation_report {
__u8 mnonce[16];
__u64 uaddr;
__u32 len;
__u32 pad0;
};
struct kvm_sev_send_start {
__u32 policy;
__u32 pad0;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
__u32 pad1;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
__u32 pad2;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
__u32 pad3;
__u64 session_uaddr;
__u32 session_len;
__u32 pad4;
};
struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
__u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
__u32 pad2;
};
struct kvm_sev_receive_start {
__u32 handle;
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
__u32 pad0;
__u64 session_uaddr;
__u32 session_len;
__u32 pad1;
};
struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
__u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
__u32 pad2;
};
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
struct kvm_hyperv_eventfd {
__u32 conn_id;
__s32 fd;
__u32 flags;
__u32 padding[3];
};
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
/*
 * Masked event layout.
 * Bits   Description
@@ -549,10 +841,10 @@ struct kvm_pmu_event_filter {
	((__u64)(!!(exclude)) << 55))
#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \
-	(GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32))
+	(__GENMASK_ULL(7, 0) | __GENMASK_ULL(35, 32))
-#define KVM_PMU_MASKED_ENTRY_UMASK_MASK		(GENMASK_ULL(63, 56))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MASK		(__GENMASK_ULL(63, 56))
-#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH	(GENMASK_ULL(15, 8))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH	(__GENMASK_ULL(15, 8))
-#define KVM_PMU_MASKED_ENTRY_EXCLUDE		(BIT_ULL(55))
+#define KVM_PMU_MASKED_ENTRY_EXCLUDE		(_BITULL(55))
#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT	(56)
/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
@@ -560,7 +852,7 @@ struct kvm_pmu_event_filter {
#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
/* x86-specific KVM_EXIT_HYPERCALL flags. */
-#define KVM_EXIT_HYPERCALL_LONG_MODE	BIT(0)
+#define KVM_EXIT_HYPERCALL_LONG_MODE	_BITULL(0)
#define KVM_X86_DEFAULT_VM	0
#define KVM_X86_SW_PROTECTED_VM	1
...
@@ -5,12 +5,12 @@
#include <asm/types.h>

/**
- * __fls - find last (most-significant) set bit in a long word
+ * generic___fls - find last (most-significant) set bit in a long word
 * @word: the word to search
 *
 * Undefined if no set bit exists, so code should check against 0 first.
 */
-static __always_inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long generic___fls(unsigned long word)
{
	int num = BITS_PER_LONG - 1;
@@ -41,4 +41,8 @@ static __always_inline unsigned long generic___fls(unsigned long word)
	return num;
}
#ifndef __HAVE_ARCH___FLS
#define __fls(word) generic___fls(word)
#endif
#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
@@ -3,14 +3,14 @@
#define _ASM_GENERIC_BITOPS_FLS_H_

/**
- * fls - find last (most-significant) bit set
+ * generic_fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
-static __always_inline int fls(unsigned int x)
+static __always_inline int generic_fls(unsigned int x)
{
	int r = 32;
@@ -39,4 +39,8 @@ static __always_inline int generic_fls(unsigned int x)
	return r;
}
#ifndef __HAVE_ARCH_FLS
#define fls(x) generic_fls(x)
#endif
#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
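The pattern in both hunks is the same: the generic implementation gains a generic_ prefix, and the unprefixed name becomes a macro an architecture can override by defining __HAVE_ARCH_FLS (or __HAVE_ARCH___FLS). A standalone sketch of the fallback path; the body of generic_fls() is elided in the diff above, and the usual binary-search implementation is assumed here:

```c
#include <stdio.h>

/* assumed body of generic_fls(): halve the search range each step */
static inline int generic_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
	return r;
}

/* an arch with a native instruction defines __HAVE_ARCH_FLS and its
 * own fls(); everyone else picks up the generic fallback */
#ifndef __HAVE_ARCH_FLS
#define fls(x) generic_fls(x)
#endif

int main(void)
{
	printf("%d %d %d\n", fls(0), fls(1), fls(0x80000000u)); /* 0 1 32 */
	return 0;
}
```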
@@ -3013,6 +3013,7 @@ struct drm_i915_query_item {
 *  - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
 *  - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
 *  - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
* - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
 */
	__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO		1
@@ -3021,6 +3022,7 @@ struct drm_i915_query_item {
#define DRM_I915_QUERY_MEMORY_REGIONS		4
#define DRM_I915_QUERY_HWCONFIG_BLOB		5
#define DRM_I915_QUERY_GEOMETRY_SUBSLICES	6
#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION 7
/* Must be kept compact -- no holes and well documented */

/**
@@ -3566,6 +3568,20 @@ struct drm_i915_query_memory_regions {
	struct drm_i915_memory_region_info regions[];
};
/**
* struct drm_i915_query_guc_submission_version - query GuC submission interface version
*/
struct drm_i915_query_guc_submission_version {
/** @branch: Firmware branch version. */
__u32 branch;
/** @major: Firmware major version. */
__u32 major;
/** @minor: Firmware minor version. */
__u32 minor;
/** @patch: Firmware patch version. */
__u32 patch;
};
/**
 * DOC: GuC HWCONFIG blob uAPI
 *
...
@@ -64,6 +64,24 @@ struct fstrim_range {
	__u64 minlen;
};
/*
* We include a length field because some filesystems (vfat) have an identifier
* that we do want to expose as a UUID, but doesn't have the standard length.
*
 * We use a fixed size buffer because this interface will, by fiat, never
* support "UUIDs" longer than 16 bytes; we don't want to force all downstream
* users to have to deal with that.
*/
struct fsuuid2 {
__u8 len;
__u8 uuid[16];
};
struct fs_sysfs_path {
__u8 len;
__u8 name[128];
};
/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
#define FILE_DEDUPE_RANGE_SAME		0
#define FILE_DEDUPE_RANGE_DIFFERS	1
@@ -215,6 +233,13 @@ struct fsxattr {
#define FS_IOC_FSSETXATTR		_IOW('X', 32, struct fsxattr)
#define FS_IOC_GETFSLABEL		_IOR(0x94, 49, char[FSLABEL_MAX])
#define FS_IOC_SETFSLABEL		_IOW(0x94, 50, char[FSLABEL_MAX])
/* Returns the external filesystem UUID, the same one blkid returns */
#define FS_IOC_GETFSUUID _IOR(0x15, 0, struct fsuuid2)
/*
* Returns the path component under /sys/fs/ that refers to this filesystem;
* also /sys/kernel/debug/ for filesystems with debugfs exports
*/
#define FS_IOC_GETFSSYSFSPATH _IOR(0x15, 1, struct fs_sysfs_path)
/*
 * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
@@ -301,9 +326,12 @@ typedef int __bitwise __kernel_rwf_t;
/* per-IO O_APPEND */
#define RWF_APPEND	((__force __kernel_rwf_t)0x00000010)
/* per-IO negation of O_APPEND */
#define RWF_NOAPPEND ((__force __kernel_rwf_t)0x00000020)
/* mask of flags supported by the kernel */
-#define RWF_SUPPORTED	(RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
-			 RWF_APPEND)
+#define RWF_SUPPORTED	(RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+			 RWF_APPEND | RWF_NOAPPEND)
/* Pagemap ioctl */
#define PAGEMAP_SCAN	_IOWR('f', 16, struct pm_scan_arg)
...
@@ -16,6 +16,11 @@
#define KVM_API_VERSION 12
/*
* Backwards-compatible definitions.
*/
#define __KVM_HAVE_GUEST_DEBUG
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
	__u32 slot;
@@ -85,43 +90,6 @@ struct kvm_pit_config {
#define KVM_PIT_SPEAKER_DUMMY 1
struct kvm_s390_skeys {
__u64 start_gfn;
__u64 count;
__u64 skeydata_addr;
__u32 flags;
__u32 reserved[9];
};
#define KVM_S390_CMMA_PEEK (1 << 0)
/**
* kvm_s390_cmma_log - Used for CMMA migration.
*
* Used both for input and output.
*
* @start_gfn: Guest page number to start from.
* @count: Size of the result buffer.
* @flags: Control operation mode via KVM_S390_CMMA_* flags
* @remaining: Used with KVM_S390_GET_CMMA_BITS. Indicates how many dirty
* pages are still remaining.
* @mask: Used with KVM_S390_SET_CMMA_BITS. Bitmap of bits to actually set
* in the PGSTE.
* @values: Pointer to the values buffer.
*
* Used in KVM_S390_{G,S}ET_CMMA_BITS ioctls.
*/
struct kvm_s390_cmma_log {
__u64 start_gfn;
__u32 count;
__u32 flags;
union {
__u64 remaining;
__u64 mask;
};
__u64 values;
};
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
@@ -315,11 +283,6 @@ struct kvm_run {
		__u32 ipb;
	} s390_sieic;
	/* KVM_EXIT_S390_RESET */
#define KVM_S390_RESET_POR 1
#define KVM_S390_RESET_CLEAR 2
#define KVM_S390_RESET_SUBSYSTEM 4
#define KVM_S390_RESET_CPU_INIT 8
#define KVM_S390_RESET_IPL 16
	__u64 s390_reset_flags;
	/* KVM_EXIT_S390_UCONTROL */
	struct {
@@ -536,43 +499,6 @@ struct kvm_translation {
	__u8  pad[5];
};
/* for KVM_S390_MEM_OP */
struct kvm_s390_mem_op {
/* in */
__u64 gaddr; /* the guest address */
__u64 flags; /* flags */
__u32 size; /* amount of bytes */
__u32 op; /* type of operation */
__u64 buf; /* buffer in userspace */
union {
struct {
__u8 ar; /* the access register number */
__u8 key; /* access key, ignored if flag unset */
__u8 pad1[6]; /* ignored */
__u64 old_addr; /* ignored if cmpxchg flag unset */
};
__u32 sida_offset; /* offset into the sida */
__u8 reserved[32]; /* ignored */
};
};
/* types for kvm_s390_mem_op->op */
#define KVM_S390_MEMOP_LOGICAL_READ 0
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
#define KVM_S390_MEMOP_SIDA_READ 2
#define KVM_S390_MEMOP_SIDA_WRITE 3
#define KVM_S390_MEMOP_ABSOLUTE_READ 4
#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
/* for KVM_INTERRUPT */
struct kvm_interrupt {
	/* in */
@@ -637,124 +563,6 @@ struct kvm_mp_state {
	__u32 mp_state;
};
struct kvm_s390_psw {
__u64 mask;
__u64 addr;
};
/* valid values for type in kvm_s390_interrupt */
#define KVM_S390_SIGP_STOP 0xfffe0000u
#define KVM_S390_PROGRAM_INT 0xfffe0001u
#define KVM_S390_SIGP_SET_PREFIX 0xfffe0002u
#define KVM_S390_RESTART 0xfffe0003u
#define KVM_S390_INT_PFAULT_INIT 0xfffe0004u
#define KVM_S390_INT_PFAULT_DONE 0xfffe0005u
#define KVM_S390_MCHK 0xfffe1000u
#define KVM_S390_INT_CLOCK_COMP 0xffff1004u
#define KVM_S390_INT_CPU_TIMER 0xffff1005u
#define KVM_S390_INT_VIRTIO 0xffff2603u
#define KVM_S390_INT_SERVICE 0xffff2401u
#define KVM_S390_INT_EMERGENCY 0xffff1201u
#define KVM_S390_INT_EXTERNAL_CALL 0xffff1202u
/* Anything below 0xfffe0000u is taken by INT_IO */
#define KVM_S390_INT_IO(ai,cssid,ssid,schid) \
(((schid)) | \
((ssid) << 16) | \
((cssid) << 18) | \
((ai) << 26))
#define KVM_S390_INT_IO_MIN 0x00000000u
#define KVM_S390_INT_IO_MAX 0xfffdffffu
#define KVM_S390_INT_IO_AI_MASK 0x04000000u
struct kvm_s390_interrupt {
__u32 type;
__u32 parm;
__u64 parm64;
};
struct kvm_s390_io_info {
__u16 subchannel_id;
__u16 subchannel_nr;
__u32 io_int_parm;
__u32 io_int_word;
};
struct kvm_s390_ext_info {
__u32 ext_params;
__u32 pad;
__u64 ext_params2;
};
struct kvm_s390_pgm_info {
__u64 trans_exc_code;
__u64 mon_code;
__u64 per_address;
__u32 data_exc_code;
__u16 code;
__u16 mon_class_nr;
__u8 per_code;
__u8 per_atmid;
__u8 exc_access_id;
__u8 per_access_id;
__u8 op_access_id;
#define KVM_S390_PGM_FLAGS_ILC_VALID 0x01
#define KVM_S390_PGM_FLAGS_ILC_0 0x02
#define KVM_S390_PGM_FLAGS_ILC_1 0x04
#define KVM_S390_PGM_FLAGS_ILC_MASK 0x06
#define KVM_S390_PGM_FLAGS_NO_REWIND 0x08
__u8 flags;
__u8 pad[2];
};
struct kvm_s390_prefix_info {
__u32 address;
};
struct kvm_s390_extcall_info {
__u16 code;
};
struct kvm_s390_emerg_info {
__u16 code;
};
#define KVM_S390_STOP_FLAG_STORE_STATUS 0x01
struct kvm_s390_stop_info {
__u32 flags;
};
struct kvm_s390_mchk_info {
__u64 cr14;
__u64 mcic;
__u64 failing_storage_address;
__u32 ext_damage_code;
__u32 pad;
__u8 fixed_logout[16];
};
struct kvm_s390_irq {
__u64 type;
union {
struct kvm_s390_io_info io;
struct kvm_s390_ext_info ext;
struct kvm_s390_pgm_info pgm;
struct kvm_s390_emerg_info emerg;
struct kvm_s390_extcall_info extcall;
struct kvm_s390_prefix_info prefix;
struct kvm_s390_stop_info stop;
struct kvm_s390_mchk_info mchk;
char reserved[64];
} u;
};
struct kvm_s390_irq_state {
__u64 buf;
__u32 flags; /* will stay unused for compatibility reasons */
__u32 len;
__u32 reserved[4]; /* will stay unused for compatibility reasons */
};
/* for KVM_SET_GUEST_DEBUG */
#define KVM_GUESTDBG_ENABLE		0x00000001
@@ -810,50 +618,6 @@ struct kvm_enable_cap {
	__u8  pad[64];
};
/* for KVM_PPC_GET_PVINFO */
#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
struct kvm_ppc_pvinfo {
/* out */
__u32 flags;
__u32 hcall[4];
__u8 pad[108];
};
/* for KVM_PPC_GET_SMMU_INFO */
#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
struct kvm_ppc_one_page_size {
__u32 page_shift; /* Page shift (or 0) */
__u32 pte_enc; /* Encoding in the HPTE (>>12) */
};
struct kvm_ppc_one_seg_page_size {
__u32 page_shift; /* Base page shift of segment (or 0) */
__u32 slb_enc; /* SLB encoding for BookS */
struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
__u32 slb_size;
__u16 data_keys; /* # storage keys supported for data */
__u16 instr_keys; /* # storage keys supported for instructions */
struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
};
/* for KVM_PPC_RESIZE_HPT_{PREPARE,COMMIT} */
struct kvm_ppc_resize_hpt {
__u64 flags;
__u32 shift;
__u32 pad;
};
#define KVMIO 0xAE
/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -923,9 +687,7 @@ struct kvm_ppc_resize_hpt {
/* Bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_DESTROY_MEMORY_REGION_WORKS 21
#define KVM_CAP_USER_NMI 22
#ifdef __KVM_HAVE_GUEST_DEBUG
#define KVM_CAP_SET_GUEST_DEBUG 23
#endif
#ifdef __KVM_HAVE_PIT
#define KVM_CAP_REINJECT_CONTROL 24
#endif
@@ -1156,8 +918,6 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GUEST_MEMFD 234
#define KVM_CAP_VM_TYPES 235
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing_irqchip {
	__u32 irqchip;
	__u32 pin;
@@ -1222,42 +982,6 @@ struct kvm_irq_routing {
	struct kvm_irq_routing_entry entries[];
};
#endif
#ifdef KVM_CAP_MCE
/* x86 MCE */
struct kvm_x86_mce {
__u64 status;
__u64 addr;
__u64 misc;
__u64 mcg_status;
__u8 bank;
__u8 pad1[7];
__u64 pad2[3];
};
#endif
#ifdef KVM_CAP_XEN_HVM
#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
#define KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE (1 << 7)
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
__u64 blob_addr_32;
__u64 blob_addr_64;
__u8 blob_size_32;
__u8 blob_size_64;
__u8 pad2[30];
};
#endif
#define KVM_IRQFD_FLAG_DEASSIGN (1 << 0)
/*
 * Available with KVM_CAP_IRQFD_RESAMPLE
@@ -1442,11 +1166,6 @@ struct kvm_vfio_spapr_tce {
					struct kvm_userspace_memory_region2)
/* enable ucontrol for s390 */
struct kvm_s390_ucas_mapping {
__u64 user_addr;
__u64 vcpu_addr;
__u64 length;
};
#define KVM_S390_UCAS_MAP	_IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
#define KVM_S390_UCAS_UNMAP	_IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
#define KVM_S390_VCPU_FAULT	_IOW(KVMIO, 0x52, unsigned long)
@@ -1641,89 +1360,6 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET	_IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET	_IO(KVMIO, 0xc4)
struct kvm_s390_pv_sec_parm {
__u64 origin;
__u64 length;
};
struct kvm_s390_pv_unp {
__u64 addr;
__u64 size;
__u64 tweak;
};
enum pv_cmd_dmp_id {
KVM_PV_DUMP_INIT,
KVM_PV_DUMP_CONFIG_STOR_STATE,
KVM_PV_DUMP_COMPLETE,
KVM_PV_DUMP_CPU,
};
struct kvm_s390_pv_dmp {
__u64 subcmd;
__u64 buff_addr;
__u64 buff_len;
__u64 gaddr; /* For dump storage state */
__u64 reserved[4];
};
enum pv_cmd_info_id {
KVM_PV_INFO_VM,
KVM_PV_INFO_DUMP,
};
struct kvm_s390_pv_info_dump {
__u64 dump_cpu_buffer_len;
__u64 dump_config_mem_buffer_per_1m;
__u64 dump_config_finalize_len;
};
struct kvm_s390_pv_info_vm {
__u64 inst_calls_list[4];
__u64 max_cpus;
__u64 max_guests;
__u64 max_guest_addr;
__u64 feature_indication;
};
struct kvm_s390_pv_info_header {
__u32 id;
__u32 len_max;
__u32 len_written;
__u32 reserved;
};
struct kvm_s390_pv_info {
struct kvm_s390_pv_info_header header;
union {
struct kvm_s390_pv_info_dump dump;
struct kvm_s390_pv_info_vm vm;
};
};
enum pv_cmd_id {
KVM_PV_ENABLE,
KVM_PV_DISABLE,
KVM_PV_SET_SEC_PARMS,
KVM_PV_UNPACK,
KVM_PV_VERIFY,
KVM_PV_PREP_RESET,
KVM_PV_UNSHARE_ALL,
KVM_PV_INFO,
KVM_PV_DUMP,
KVM_PV_ASYNC_CLEANUP_PREPARE,
KVM_PV_ASYNC_CLEANUP_PERFORM,
};
struct kvm_pv_cmd {
__u32 cmd; /* Command to be executed */
__u16 rc; /* Ultravisor return code */
__u16 rrc; /* Ultravisor return reason code */
__u64 data; /* Data or address */
__u32 flags; /* flags for future extensions. Must be 0 for now */
__u32 reserved[3];
};
/* Available with KVM_CAP_S390_PROTECTED */
#define KVM_S390_PV_COMMAND	_IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
@@ -1737,58 +1373,6 @@ struct kvm_pv_cmd {
#define KVM_XEN_HVM_GET_ATTR	_IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
#define KVM_XEN_HVM_SET_ATTR	_IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
struct kvm_xen_hvm_attr {
__u16 type;
__u16 pad[3];
union {
__u8 long_mode;
__u8 vector;
__u8 runstate_update_flag;
struct {
__u64 gfn;
#define KVM_XEN_INVALID_GFN ((__u64)-1)
} shared_info;
struct {
__u32 send_port;
__u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
__u32 flags;
#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
#define KVM_XEN_EVTCHN_RESET (1 << 2)
/*
* Events sent by the guest are either looped back to
* the guest itself (potentially on a different port#)
* or signalled via an eventfd.
*/
union {
struct {
__u32 port;
__u32 vcpu;
__u32 priority;
} port;
struct {
__u32 port; /* Zero for eventfd */
__s32 fd;
} eventfd;
__u32 padding[4];
} deliver;
} evtchn;
__u32 xen_version;
__u64 pad[8];
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
/* Per-vCPU Xen attributes */
#define KVM_XEN_VCPU_GET_ATTR	_IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
#define KVM_XEN_VCPU_SET_ATTR	_IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
@@ -1799,242 +1383,6 @@ struct kvm_xen_hvm_attr {
#define KVM_GET_SREGS2	_IOR(KVMIO, 0xcc, struct kvm_sregs2)
#define KVM_SET_SREGS2	_IOW(KVMIO, 0xcd, struct kvm_sregs2)
struct kvm_xen_vcpu_attr {
__u16 type;
__u16 pad[3];
union {
__u64 gpa;
#define KVM_XEN_INVALID_GPA ((__u64)-1)
__u64 pad[8];
struct {
__u64 state;
__u64 state_entry_time;
__u64 time_running;
__u64 time_runnable;
__u64 time_blocked;
__u64 time_offline;
} runstate;
__u32 vcpu_id;
struct {
__u32 port;
__u32 priority;
__u64 expires_ns;
} timer;
__u8 vector;
} u;
};
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
KVM_SEV_INIT = 0,
KVM_SEV_ES_INIT,
/* Guest launch commands */
KVM_SEV_LAUNCH_START,
KVM_SEV_LAUNCH_UPDATE_DATA,
KVM_SEV_LAUNCH_UPDATE_VMSA,
KVM_SEV_LAUNCH_SECRET,
KVM_SEV_LAUNCH_MEASURE,
KVM_SEV_LAUNCH_FINISH,
/* Guest migration commands (outgoing) */
KVM_SEV_SEND_START,
KVM_SEV_SEND_UPDATE_DATA,
KVM_SEV_SEND_UPDATE_VMSA,
KVM_SEV_SEND_FINISH,
/* Guest migration commands (incoming) */
KVM_SEV_RECEIVE_START,
KVM_SEV_RECEIVE_UPDATE_DATA,
KVM_SEV_RECEIVE_UPDATE_VMSA,
KVM_SEV_RECEIVE_FINISH,
/* Guest status and debug commands */
KVM_SEV_GUEST_STATUS,
KVM_SEV_DBG_DECRYPT,
KVM_SEV_DBG_ENCRYPT,
/* Guest certificates commands */
KVM_SEV_CERT_EXPORT,
/* Attestation report */
KVM_SEV_GET_ATTESTATION_REPORT,
/* Guest Migration Extension */
KVM_SEV_SEND_CANCEL,
KVM_SEV_NR_MAX,
};
struct kvm_sev_cmd {
__u32 id;
__u64 data;
__u32 error;
__u32 sev_fd;
};
struct kvm_sev_launch_start {
__u32 handle;
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
__u64 session_uaddr;
__u32 session_len;
};
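These SEV commands are not ioctls of their own; they travel through the KVM_MEMORY_ENCRYPT_OP ioctl (defined earlier in this header) wrapped in a kvm_sev_cmd. A hedged sketch of starting a launch session, with sev_fd an open handle on /dev/sev:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Issue KVM_SEV_LAUNCH_START; returns the guest handle, or -1. */
static int sev_launch_start(int vm_fd, int sev_fd, __u32 policy)
{
	struct kvm_sev_launch_start start = { .policy = policy };
	struct kvm_sev_cmd cmd = {
		.id = KVM_SEV_LAUNCH_START,
		.data = (__u64)(unsigned long)&start,
		.sev_fd = sev_fd,
	};

	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0) {
		fprintf(stderr, "SEV launch failed, fw error %u\n", cmd.error);
		return -1;
	}
	return start.handle;	/* filled in by the firmware on success */
}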
struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
};
struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};
struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
};
struct kvm_sev_guest_status {
__u32 handle;
__u32 policy;
__u32 state;
};
struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
};
struct kvm_sev_attestation_report {
__u8 mnonce[16];
__u64 uaddr;
__u32 len;
};
struct kvm_sev_send_start {
__u32 policy;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
__u64 session_uaddr;
__u32 session_len;
};
struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};
struct kvm_sev_receive_start {
__u32 handle;
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
__u64 session_uaddr;
__u32 session_len;
};
struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u64 guest_uaddr;
__u32 guest_len;
__u64 trans_uaddr;
__u32 trans_len;
};
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
struct kvm_assigned_pci_dev {
__u32 assigned_dev_id;
__u32 busnr;
__u32 devfn;
__u32 flags;
__u32 segnr;
union {
__u32 reserved[11];
};
};
#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
#define KVM_DEV_IRQ_HOST_MASK 0x00ff
#define KVM_DEV_IRQ_GUEST_MASK 0xff00
struct kvm_assigned_irq {
__u32 assigned_dev_id;
__u32 host_irq; /* ignored (legacy field) */
__u32 guest_irq;
__u32 flags;
union {
__u32 reserved[12];
};
};
struct kvm_assigned_msix_nr {
__u32 assigned_dev_id;
__u16 entry_nr;
__u16 padding;
};
#define KVM_MAX_MSIX_PER_DEV 256
struct kvm_assigned_msix_entry {
__u32 assigned_dev_id;
__u32 gsi;
__u16 entry; /* The index of entry in the MSI-X table */
__u16 padding[3];
};
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
/* Available with KVM_CAP_ARM_USER_IRQ */
/* Bits for run->s.regs.device_irq_level */
#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
#define KVM_ARM_DEV_PMU (1 << 2)
struct kvm_hyperv_eventfd {
__u32 conn_id;
__s32 fd;
__u32 flags;
__u32 padding[3];
};
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
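A hedged usage sketch: binding a Hyper-V connection id to an eventfd so that guest HvSignalEvent hypercalls wake userspace. This assumes the KVM_HYPERV_EVENTFD ioctl defined earlier in this header; error handling is trimmed:

#include <linux/kvm.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Returns the signalling eventfd for `conn_id`, or -1 on failure. */
static int hyperv_conn_to_eventfd(int vm_fd, __u32 conn_id)
{
	int efd = eventfd(0, EFD_CLOEXEC);
	struct kvm_hyperv_eventfd he = {
		.conn_id = conn_id & KVM_HYPERV_CONN_ID_MASK,
		.fd = efd,
		.flags = 0,	/* KVM_HYPERV_EVENTFD_DEASSIGN to unbind later */
	};

	if (efd < 0 || ioctl(vm_fd, KVM_HYPERV_EVENTFD, &he) < 0)
		return -1;
	return efd;
}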
#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
...
@@ -2180,33 +1528,6 @@ struct kvm_stats_desc {
/* Available with KVM_CAP_S390_ZPCI_OP */
#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
struct kvm_s390_zpci_op {
/* in */
__u32 fh; /* target device */
__u8 op; /* operation to perform */
__u8 pad[3];
union {
/* for KVM_S390_ZPCIOP_REG_AEN */
struct {
__u64 ibv; /* Guest addr of interrupt bit vector */
__u64 sb; /* Guest addr of summary bit */
__u32 flags;
__u32 noi; /* Number of interrupts */
__u8 isc; /* Guest interrupt subclass */
__u8 sbo; /* Offset of guest summary bit vector */
__u16 pad;
} reg_aen;
__u64 reserved[8];
} u;
};
/* types for kvm_s390_zpci_op->op */
#define KVM_S390_ZPCIOP_REG_AEN 0
#define KVM_S390_ZPCIOP_DEREG_AEN 1
/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
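A sketch of registering a zPCI device for adapter event notification, using the struct and constants above and the KVM_S390_ZPCI_OP ioctl defined earlier in this section. Every field value here (function handle, guest addresses, interrupt count, ISC) is illustrative only:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Forward adapter event notifications for device `fh` to the guest. */
static int zpci_reg_aen(int vm_fd, __u32 fh, __u64 ibv_gaddr, __u64 sb_gaddr)
{
	struct kvm_s390_zpci_op op = {
		.fh = fh,
		.op = KVM_S390_ZPCIOP_REG_AEN,
		.u.reg_aen = {
			.ibv = ibv_gaddr,	/* guest interrupt bit vector */
			.sb = sb_gaddr,		/* guest summary bit */
			.flags = KVM_S390_ZPCIOP_REGAEN_HOST,
			.noi = 64,		/* illustrative interrupt count */
			.isc = 3,		/* illustrative subclass */
			.sbo = 0,
		},
	};

	return ioctl(vm_fd, KVM_S390_ZPCI_OP, &op);
}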
/* Available with KVM_CAP_MEMORY_ATTRIBUTES */
#define KVM_SET_MEMORY_ATTRIBUTES _IOW(KVMIO, 0xd2, struct kvm_memory_attributes)
...
@@ -142,7 +142,7 @@ struct snd_hwdep_dsp_image {
 *                                                                           *
 *****************************************************************************/

-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 16)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 17)

typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
...
@@ -416,7 +416,7 @@ struct snd_pcm_hw_params {
	unsigned int rmask; /* W: requested masks */
	unsigned int cmask; /* R: changed masks */
	unsigned int info; /* R: Info flags for returned setup */
-	unsigned int msbits; /* R: used most significant bits */
+	unsigned int msbits; /* R: used most significant bits (in sample bit-width) */
	unsigned int rate_num; /* R: rate numerator */
	unsigned int rate_den; /* R: rate denominator */
	snd_pcm_uframes_t fifo_size; /* R: chip FIFO size in frames */
...
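The msbits comment change above narrows what the field reports: per the updated comment, the count of significant bits is expressed within the nominal sample bit-width rather than the physical width. A sketch of reading it back, assuming `params` was already initialized with the desired configuration space before the call:

#include <sound/asound.h>
#include <sys/ioctl.h>

/* Returns the number of significant bits per sample, or 0 on error. */
static unsigned int pcm_msbits(int pcm_fd, struct snd_pcm_hw_params *params)
{
	if (ioctl(pcm_fd, SNDRV_PCM_IOCTL_HW_PARAMS, params) < 0)
		return 0;
	return params->msbits;	/* counted within the sample bit-width */
}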
@@ -970,7 +970,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel,
	if (dso->annotate_warned)
		return -1;

-	if (not_annotated) {
+	if (not_annotated || !sym->annotate2) {
		err = symbol__annotate2(ms, evsel, &browser.arch);
		if (err) {
			char msg[BUFSIZ];
...
@@ -2461,6 +2461,9 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
	if (parch)
		*parch = arch;

+	if (!list_empty(&notes->src->source))
+		return 0;
+
	args.arch = arch;
	args.ms = *ms;
	if (annotate_opts.full_addr)
...
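Taken together, the two hunks make TUI annotation lazy and idempotent: the browser re-runs symbol__annotate2() whenever the per-symbol annotation is missing, and symbol__annotate() now returns early once notes->src->source is populated, so a repeat call is a cheap no-op. The guard pattern in isolation, with a hypothetical builder function:

/* Idempotent lazy init: every path that needs the data may call this;
 * only the first call does the work. */
static int ensure_annotated(struct annotated_source *src)
{
	if (!list_empty(&src->source))
		return 0;			/* already built */
	return build_source_lines(src);		/* hypothetical builder */
}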
@@ -284,6 +284,7 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
	struct task_struct *curr;
	struct mm_struct___old *mm_old;
	struct mm_struct___new *mm_new;
+	struct sighand_struct *sighand;

	switch (flags) {
	case LCB_F_READ:  /* rwsem */
...
@@ -305,7 +306,9 @@ static inline __u32 check_lock_type(__u64 lock, __u32 flags)
		break;
	case LCB_F_SPIN:  /* spinlock */
		curr = bpf_get_current_task_btf();
-		if (&curr->sighand->siglock == (void *)lock)
+		sighand = curr->sighand;
+
+		if (sighand && &sighand->siglock == (void *)lock)
			return LCD_F_SIGHAND_LOCK;
		break;
	default:
...
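The hunk above loads curr->sighand into a local and NULL-checks it before touching siglock: dereferencing the chain in a single expression can be rejected by the BPF verifier, which must assume the intermediate pointer may be NULL. A standalone sketch of the same pattern, assuming vmlinux.h and libbpf; the section and message are illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("tp_btf/sched_switch")
int check_sighand(u64 *ctx)
{
	struct task_struct *curr = bpf_get_current_task_btf();
	struct sighand_struct *sighand = curr->sighand;

	/* The verifier tracks `sighand` as a possibly NULL BTF pointer;
	 * only the checked branch may compute &sighand->siglock. */
	if (sighand)
		bpf_printk("siglock at %lx", (unsigned long)&sighand->siglock);
	return 0;
}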