Commit 4557062d authored by Catalin Marinas's avatar Catalin Marinas

Merge branches 'for-next/misc', 'for-next/vmcoreinfo', 'for-next/cpufeature',...

Merge branches 'for-next/misc', 'for-next/vmcoreinfo', 'for-next/cpufeature', 'for-next/acpi', 'for-next/perf', 'for-next/timens', 'for-next/msi-iommu' and 'for-next/trivial' into for-next/core

* for-next/misc:
  : Miscellaneous fixes and cleanups
  arm64: use IRQ_STACK_SIZE instead of THREAD_SIZE for irq stack
  arm64/mm: save memory access in check_and_switch_context() fast switch path
  recordmcount: only record relocation of type R_AARCH64_CALL26 on arm64.
  arm64: Reserve HWCAP2_MTE as (1 << 18)
  arm64/entry: deduplicate SW PAN entry/exit routines
  arm64: s/AMEVTYPE/AMEVTYPER
  arm64/hugetlb: Reserve CMA areas for gigantic pages on 16K and 64K configs
  arm64: stacktrace: Move export for save_stack_trace_tsk()
  smccc: Make constants available to assembly
  arm64/mm: Redefine CONT_{PTE, PMD}_SHIFT
  arm64/defconfig: Enable CONFIG_KEXEC_FILE
  arm64: Document sysctls for emulated deprecated instructions
  arm64/panic: Unify all three existing notifier blocks
  arm64/module: Optimize module load time by optimizing PLT counting

* for-next/vmcoreinfo:
  : Export the virtual and physical address sizes in vmcoreinfo
  arm64/crash_core: Export TCR_EL1.T1SZ in vmcoreinfo
  crash_core, vmcoreinfo: Append 'MAX_PHYSMEM_BITS' to vmcoreinfo

* for-next/cpufeature:
  : CPU feature handling cleanups
  arm64/cpufeature: Validate feature bits spacing in arm64_ftr_regs[]
  arm64/cpufeature: Replace all open bits shift encodings with macros
  arm64/cpufeature: Add remaining feature bits in ID_AA64MMFR2 register
  arm64/cpufeature: Add remaining feature bits in ID_AA64MMFR1 register
  arm64/cpufeature: Add remaining feature bits in ID_AA64MMFR0 register

* for-next/acpi:
  : ACPI updates for arm64
  arm64/acpi: disallow writeable AML opregion mapping for EFI code regions
  arm64/acpi: disallow AML memory opregions to access kernel memory

* for-next/perf:
  : perf updates for arm64
  arm64: perf: Expose some new events via sysfs
  tools headers UAPI: Update tools's copy of linux/perf_event.h
  arm64: perf: Add cap_user_time_short
  perf: Add perf_event_mmap_page::cap_user_time_short ABI
  arm64: perf: Only advertise cap_user_time for arch_timer
  arm64: perf: Implement correct cap_user_time
  time/sched_clock: Use raw_read_seqcount_latch()
  sched_clock: Expose struct clock_read_data
  arm64: perf: Correct the event index in sysfs
  perf/smmuv3: To simplify code for ioremap page in pmcg

* for-next/timens:
  : Time namespace support for arm64
  arm64: enable time namespace support
  arm64/vdso: Restrict splitting VVAR VMA
  arm64/vdso: Handle faults on timens page
  arm64/vdso: Add time namespace page
  arm64/vdso: Zap vvar pages when switching to a time namespace
  arm64/vdso: use the fault callback to map vvar pages

* for-next/msi-iommu:
  : Make the MSI/IOMMU input/output ID translation PCI agnostic, augment the
  : MSI/IOMMU ACPI/OF ID mapping APIs to accept an input ID bus-specific parameter
  : and apply the resulting changes to the device ID space provided by the
  : Freescale FSL bus
  bus: fsl-mc: Add ACPI support for fsl-mc
  bus/fsl-mc: Refactor the MSI domain creation in the DPRC driver
  of/irq: Make of_msi_map_rid() PCI bus agnostic
  of/irq: make of_msi_map_get_device_domain() bus agnostic
  dt-bindings: arm: fsl: Add msi-map device-tree binding for fsl-mc bus
  of/device: Add input id to of_dma_configure()
  of/iommu: Make of_map_rid() PCI agnostic
  ACPI/IORT: Add an input ID to acpi_dma_configure()
  ACPI/IORT: Remove useless PCI bus walk
  ACPI/IORT: Make iort_msi_map_rid() PCI agnostic
  ACPI/IORT: Make iort_get_device_domain IRQ domain agnostic
  ACPI/IORT: Make iort_match_node_callback walk the ACPI namespace for NC

* for-next/trivial:
  : Trivial fixes
  arm64: sigcontext.h: delete duplicated word
  arm64: ptrace.h: delete duplicated word
  arm64: pgtable-hwdef.h: delete duplicated words
@@ -93,6 +93,11 @@ It exists in the sparse memory mapping model, and it is also somewhat
similar to the mem_map variable, both of them are used to translate an
address.

MAX_PHYSMEM_BITS
----------------
Defines the maximum supported physical address space size.

page
----
@@ -399,6 +404,17 @@ KERNELPACMASK
The mask to extract the Pointer Authentication Code from a kernel virtual
address.
TCR_EL1.T1SZ
------------
Indicates the size offset of the memory region addressed by TTBR1_EL1.
The region size is 2^(64 - T1SZ) bytes.

TTBR1_EL1 is the translation table base address register defined by the
ARMv8-A architecture, used to look up the page tables for virtual
addresses in the higher VA range (refer to the ARMv8-A Architecture
Reference Manual for more details).
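A worked example of the formula above (illustrative only, not part of this
patch): with 4K pages and 48-bit virtual addresses, T1SZ is 16, so the
TTBR1_EL1 region spans 2^48 bytes and grows down from the top of the address
space. A vmcoreinfo consumer might derive the range as in this C sketch,
where both helper names are hypothetical:

        #include <stdint.h>

        /* Hypothetical helpers for a vmcoreinfo consumer; not kernel code. */
        static uint64_t ttbr1_va_bits(uint64_t t1sz)
        {
                return 64 - t1sz;       /* region size is 2^(64 - T1SZ) bytes */
        }

        static uint64_t ttbr1_va_start(uint64_t t1sz)
        {
                /* The higher VA range grows down from 2^64. */
                return ~((UINT64_C(1) << ttbr1_va_bits(t1sz)) - 1);
        }
        /* Example: t1sz = 16 -> 48 VA bits, range starts at 0xffff000000000000. */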
arm
===
...
@@ -28,6 +28,16 @@ Documentation/devicetree/bindings/iommu/iommu.txt.
For arm-smmu binding, see:
Documentation/devicetree/bindings/iommu/arm,smmu.yaml.
The MSI writes are accompanied by sideband data which is derived from the ICID.
The msi-map property is used to associate the devices with both the ITS
controller and the sideband data which accompanies the writes.
For generic MSI bindings, see
Documentation/devicetree/bindings/interrupt-controller/msi.txt.
For GICv3 and GIC ITS bindings, see:
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml.
Required properties:
- compatible
@@ -49,11 +59,6 @@ Required properties:
region may not be present in some scenarios, such
as in the device tree presented to a virtual machine.

- ranges
Value type: <prop-encoded-array>
Definition: A standard property. Defines the mapping between the child
@@ -119,6 +124,28 @@ Optional properties:
associated with the listed IOMMU, with the iommu-specifier
(i - icid-base + iommu-base).
- msi-map: Maps an ICID to a GIC ITS and associated msi-specifier
data.
The property is an arbitrary number of tuples of
(icid-base,gic-its,msi-base,length).
Any ICID in the interval [icid-base, icid-base + length) is
associated with the listed GIC ITS, with the msi-specifier
(i - icid-base + msi-base).
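As a worked illustration of the tuple rule above (a hedged sketch, not
kernel code; the helper name and use of C types are hypothetical): with
msi-map = <23 &its 23 41>, ICID 30 falls in [23, 64) and maps to
msi-specifier 30 - 23 + 23 = 30.

        #include <stdbool.h>
        #include <stdint.h>

        /* Hypothetical helper showing the (icid-base, gic-its, msi-base, length) rule. */
        static bool msi_map_translate(uint32_t icid, uint32_t icid_base,
                                      uint32_t msi_base, uint32_t length,
                                      uint32_t *msi_spec)
        {
                if (icid < icid_base || icid >= icid_base + length)
                        return false;   /* this tuple does not cover the ICID */

                *msi_spec = icid - icid_base + msi_base;
                return true;
        }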
Deprecated properties:
- msi-parent
Value type: <phandle>
Definition: Describes the MSI controller node handling message
interrupts for the MC. When there is no translation
between the ICID and deviceID this property can be used
to describe the MSI controller used by the devices on the
mc-bus.
The use of this property for mc-bus is deprecated. Please
use msi-map.
Example:

smmu: iommu@5000000 {
@@ -128,13 +155,24 @@
        ...
};
gic: interrupt-controller@6000000 {
        compatible = "arm,gic-v3";
        ...
};

its: gic-its@6020000 {
        compatible = "arm,gic-v3-its";
        msi-controller;
        ...
};

fsl_mc: fsl-mc@80c000000 {
        compatible = "fsl,qoriq-mc";
        reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
              <0x00000000 0x08340000 0 0x40000>; /* MC control reg */

        /* define map for ICIDs 23-64 */
        iommu-map = <23 &smmu 23 41>;

        /* define msi map for ICIDs 23-64 */
        msi-map = <23 &its 23 41>;

        #address-cells = <3>;
        #size-cells = <1>;
...
@@ -118,6 +118,7 @@ config ARM64
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select GENERIC_GETTIMEOFDAY
select GENERIC_VDSO_TIME_NS
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
select HAVE_PCI
...
@@ -47,20 +47,7 @@
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);

/* ACPI table mapping after acpi_permanent_mmap is set */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap

typedef u64 phys_cpuid_t;
...
@@ -72,6 +72,13 @@
#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x36
#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x37
#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x38
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x39
#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x3A
#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x3B
#define ARMV8_PMUV3_PERFCTR_STALL 0x3C
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x3D
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x3E
#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x3F

/* Statistical profiling extension microarchitectural events */
#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000
@@ -79,6 +86,26 @@
#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002
#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003

/* AMUv1 architecture events */
#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004
#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005

/* long-latency read miss events */
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B

/* additional latency from alignment events */
#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020
#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021
#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022

/* Armv8.5 Memory Tagging Extension events */
#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024
#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025
#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026

/* ARMv8 recommended implementation defined event types */
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
...
@@ -29,7 +29,7 @@
* Size mapped by an entry at level n ( 0 <= n <= 3)
* We map (PAGE_SHIFT - 3) at all translation levels and PAGE_SHIFT bits
* in the final page. The maximum number of translation levels supported by
* the architecture is 4. Hence, starting at level n, we have further
* ((4 - n) - 1) levels of translation excluding the offset within the page.
* So, the total number of bits mapped by an entry at level n is :
*
@@ -98,7 +98,7 @@
#define CONT_PMDS (1 << (CONT_PMD_SHIFT - PMD_SHIFT))
#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1))
/* the numerical offset of the PTE within a range of CONT_PTES */
#define CONT_RANGE_OFFSET(addr) (((addr)>>PAGE_SHIFT)&(CONT_PTES-1))

/*
@@ -216,6 +216,7 @@
#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
#define TCR_TxSZ_WIDTH 6
#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
#define TCR_T1SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T1SZ_OFFSET)

#define TCR_EPD0_SHIFT 7
#define TCR_EPD0_MASK (UL(1) << TCR_EPD0_SHIFT)
...
@@ -27,7 +27,7 @@
*
* Some code sections either automatically switch back to PSR.I or explicitly
* require to not use priority masking. If bit GIC_PRIO_PSR_I_SET is included
* in the priority mask, it indicates that PSR.I should be set and
* interrupt disabling temporarily does not rely on IRQ priorities.
*/
#define GIC_PRIO_IRQON 0xe0
...
@@ -706,6 +706,9 @@
#define ID_AA64ZFR0_SVEVER_SVE2 0x1

/* id_aa64mmfr0 */
#define ID_AA64MMFR0_ECV_SHIFT 60
#define ID_AA64MMFR0_FGT_SHIFT 56
#define ID_AA64MMFR0_EXS_SHIFT 44
#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
@@ -734,6 +737,10 @@
#endif

/* id_aa64mmfr1 */
#define ID_AA64MMFR1_ETS_SHIFT 36
#define ID_AA64MMFR1_TWED_SHIFT 32
#define ID_AA64MMFR1_XNX_SHIFT 28
#define ID_AA64MMFR1_SPECSEI_SHIFT 24
#define ID_AA64MMFR1_PAN_SHIFT 20
#define ID_AA64MMFR1_LOR_SHIFT 16
#define ID_AA64MMFR1_HPD_SHIFT 12
@@ -746,8 +753,15 @@
/* id_aa64mmfr2 */
#define ID_AA64MMFR2_E0PD_SHIFT 60
#define ID_AA64MMFR2_EVT_SHIFT 56
#define ID_AA64MMFR2_BBM_SHIFT 52
#define ID_AA64MMFR2_TTL_SHIFT 48
#define ID_AA64MMFR2_FWB_SHIFT 40
#define ID_AA64MMFR2_IDS_SHIFT 36
#define ID_AA64MMFR2_AT_SHIFT 32
#define ID_AA64MMFR2_ST_SHIFT 28
#define ID_AA64MMFR2_NV_SHIFT 24
#define ID_AA64MMFR2_CCIDX_SHIFT 20
#define ID_AA64MMFR2_LVA_SHIFT 16
#define ID_AA64MMFR2_IESB_SHIFT 12
#define ID_AA64MMFR2_LSM_SHIFT 8
@@ -755,6 +769,7 @@
#define ID_AA64MMFR2_CNP_SHIFT 0

/* id_aa64dfr0 */
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
@@ -807,18 +822,40 @@
#define ID_ISAR6_DP_SHIFT 4
#define ID_ISAR6_JSCVT_SHIFT 0

#define ID_MMFR0_INNERSHR_SHIFT 28
#define ID_MMFR0_FCSE_SHIFT 24
#define ID_MMFR0_AUXREG_SHIFT 20
#define ID_MMFR0_TCM_SHIFT 16
#define ID_MMFR0_SHARELVL_SHIFT 12
#define ID_MMFR0_OUTERSHR_SHIFT 8
#define ID_MMFR0_PMSA_SHIFT 4
#define ID_MMFR0_VMSA_SHIFT 0

#define ID_MMFR4_EVT_SHIFT 28
#define ID_MMFR4_CCIDX_SHIFT 24
#define ID_MMFR4_LSM_SHIFT 20
#define ID_MMFR4_HPDS_SHIFT 16
#define ID_MMFR4_CNP_SHIFT 12
#define ID_MMFR4_XNX_SHIFT 8
#define ID_MMFR4_AC2_SHIFT 4
#define ID_MMFR4_SPECSEI_SHIFT 0

#define ID_MMFR5_ETS_SHIFT 0

#define ID_PFR0_DIT_SHIFT 24
#define ID_PFR0_CSV2_SHIFT 16
#define ID_PFR0_STATE3_SHIFT 12
#define ID_PFR0_STATE2_SHIFT 8
#define ID_PFR0_STATE1_SHIFT 4
#define ID_PFR0_STATE0_SHIFT 0

#define ID_DFR0_PERFMON_SHIFT 24
#define ID_DFR0_MPROFDBG_SHIFT 20
#define ID_DFR0_MMAPTRC_SHIFT 16
#define ID_DFR0_COPTRC_SHIFT 12
#define ID_DFR0_MMAPDBG_SHIFT 8
#define ID_DFR0_COPSDBG_SHIFT 4
#define ID_DFR0_COPDBG_SHIFT 0

#define ID_PFR2_SSBS_SHIFT 4
#define ID_PFR2_CSV3_SHIFT 0
@@ -861,6 +898,11 @@
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
#endif

#define MVFR2_FPMISC_SHIFT 4
#define MVFR2_SIMDMISC_SHIFT 0

#define DCZID_DZP_SHIFT 4
#define DCZID_BS_SHIFT 0

/*
* The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
...
@@ -12,6 +12,8 @@
*/
#define VDSO_LBASE 0x0

#define __VVAR_PAGES 2

#ifndef __ASSEMBLY__

#include <generated/vdso-offsets.h>
...
@@ -152,6 +152,18 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
return ret;
}

#ifdef CONFIG_TIME_NS
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
const struct vdso_data *ret;

/* See __arch_get_vdso_data(). */
asm volatile("mov %0, %1" : "=r"(ret) : "r"(_timens_data));

return ret;
}
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
@@ -96,6 +96,14 @@ const struct vdso_data *__arch_get_vdso_data(void)
return _vdso_data;
}

#ifdef CONFIG_TIME_NS
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(void)
{
return _timens_data;
}
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
@@ -179,7 +179,7 @@ struct sve_context {
* The same convention applies when returning from a signal: a caller
* will need to remove or resize the sve_context block if it wants to
* make the SVE registers live when they were previously non-live or
* vice-versa. This may require the caller to allocate fresh
* memory and/or move other context blocks in the signal frame.
*
* Changing the vector length during signal return is not permitted:
...
@@ -261,6 +261,81 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_DEVICE_nGnRnE);
}
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
efi_memory_desc_t *md, *region = NULL;
pgprot_t prot;
if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
return NULL;
for_each_efi_memory_desc(md) {
u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
if (phys < md->phys_addr || phys >= end)
continue;
if (phys + size > end) {
pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
return NULL;
}
region = md;
break;
}
/*
* It is fine for AML to remap regions that are not represented in the
* EFI memory map at all, as it only describes normal memory, and MMIO
* regions that require a virtual mapping to make them accessible to
* the EFI runtime services.
*/
prot = __pgprot(PROT_DEVICE_nGnRnE);
if (region) {
switch (region->type) {
case EFI_LOADER_CODE:
case EFI_LOADER_DATA:
case EFI_BOOT_SERVICES_CODE:
case EFI_BOOT_SERVICES_DATA:
case EFI_CONVENTIONAL_MEMORY:
case EFI_PERSISTENT_MEMORY:
pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys);
return NULL;
case EFI_RUNTIME_SERVICES_CODE:
/*
* This would be unusual, but not problematic per se,
* as long as we take care not to create a writable
* mapping for executable code.
*/
prot = PAGE_KERNEL_RO;
break;
case EFI_ACPI_RECLAIM_MEMORY:
/*
* ACPI reclaim memory is used to pass firmware tables
* and other data that is intended for consumption by
* the OS only, which may decide it wants to reclaim
* that memory and use it for something else. We never
* do that, but we usually add it to the linear map
* anyway, in which case we should use the existing
* mapping.
*/
if (memblock_is_map_memory(phys))
return (void __iomem *)__phys_to_virt(phys);
/* fall through */
default:
if (region->attribute & EFI_MEMORY_WB)
prot = PAGE_KERNEL;
else if (region->attribute & EFI_MEMORY_WT)
prot = __pgprot(PROT_NORMAL_WT);
else if (region->attribute & EFI_MEMORY_WC)
prot = __pgprot(PROT_NORMAL_NC);
}
}
return __ioremap(phys, size, prot);
}
/*
* Claim Synchronous External Aborts as a firmware first notification.
*
...
@@ -256,6 +256,9 @@ static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ECV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_FGT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EXS_SHIFT, 4, 0),
/*
* Page size not being supported at Stage-2 is not fatal. You
* just give up KVM if PAGE_SIZE isn't supported there. Go fix
@@ -299,6 +302,10 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_ETS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_TWED_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_XNX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_SPECSEI_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
@@ -310,8 +317,15 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EVT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_BBM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_TTL_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IDS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_ST_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_NV_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CCIDX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
@@ -332,7 +346,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
* make use of *minLine.
* If we have differing I-cache policies, report it as the weakest - VIPT.
*/
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_L1IP_SHIFT, 2, ICACHE_POLICY_VIPT), /* L1Ip */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -343,19 +357,19 @@ struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_INNERSHR_SHIFT, 4, 0xf),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_FCSE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_AUXREG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_TCM_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_SHARELVL_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_OUTERSHR_SHIFT, 4, 0xf),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_PMSA_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_VMSA_SHIFT, 4, 0),
ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_DOUBLELOCK_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
@@ -371,14 +385,14 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_FPMISC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_SIMDMISC_SHIFT, 4, 0),
ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_DZP_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_BS_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -410,7 +424,8 @@ static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_HPDS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_CNP_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_XNX_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_AC2_SHIFT, 4, 0),
/*
* SpecSEI = 1 indicates that the PE might generate an SError on an
* external abort on speculative read. It is safe to assume that an
@@ -452,10 +467,10 @@ static const struct arm64_ftr_bits ftr_id_isar6[] = {

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_DIT_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE1_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_STATE0_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -479,13 +494,13 @@ static const struct arm64_ftr_bits ftr_id_pfr2[] = {

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
/* [31:28] TraceFilt */
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_PERFMON_SHIFT, 4, 0xf),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MPROFDBG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPTRC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPTRC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_MMAPDBG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPSDBG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_COPDBG_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -684,11 +699,52 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
static void __init sort_ftr_regs(void)
{
unsigned int i;

for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
unsigned int j = 0;

/*
* Features here must be sorted in descending order with respect
* to their shift values and should not overlap with each other.
*/
for (; ftr_bits->width != 0; ftr_bits++, j++) {
unsigned int width = ftr_reg->ftr_bits[j].width;
unsigned int shift = ftr_reg->ftr_bits[j].shift;
unsigned int prev_shift;

WARN((shift + width) > 64,
"%s has invalid feature at shift %d\n",
ftr_reg->name, shift);

/*
* Skip the first feature. There is nothing to
* compare against for now.
*/
if (j == 0)
continue;

prev_shift = ftr_reg->ftr_bits[j - 1].shift;
WARN((shift + width) > prev_shift,
"%s has feature overlap at shift %d\n",
ftr_reg->name, shift);
}

/*
* Skip the first register. There is nothing to
* compare against for now.
*/
if (i == 0)
continue;

/*
* Registers here must be sorted in ascending order with respect
* to sys_id for subsequent binary search in get_arm64_ftr_reg()
* to work correctly.
*/
BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}
}

/*
...
@@ -7,6 +7,14 @@
#include <linux/crash_core.h>
#include <asm/cpufeature.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

static inline u64 get_tcr_el1_t1sz(void);

static inline u64 get_tcr_el1_t1sz(void)
{
return (read_sysreg(tcr_el1) & TCR_T1SZ_MASK) >> TCR_T1SZ_OFFSET;
}

void arch_crash_save_vmcoreinfo(void)
{
@@ -16,6 +24,8 @@ void arch_crash_save_vmcoreinfo(void)
kimage_voffset);
vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
PHYS_OFFSET);
vmcoreinfo_append_str("NUMBER(TCR_EL1_T1SZ)=0x%llx\n",
get_tcr_el1_t1sz());
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
vmcoreinfo_append_str("NUMBER(KERNELPACMASK)=0x%llx\n",
system_supports_address_auth() ?
...
@@ -13,12 +13,15 @@
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

/* ARMv8 Cortex-A53 specific event types. */
@@ -155,7 +158,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config) \
@@ -222,10 +225,29 @@ static struct attribute *armv8_pmuv3_event_attrs[] = {
ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
NULL,
};
@@ -244,10 +266,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
return attr->mode;

if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
test_bit(id, cpu_pmu->pmceid_ext_bitmap))
return attr->mode;
}

return 0;
}
@@ -1165,28 +1190,54 @@ device_initcall(armv8_pmu_driver_init)
void arch_perf_update_userpage(struct perf_event *event,
struct perf_event_mmap_page *userpg, u64 now)
{
struct clock_read_data *rd;
unsigned int seq;
u64 ns;

userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_time_short = 0;

do {
rd = sched_clock_read_begin(&seq);

if (rd->read_sched_clock != arch_timer_read_counter)
return;

userpg->time_mult = rd->mult;
userpg->time_shift = rd->shift;
userpg->time_zero = rd->epoch_ns;
userpg->time_cycles = rd->epoch_cyc;
userpg->time_mask = rd->sched_clock_mask;

/*
* Subtract the cycle base, such that software that
* doesn't know about cap_user_time_short still 'works'
* assuming no wraps.
*/
ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
userpg->time_zero -= ns;
} while (sched_clock_read_retry(seq));

userpg->time_offset = userpg->time_zero - now;

/*
* time_shift is not expected to be greater than 31 due to
* the original published conversion algorithm shifting a
* 32-bit value (now specifies a 64-bit value) - refer
* perf_event_mmap_page documentation in perf_event.h.
*/
if (userpg->time_shift == 32) {
userpg->time_shift = 31;
userpg->time_mult >>= 1;
}

/*
* Internal timekeeping for enabled/running/stopped times
* is always computed with the sched_clock.
*/
userpg->cap_user_time = 1;
userpg->cap_user_time_zero = 1;
userpg->cap_user_time_short = 1;
}
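For context, a hedged userspace sketch (not part of this patch) of how a
profiler consumes these fields, following the conversion algorithm documented
for perf_event_mmap_page in perf_event.h; seqlock handling via the page's
lock field is omitted here for brevity:

        #include <stdint.h>
        #include <linux/perf_event.h>

        /* Illustrative only: convert a raw counter value to sched_clock ns. */
        static uint64_t user_cyc_to_ns(const struct perf_event_mmap_page *pg,
                                       uint64_t cyc)
        {
                uint64_t quot, rem;

                if (pg->cap_user_time_short)    /* reduce into the valid window */
                        cyc = pg->time_cycles +
                                ((cyc - pg->time_cycles) & pg->time_mask);

                quot = cyc >> pg->time_shift;
                rem  = cyc & (((uint64_t)1 << pg->time_shift) - 1);

                return pg->time_zero + quot * pg->time_mult +
                       ((rem * pg->time_mult) >> pg->time_shift);
        }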
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
@@ -40,6 +41,12 @@ enum vdso_abi {
#endif /* CONFIG_COMPAT_VDSO */
};
enum vvar_pages {
VVAR_DATA_PAGE_OFFSET,
VVAR_TIMENS_PAGE_OFFSET,
VVAR_NR_PAGES,
};
struct vdso_abi_info {
const char *name;
const char *vdso_code_start;
@@ -107,25 +114,122 @@ static int __vdso_init(enum vdso_abi abi)
vdso_info[abi].vdso_code_start) >>
PAGE_SHIFT;

vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
sizeof(struct page *),
GFP_KERNEL);
if (vdso_pagelist == NULL)
return -ENOMEM;

/* Grab the vDSO code pages. */
pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

for (i = 0; i < vdso_info[abi].vdso_pages; i++)
vdso_pagelist[i] = pfn_to_page(pfn + i);

vdso_info[abi].cm->pages = vdso_pagelist;

return 0;
}
#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
return (struct vdso_data *)(vvar_page);
}
/*
* The vvar mapping contains data for a specific time namespace, so when a task
* changes namespace we must unmap its vvar data for the old namespace.
* Subsequent faults will map in data for the new namespace.
*
* For more details see timens_setup_vdso_data().
*/
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
struct mm_struct *mm = task->mm;
struct vm_area_struct *vma;
mmap_read_lock(mm);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
unsigned long size = vma->vm_end - vma->vm_start;
if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
zap_page_range(vma, vma->vm_start, size);
#endif
}
mmap_read_unlock(mm);
return 0;
}
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
if (likely(vma->vm_mm == current->mm))
return current->nsproxy->time_ns->vvar_page;
/*
* VM_PFNMAP | VM_IO protect .fault() handler from being called
* through interfaces like /proc/$pid/mem or
* process_vm_{readv,writev}() as long as there's no .access()
* in special_mapping_vmops.
* For more details check_vma_flags() and __access_remote_vm()
*/
WARN(1, "vvar_page accessed remotely");
return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
return NULL;
}
#endif
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *timens_page = find_timens_vvar_page(vma);
unsigned long pfn;
switch (vmf->pgoff) {
case VVAR_DATA_PAGE_OFFSET:
if (timens_page)
pfn = page_to_pfn(timens_page);
else
pfn = sym_to_pfn(vdso_data);
break;
#ifdef CONFIG_TIME_NS
case VVAR_TIMENS_PAGE_OFFSET:
/*
* If a task belongs to a time namespace then a namespace
* specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
* the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
* offset.
* See also the comment near timens_setup_vdso_data().
*/
if (!timens_page)
return VM_FAULT_SIGBUS;
pfn = sym_to_pfn(vdso_data);
break;
#endif /* CONFIG_TIME_NS */
default:
return VM_FAULT_SIGBUS;
}
return vmf_insert_pfn(vma, vmf->address, pfn);
}
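To summarize the page swap implemented by vvar_fault() (an illustrative
sketch, not kernel code; the helper is hypothetical): which backing page a
given vvar page offset resolves to depends only on whether the task is in a
non-root time namespace.

        /* Illustrative mapping of vvar pgoff -> backing page, per vvar_fault(). */
        static const char *vvar_backing(unsigned long pgoff, int in_timens)
        {
                switch (pgoff) {
                case 0: /* VVAR_DATA_PAGE_OFFSET */
                        return in_timens ? "namespace-specific data page"
                                         : "real vdso_data";
                case 1: /* VVAR_TIMENS_PAGE_OFFSET */
                        return in_timens ? "real vdso_data" : "no mapping (SIGBUS)";
                default:
                        return "no mapping (SIGBUS)";
                }
        }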
static int vvar_mremap(const struct vm_special_mapping *sm,
struct vm_area_struct *new_vma)
{
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
return -EINVAL;
return 0;
}
@@ -139,9 +243,11 @@ static int __setup_additional_pages(enum vdso_abi abi,
unsigned long gp_flags = 0;
void *ret;

BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
/* Be sure to map the data page */
vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
if (IS_ERR_VALUE(vdso_base)) {
@@ -149,8 +255,8 @@ static int __setup_additional_pages(enum vdso_abi abi,
goto up_fail;
}

ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
VM_READ|VM_MAYREAD|VM_PFNMAP,
vdso_info[abi].dm);
if (IS_ERR(ret))
goto up_fail;
@@ -158,7 +264,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
gp_flags = VM_ARM64_BTI;

vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
mm->context.vdso = (void *)vdso_base;
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
VM_READ|VM_EXEC|gp_flags|
@@ -206,6 +312,8 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
#ifdef CONFIG_COMPAT_VDSO
[AA32_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
.mremap = vvar_mremap,
},
[AA32_MAP_VDSO] = {
.name = "[vdso]",
@@ -371,6 +479,8 @@ enum aarch64_map {
static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
[AA64_MAP_VVAR] = {
.name = "[vvar]",
.fault = vvar_fault,
.mremap = vvar_mremap,
},
[AA64_MAP_VDSO] = {
.name = "[vdso]",
...
@@ -17,7 +17,10 @@ OUTPUT_ARCH(aarch64)
SECTIONS
{
PROVIDE(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
#ifdef CONFIG_TIME_NS
PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
#endif
. = VDSO_LBASE + SIZEOF_HEADERS;

.hash : { *(.hash) } :text
...
@@ -17,7 +17,10 @@ OUTPUT_ARCH(arm)
SECTIONS
{
PROVIDE_HIDDEN(_vdso_data = . - __VVAR_PAGES * PAGE_SIZE);
#ifdef CONFIG_TIME_NS
PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
#endif
. = VDSO_LBASE + SIZEOF_HEADERS;

.hash : { *(.hash) } :text
...
@@ -264,15 +264,31 @@ static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_device *adev;
struct acpi_iort_named_component *ncomp;
struct device *nc_dev = dev;
/*
* Walk the device tree to find a device with an
* ACPI companion; there is no point in scanning
* IORT for a device matching a named component if
* the device does not have an ACPI companion to
* start with.
*/
do {
adev = ACPI_COMPANION(nc_dev);
if (adev)
break;
nc_dev = nc_dev->parent;
} while (nc_dev);
if (!adev)
goto out;

status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
if (ACPI_FAILURE(status)) {
dev_warn(nc_dev, "Can't get device full path name\n");
goto out;
}
@@ -534,7 +550,6 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
node = iort_get_iort_node(dev->fwnode);
if (node)
return node;
/*
* if not, then it should be a platform device defined in
* DSDT/SSDT (with Named Component node in IORT)
@@ -543,32 +558,29 @@ static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
iort_match_node_callback, dev);
}

pbus = to_pci_dev(dev)->bus;

return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &pbus->dev);
}
/** /**
* iort_msi_map_rid() - Map a MSI requester ID for a device * iort_msi_map_id() - Map a MSI input ID for a device
* @dev: The device for which the mapping is to be done. * @dev: The device for which the mapping is to be done.
* @req_id: The device requester ID. * @input_id: The device input ID.
* *
* Returns: mapped MSI RID on success, input requester ID otherwise * Returns: mapped MSI ID on success, input ID otherwise
*/ */
u32 iort_msi_map_rid(struct device *dev, u32 req_id) u32 iort_msi_map_id(struct device *dev, u32 input_id)
{ {
struct acpi_iort_node *node; struct acpi_iort_node *node;
u32 dev_id; u32 dev_id;
node = iort_find_dev_node(dev); node = iort_find_dev_node(dev);
if (!node) if (!node)
return req_id; return input_id;
iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE); iort_node_map_id(node, input_id, &dev_id, IORT_MSI_TYPE);
return dev_id; return dev_id;
} }
@@ -625,13 +637,13 @@ static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
 /**
  * iort_dev_find_its_id() - Find the ITS identifier for a device
  * @dev: The device.
- * @req_id: Device's requester ID
+ * @id: Device's ID
  * @idx: Index of the ITS identifier list.
  * @its_id: ITS identifier.
  *
  * Returns: 0 on success, appropriate error value otherwise
  */
-static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+static int iort_dev_find_its_id(struct device *dev, u32 id,
 				unsigned int idx, int *its_id)
 {
 	struct acpi_iort_its_group *its;
@@ -641,7 +653,7 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
 	if (!node)
 		return -ENXIO;
 
-	node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
+	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
 	if (!node)
 		return -ENXIO;
 
@@ -664,19 +676,20 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
  *
 * Returns: the MSI domain for this device, NULL otherwise
 */
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+					  enum irq_domain_bus_token bus_token)
 {
 	struct fwnode_handle *handle;
 	int its_id;
 
-	if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
+	if (iort_dev_find_its_id(dev, id, 0, &its_id))
 		return NULL;
 
 	handle = iort_find_domain_token(its_id);
 	if (!handle)
 		return NULL;
 
-	return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
+	return irq_find_matching_fwnode(handle, bus_token);
 }
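Pushing the bus token up to the caller is what lets non-PCI buses reuse this lookup. A sketch of a PCI-side caller, mirroring the drivers/pci/msi.c hunk later in this diff (the wrapper name is illustrative):

#include <linux/acpi_iort.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>

/* Before this change DOMAIN_BUS_PCI_MSI was hard-coded inside
 * iort_get_device_domain(); now each caller states which MSI domain
 * flavour it wants (the fsl-mc bus passes DOMAIN_BUS_FSL_MC_MSI). */
static struct irq_domain *example_pci_msi_domain(struct pci_dev *pdev)
{
	return iort_get_device_domain(&pdev->dev, pci_dev_id(pdev),
				      DOMAIN_BUS_PCI_MSI);
}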
 static void iort_set_device_domain(struct device *dev,
@@ -965,19 +978,54 @@ static void iort_named_component_init(struct device *dev,
 			nc->node_flags);
 }
 
+static int iort_nc_iommu_map(struct device *dev, struct acpi_iort_node *node)
+{
+	struct acpi_iort_node *parent;
+	int err = -ENODEV, i = 0;
+	u32 streamid = 0;
+
+	do {
+		parent = iort_node_map_platform_id(node, &streamid,
+						   IORT_IOMMU_TYPE,
+						   i++);
+
+		if (parent)
+			err = iort_iommu_xlate(dev, parent, streamid);
+	} while (parent && !err);
+
+	return err;
+}
+
+static int iort_nc_iommu_map_id(struct device *dev,
+				struct acpi_iort_node *node,
+				const u32 *in_id)
+{
+	struct acpi_iort_node *parent;
+	u32 streamid;
+
+	parent = iort_node_map_id(node, *in_id, &streamid, IORT_IOMMU_TYPE);
+	if (parent)
+		return iort_iommu_xlate(dev, parent, streamid);
+
+	return -ENODEV;
+}
+
 /**
- * iort_iommu_configure - Set-up IOMMU configuration for a device.
+ * iort_iommu_configure_id - Set-up IOMMU configuration for a device.
  *
  * @dev: device to configure
+ * @id_in: optional input id const value pointer
  *
  * Returns: iommu_ops pointer on configuration success
  *          NULL on configuration failure
  */
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+						const u32 *id_in)
 {
-	struct acpi_iort_node *node, *parent;
+	struct acpi_iort_node *node;
 	const struct iommu_ops *ops;
-	u32 streamid = 0;
 	int err = -ENODEV;
 
 	/*
@@ -1006,21 +1054,13 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 		if (fwspec && iort_pci_rc_supports_ats(node))
 			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
 	} else {
-		int i = 0;
-
 		node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
 				      iort_match_node_callback, dev);
 		if (!node)
 			return NULL;
 
-		do {
-			parent = iort_node_map_platform_id(node, &streamid,
-							   IORT_IOMMU_TYPE,
-							   i++);
-
-			if (parent)
-				err = iort_iommu_xlate(dev, parent, streamid);
-		} while (parent && !err);
+		err = id_in ? iort_nc_iommu_map_id(dev, node, id_in) :
+			      iort_nc_iommu_map(dev, node);
 
 		if (!err)
 			iort_named_component_init(dev, node);
@@ -1045,6 +1085,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
 
 	return ops;
 }
+
 #else
 static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
 { return NULL; }
@@ -1053,7 +1094,8 @@ static inline int iort_add_device_replay(const struct iommu_ops *ops,
 { return 0; }
 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
 { return 0; }
-const struct iommu_ops *iort_iommu_configure(struct device *dev)
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+						const u32 *input_id)
 { return NULL; }
 #endif
......
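The two named-component helpers above differ only in where the stream ID comes from, and iort_iommu_configure_id() selects between them on the id_in pointer. A minimal sketch of a caller, assuming a hypothetical bus that may or may not know its device's input ID (the wrapper name and 'icid' are illustrative):

#include <linux/acpi_iort.h>
#include <linux/iommu.h>

/* NULL id_in: probe every platform ID the IORT named component lists
 * (the old iort_iommu_configure() behaviour). Non-NULL id_in: map
 * exactly one bus-provided input ID, as fsl-mc does with its ICID. */
static const struct iommu_ops *example_nc_configure(struct device *dev,
						    bool have_icid, u32 icid)
{
	if (have_icid)
		return iort_iommu_configure_id(dev, &icid);

	return iort_iommu_configure_id(dev, NULL);
}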
@@ -1457,8 +1457,10 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
  * acpi_dma_configure - Set-up DMA configuration for the device.
  * @dev: The pointer to the device
  * @attr: device dma attributes
+ * @input_id: input device id const value pointer
  */
-int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
+int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
+			  const u32 *input_id)
 {
 	const struct iommu_ops *iommu;
 	u64 dma_addr = 0, size = 0;
@@ -1470,7 +1472,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 
 	iort_dma_setup(dev, &dma_addr, &size);
 
-	iommu = iort_iommu_configure(dev);
+	iommu = iort_iommu_configure_id(dev, input_id);
 	if (PTR_ERR(iommu) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
 
@@ -1479,7 +1481,7 @@ int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(acpi_dma_configure);
+EXPORT_SYMBOL_GPL(acpi_dma_configure_id);
 
 static void acpi_init_coherency(struct acpi_device *adev)
 {
......
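acpi_dma_configure() itself survives as a NULL-ID static inline wrapper (see the include/linux/acpi.h hunk further down), so existing callers are untouched, while a bus that knows its input ID can pass it through. A short sketch, assuming a hypothetical bus device whose hardware ID 'hw_id' is already known (the fsl-mc hunk below does this with the ICID):

#include <linux/acpi.h>

static int example_bus_dma_setup(struct device *dev, u32 hw_id)
{
	return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &hw_id);
}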
@@ -592,6 +592,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
 	bool mc_io_created = false;
 	bool msi_domain_set = false;
 	u16 major_ver, minor_ver;
+	struct irq_domain *mc_msi_domain;
 
 	if (!is_fsl_mc_bus_dprc(mc_dev))
 		return -EINVAL;
@@ -621,31 +622,15 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
 			return error;
 
 		mc_io_created = true;
+	}
 
-		/*
-		 * Inherit parent MSI domain:
-		 */
-		dev_set_msi_domain(&mc_dev->dev,
-				   dev_get_msi_domain(parent_dev));
-		msi_domain_set = true;
+	mc_msi_domain = fsl_mc_find_msi_domain(&mc_dev->dev);
+	if (!mc_msi_domain) {
+		dev_warn(&mc_dev->dev,
+			 "WARNING: MC bus without interrupt support\n");
 	} else {
-		/*
-		 * This is a root DPRC
-		 */
-		struct irq_domain *mc_msi_domain;
-
-		if (dev_is_fsl_mc(parent_dev))
-			return -EINVAL;
-
-		error = fsl_mc_find_msi_domain(parent_dev,
-					       &mc_msi_domain);
-		if (error < 0) {
-			dev_warn(&mc_dev->dev,
-				 "WARNING: MC bus without interrupt support\n");
-		} else {
-			dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
-			msi_domain_set = true;
-		}
+		dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
+		msi_domain_set = true;
 	}
 
 	error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
......
@@ -18,6 +18,8 @@
 #include <linux/bitops.h>
 #include <linux/msi.h>
 #include <linux/dma-mapping.h>
+#include <linux/acpi.h>
+#include <linux/iommu.h>
 
 #include "fsl-mc-private.h"
 
@@ -38,6 +40,7 @@ struct fsl_mc {
 	struct fsl_mc_device *root_mc_bus_dev;
 	u8 num_translation_ranges;
 	struct fsl_mc_addr_translation_range *translation_ranges;
+	void *fsl_mc_regs;
 };
 
 /**
@@ -56,6 +59,10 @@ struct fsl_mc_addr_translation_range {
 	phys_addr_t start_phys_addr;
 };
 
+#define FSL_MC_FAPR	0x28
+#define MC_FAPR_PL	BIT(18)
+#define MC_FAPR_BMT	BIT(17)
+
 /**
  * fsl_mc_bus_match - device to driver matching callback
  * @dev: the fsl-mc device to match against
@@ -118,11 +125,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 static int fsl_mc_dma_configure(struct device *dev)
 {
 	struct device *dma_dev = dev;
+	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+	u32 input_id = mc_dev->icid;
 
 	while (dev_is_fsl_mc(dma_dev))
 		dma_dev = dma_dev->parent;
 
-	return of_dma_configure(dev, dma_dev->of_node, 0);
+	if (dev_of_node(dma_dev))
+		return of_dma_configure_id(dev, dma_dev->of_node, 0, &input_id);
+
+	return acpi_dma_configure_id(dev, DEV_DMA_COHERENT, &input_id);
 }
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -368,8 +380,8 @@ EXPORT_SYMBOL_GPL(fsl_mc_get_version);
 /**
  * fsl_mc_get_root_dprc - function to traverse to the root dprc
  */
-static void fsl_mc_get_root_dprc(struct device *dev,
-				 struct device **root_dprc_dev)
+void fsl_mc_get_root_dprc(struct device *dev,
+			  struct device **root_dprc_dev)
 {
 	if (!dev) {
 		*root_dprc_dev = NULL;
@@ -863,8 +875,11 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	struct fsl_mc_io *mc_io = NULL;
 	int container_id;
 	phys_addr_t mc_portal_phys_addr;
-	u32 mc_portal_size;
-	struct resource res;
+	u32 mc_portal_size, mc_stream_id;
+	struct resource *plat_res;
+
+	if (!iommu_present(&fsl_mc_bus_type))
+		return -EPROBE_DEFER;
 
 	mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL);
 	if (!mc)
@@ -872,19 +887,33 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 
 	platform_set_drvdata(pdev, mc);
 
+	plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	mc->fsl_mc_regs = devm_ioremap_resource(&pdev->dev, plat_res);
+	if (IS_ERR(mc->fsl_mc_regs))
+		return PTR_ERR(mc->fsl_mc_regs);
+
+	if (IS_ENABLED(CONFIG_ACPI) && !dev_of_node(&pdev->dev)) {
+		mc_stream_id = readl(mc->fsl_mc_regs + FSL_MC_FAPR);
+		/*
+		 * HW ORs the PL and BMT bit, places the result in bit 15 of
+		 * the StreamID and ORs in the ICID. Calculate it accordingly.
+		 */
+		mc_stream_id = (mc_stream_id & 0xffff) |
+				((mc_stream_id & (MC_FAPR_PL | MC_FAPR_BMT)) ?
+					0x4000 : 0);
+		error = acpi_dma_configure_id(&pdev->dev, DEV_DMA_COHERENT,
+					      &mc_stream_id);
+		if (error)
+			dev_warn(&pdev->dev, "failed to configure dma: %d.\n",
+				 error);
+	}
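A worked example of the FAPR arithmetic above, as a stand-alone program with made-up register contents:

#include <stdint.h>
#include <stdio.h>

#define MC_FAPR_PL  (1u << 18)
#define MC_FAPR_BMT (1u << 17)

/* Illustrative FAPR value: ICID = 0x0123 with the PL bit set. */
int main(void)
{
	uint32_t fapr = 0x00040123;
	uint32_t stream_id = (fapr & 0xffff) |
			     ((fapr & (MC_FAPR_PL | MC_FAPR_BMT)) ? 0x4000 : 0);

	/* Prints 0x4123; with PL and BMT both clear it would be 0x0123. */
	printf("stream ID = 0x%x\n", stream_id);
	return 0;
}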
 	/*
 	 * Get physical address of MC portal for the root DPRC:
 	 */
-	error = of_address_to_resource(pdev->dev.of_node, 0, &res);
-	if (error < 0) {
-		dev_err(&pdev->dev,
-			"of_address_to_resource() failed for %pOF\n",
-			pdev->dev.of_node);
-		return error;
-	}
-
-	mc_portal_phys_addr = res.start;
-	mc_portal_size = resource_size(&res);
+	plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mc_portal_phys_addr = plat_res->start;
+	mc_portal_size = resource_size(plat_res);
 	error = fsl_create_mc_io(&pdev->dev, mc_portal_phys_addr,
 				 mc_portal_size, NULL,
 				 FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, &mc_io);
@@ -901,11 +930,13 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 	dev_info(&pdev->dev, "MC firmware version: %u.%u.%u\n",
 		 mc_version.major, mc_version.minor, mc_version.revision);
 
-	error = get_mc_addr_translation_ranges(&pdev->dev,
-					       &mc->translation_ranges,
-					       &mc->num_translation_ranges);
-	if (error < 0)
-		goto error_cleanup_mc_io;
+	if (dev_of_node(&pdev->dev)) {
+		error = get_mc_addr_translation_ranges(&pdev->dev,
+						&mc->translation_ranges,
+						&mc->num_translation_ranges);
+		if (error < 0)
+			goto error_cleanup_mc_io;
+	}
 
 	error = dprc_get_container_id(mc_io, 0, &container_id);
 	if (error < 0) {
@@ -932,6 +963,7 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
 		goto error_cleanup_mc_io;
 
 	mc->root_mc_bus_dev = mc_bus_dev;
+	mc_bus_dev->dev.fwnode = pdev->dev.fwnode;
 	return 0;
 
 error_cleanup_mc_io:
@@ -965,11 +997,18 @@ static const struct of_device_id fsl_mc_bus_match_table[] = {
 
 MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table);
 
+static const struct acpi_device_id fsl_mc_bus_acpi_match_table[] = {
+	{"NXP0008", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, fsl_mc_bus_acpi_match_table);
+
 static struct platform_driver fsl_mc_bus_driver = {
 	.driver = {
 		   .name = "fsl_mc_bus",
 		   .pm = NULL,
 		   .of_match_table = fsl_mc_bus_match_table,
+		   .acpi_match_table = fsl_mc_bus_acpi_match_table,
 		   },
 	.probe = fsl_mc_bus_probe,
 	.remove = fsl_mc_bus_remove,
......
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/msi.h>
+#include <linux/acpi_iort.h>
 
 #include "fsl-mc-private.h"
 
@@ -177,23 +178,36 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
 	return domain;
 }
 
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
-			   struct irq_domain **mc_msi_domain)
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
 {
+	struct device *root_dprc_dev;
+	struct device *bus_dev;
 	struct irq_domain *msi_domain;
-	struct device_node *mc_of_node = mc_platform_dev->of_node;
+	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
 
-	msi_domain = of_msi_get_domain(mc_platform_dev, mc_of_node,
-				       DOMAIN_BUS_FSL_MC_MSI);
-	if (!msi_domain) {
-		pr_err("Unable to find fsl-mc MSI domain for %pOF\n",
-		       mc_of_node);
+	fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+	bus_dev = root_dprc_dev->parent;
 
-		return -ENOENT;
+	if (bus_dev->of_node) {
+		msi_domain = of_msi_map_get_device_domain(dev,
+							  mc_dev->icid,
+							  DOMAIN_BUS_FSL_MC_MSI);
+
+		/*
+		 * if the msi-map property is missing assume that all the
+		 * child containers inherit the domain from the parent
+		 */
+		if (!msi_domain)
+			msi_domain = of_msi_get_domain(bus_dev,
+						       bus_dev->of_node,
+						       DOMAIN_BUS_FSL_MC_MSI);
+	} else {
+		msi_domain = iort_get_device_domain(dev, mc_dev->icid,
+						    DOMAIN_BUS_FSL_MC_MSI);
 	}
 
-	*mc_msi_domain = msi_domain;
-	return 0;
+	return msi_domain;
 }
 
 static void fsl_mc_msi_free_descs(struct device *dev)
......
@@ -595,8 +595,7 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
 
 void fsl_mc_msi_domain_free_irqs(struct device *dev);
 
-int fsl_mc_find_msi_domain(struct device *mc_platform_dev,
-			   struct irq_domain **mc_msi_domain);
+struct irq_domain *fsl_mc_find_msi_domain(struct device *dev);
 
 int fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus,
 			     unsigned int irq_count);
@@ -613,6 +612,9 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
 
 bool fsl_mc_is_root_dprc(struct device *dev);
 
+void fsl_mc_get_root_dprc(struct device *dev,
+			  struct device **root_dprc_dev);
+
 struct fsl_mc_device *fsl_mc_device_lookup(struct fsl_mc_obj_desc *obj_desc,
 					   struct fsl_mc_device *mc_bus_dev);
......
@@ -118,46 +118,66 @@ static int of_iommu_xlate(struct device *dev,
 	return ret;
 }
 
-struct of_pci_iommu_alias_info {
-	struct device *dev;
-	struct device_node *np;
-};
-
-static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+static int of_iommu_configure_dev_id(struct device_node *master_np,
+				     struct device *dev,
+				     const u32 *id)
 {
-	struct of_pci_iommu_alias_info *info = data;
 	struct of_phandle_args iommu_spec = { .args_count = 1 };
 	int err;
 
-	err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
-			 &iommu_spec.np, iommu_spec.args);
+	err = of_map_id(master_np, *id, "iommu-map",
+			 "iommu-map-mask", &iommu_spec.np,
+			 iommu_spec.args);
 	if (err)
 		return err == -ENODEV ? NO_IOMMU : err;
 
-	err = of_iommu_xlate(info->dev, &iommu_spec);
+	err = of_iommu_xlate(dev, &iommu_spec);
 	of_node_put(iommu_spec.np);
 	return err;
 }
 
-static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
-				struct device_node *master_np)
+static int of_iommu_configure_dev(struct device_node *master_np,
+				  struct device *dev)
 {
-	struct of_phandle_args iommu_spec = { .args_count = 1 };
-	int err;
+	struct of_phandle_args iommu_spec;
+	int err = NO_IOMMU, idx = 0;
 
-	err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
-			 "iommu-map-mask", &iommu_spec.np,
-			 iommu_spec.args);
-	if (err)
-		return err == -ENODEV ? NO_IOMMU : err;
+	while (!of_parse_phandle_with_args(master_np, "iommus",
+					   "#iommu-cells",
+					   idx, &iommu_spec)) {
+		err = of_iommu_xlate(dev, &iommu_spec);
+		of_node_put(iommu_spec.np);
+		idx++;
+		if (err)
+			break;
+	}
 
-	err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
-	of_node_put(iommu_spec.np);
 	return err;
 }
 
+struct of_pci_iommu_alias_info {
+	struct device *dev;
+	struct device_node *np;
+};
+
+static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+	struct of_pci_iommu_alias_info *info = data;
+	u32 input_id = alias;
+
+	return of_iommu_configure_dev_id(info->np, info->dev, &input_id);
+}
+
+static int of_iommu_configure_device(struct device_node *master_np,
+				     struct device *dev, const u32 *id)
+{
+	return (id) ? of_iommu_configure_dev_id(master_np, dev, id) :
+		      of_iommu_configure_dev(master_np, dev);
+}
+
 const struct iommu_ops *of_iommu_configure(struct device *dev,
-					   struct device_node *master_np)
+					   struct device_node *master_np,
+					   const u32 *id)
 {
 	const struct iommu_ops *ops = NULL;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -188,21 +208,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
 		pci_request_acs();
 		err = pci_for_each_dma_alias(to_pci_dev(dev),
 					     of_pci_iommu_init, &info);
-	} else if (dev_is_fsl_mc(dev)) {
-		err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
 	} else {
-		struct of_phandle_args iommu_spec;
-		int idx = 0;
-
-		while (!of_parse_phandle_with_args(master_np, "iommus",
-						   "#iommu-cells",
-						   idx, &iommu_spec)) {
-			err = of_iommu_xlate(dev, &iommu_spec);
-			of_node_put(iommu_spec.np);
-			idx++;
-			if (err)
-				break;
-		}
+		err = of_iommu_configure_device(master_np, dev, id);
 
 		fwspec = dev_iommu_fwspec_get(dev);
 		if (!err && fwspec)
......
@@ -7,6 +7,8 @@
  *
  */
 
+#include <linux/acpi.h>
+#include <linux/acpi_iort.h>
 #include <linux/of_device.h>
 #include <linux/of_address.h>
 #include <linux/irq.h>
@@ -23,6 +25,19 @@ static struct irq_chip its_msi_irq_chip = {
 	.irq_set_affinity = msi_domain_set_affinity
 };
 
+static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
+					struct fsl_mc_device *mc_dev)
+{
+	struct device_node *of_node;
+	u32 out_id;
+
+	of_node = irq_domain_get_of_node(domain);
+	out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) :
+			iort_msi_map_id(&mc_dev->dev, mc_dev->icid);
+
+	return out_id;
+}
+
 static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
 				  struct device *dev,
 				  int nvec, msi_alloc_info_t *info)
@@ -43,7 +58,8 @@ static int its_fsl_mc_msi_prepare(struct irq_domain *msi_domain,
 	 * NOTE: This device id corresponds to the IOMMU stream ID
 	 * associated with the DPRC object (ICID).
 	 */
-	info->scratchpad[0].ul = mc_bus_dev->icid;
+	info->scratchpad[0].ul = fsl_mc_msi_domain_get_msi_id(msi_domain,
+							      mc_bus_dev);
 	msi_info = msi_get_domain_info(msi_domain->parent);
 
 	/* Allocate at least 32 MSIs, and always as a power of 2 */
@@ -66,12 +82,71 @@ static const struct of_device_id its_device_id[] = {
 	{},
 };
 
-static int __init its_fsl_mc_msi_init(void)
+static void __init its_fsl_mc_msi_init_one(struct fwnode_handle *handle,
+					   const char *name)
 {
-	struct device_node *np;
 	struct irq_domain *parent;
 	struct irq_domain *mc_msi_domain;
 
+	parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
+	if (!parent || !msi_get_domain_info(parent)) {
+		pr_err("%s: unable to locate ITS domain\n", name);
+		return;
+	}
+
+	mc_msi_domain = fsl_mc_msi_create_irq_domain(handle,
+						&its_fsl_mc_msi_domain_info,
+						parent);
+	if (!mc_msi_domain) {
+		pr_err("%s: unable to create fsl-mc domain\n", name);
+		return;
+	}
+
+	pr_info("fsl-mc MSI: %s domain created\n", name);
+}
+
+#ifdef CONFIG_ACPI
+static int __init
+its_fsl_mc_msi_parse_madt(union acpi_subtable_headers *header,
+			  const unsigned long end)
+{
+	struct acpi_madt_generic_translator *its_entry;
+	struct fwnode_handle *dom_handle;
+	const char *node_name;
+	int err = 0;
+
+	its_entry = (struct acpi_madt_generic_translator *)header;
+	node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
+			      (long)its_entry->base_address);
+	dom_handle = iort_find_domain_token(its_entry->translation_id);
+	if (!dom_handle) {
+		pr_err("%s: Unable to locate ITS domain handle\n", node_name);
+		err = -ENXIO;
+		goto out;
+	}
+
+	its_fsl_mc_msi_init_one(dom_handle, node_name);
+
+out:
+	kfree(node_name);
+	return err;
+}
+
+static void __init its_fsl_mc_acpi_msi_init(void)
+{
+	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+			      its_fsl_mc_msi_parse_madt, 0);
+}
+#else
+static inline void its_fsl_mc_acpi_msi_init(void) { }
+#endif
+
+static void __init its_fsl_mc_of_msi_init(void)
+{
+	struct device_node *np;
+
 	for (np = of_find_matching_node(NULL, its_device_id); np;
 	     np = of_find_matching_node(np, its_device_id)) {
 		if (!of_device_is_available(np))
@@ -79,23 +154,15 @@ static int __init its_fsl_mc_msi_init(void)
 		if (!of_property_read_bool(np, "msi-controller"))
 			continue;
 
-		parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS);
-		if (!parent || !msi_get_domain_info(parent)) {
-			pr_err("%pOF: unable to locate ITS domain\n", np);
-			continue;
-		}
-
-		mc_msi_domain = fsl_mc_msi_create_irq_domain(
-						of_node_to_fwnode(np),
-						&its_fsl_mc_msi_domain_info,
-						parent);
-		if (!mc_msi_domain) {
-			pr_err("%pOF: unable to create fsl-mc domain\n", np);
-			continue;
-		}
-
-		pr_info("fsl-mc MSI: %pOF domain created\n", np);
+		its_fsl_mc_msi_init_one(of_node_to_fwnode(np),
+					np->full_name);
 	}
+}
 
+static int __init its_fsl_mc_msi_init(void)
+{
+	its_fsl_mc_of_msi_init();
+	its_fsl_mc_acpi_msi_init();
+
 	return 0;
 }
......
@@ -2201,15 +2201,15 @@ int of_find_last_cache_level(unsigned int cpu)
 }
 
 /**
- * of_map_rid - Translate a requester ID through a downstream mapping.
+ * of_map_id - Translate an ID through a downstream mapping.
  * @np: root complex device node.
- * @rid: device requester ID to map.
+ * @id: device ID to map.
  * @map_name: property name of the map to use.
  * @map_mask_name: optional property name of the mask to use.
  * @target: optional pointer to a target device node.
  * @id_out: optional pointer to receive the translated ID.
 *
- * Given a device requester ID, look up the appropriate implementation-defined
+ * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
@@ -2219,11 +2219,11 @@ int of_find_last_cache_level(unsigned int cpu)
 *
 * Return: 0 on success or a standard error code on failure.
 */
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
 	       const char *map_name, const char *map_mask_name,
 	       struct device_node **target, u32 *id_out)
 {
-	u32 map_mask, masked_rid;
+	u32 map_mask, masked_id;
 	int map_len;
 	const __be32 *map = NULL;
@@ -2235,7 +2235,7 @@ int of_map_rid(struct device_node *np, u32 rid,
 		if (target)
 			return -ENODEV;
 		/* Otherwise, no map implies no translation */
-		*id_out = rid;
+		*id_out = id;
 		return 0;
 	}
@@ -2255,22 +2255,22 @@ int of_map_rid(struct device_node *np, u32 rid,
 	if (map_mask_name)
 		of_property_read_u32(np, map_mask_name, &map_mask);
 
-	masked_rid = map_mask & rid;
+	masked_id = map_mask & id;
 	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
 		struct device_node *phandle_node;
-		u32 rid_base = be32_to_cpup(map + 0);
+		u32 id_base = be32_to_cpup(map + 0);
 		u32 phandle = be32_to_cpup(map + 1);
 		u32 out_base = be32_to_cpup(map + 2);
-		u32 rid_len = be32_to_cpup(map + 3);
+		u32 id_len = be32_to_cpup(map + 3);
 
-		if (rid_base & ~map_mask) {
-			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+		if (id_base & ~map_mask) {
+			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
 			       np, map_name, map_name,
-			       map_mask, rid_base);
+			       map_mask, id_base);
 			return -EFAULT;
 		}
 
-		if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+		if (masked_id < id_base || masked_id >= id_base + id_len)
 			continue;
 
 		phandle_node = of_find_node_by_phandle(phandle);
@@ -2288,20 +2288,20 @@ int of_map_rid(struct device_node *np, u32 rid,
 		}
 
 		if (id_out)
-			*id_out = masked_rid - rid_base + out_base;
+			*id_out = masked_id - id_base + out_base;
 
-		pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
-			 np, map_name, map_mask, rid_base, out_base,
-			 rid_len, rid, masked_rid - rid_base + out_base);
+		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+			 np, map_name, map_mask, id_base, out_base,
+			 id_len, id, masked_id - id_base + out_base);
 		return 0;
 	}
 
-	pr_info("%pOF: no %s translation for rid 0x%x on %pOF\n", np, map_name,
-		rid, target && *target ? *target : NULL);
+	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+		id, target && *target ? *target : NULL);
 
 	/* Bypasses translation */
 	if (id_out)
-		*id_out = rid;
+		*id_out = id;
 	return 0;
 }
-EXPORT_SYMBOL_GPL(of_map_rid);
+EXPORT_SYMBOL_GPL(of_map_id);
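The translation above is plain interval arithmetic over the four-cell map entries. A stand-alone worked example with made-up values, modelling one "msi-map" entry per the binding:

#include <stdint.h>
#include <stdio.h>

/* One map entry: <id_base phandle out_base id_len>, plus a mask.
 * All numbers below are illustrative. */
int main(void)
{
	uint32_t map_mask = 0xffff;	/* msi-map-mask		*/
	uint32_t id_base = 0x0100, out_base = 0x11000, id_len = 0x200;
	uint32_t id = 0x10142;		/* untranslated input ID */
	uint32_t masked_id = id & map_mask;	/* 0x0142 */

	if (masked_id >= id_base && masked_id < id_base + id_len)
		/* 0x0142 - 0x0100 + 0x11000 = 0x11042 */
		printf("0x%x -> 0x%x\n", id, masked_id - id_base + out_base);
	return 0;
}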
@@ -78,6 +78,7 @@ int of_device_add(struct platform_device *ofdev)
  * @np: Pointer to OF node having DMA configuration
  * @force_dma:  Whether device is to be set up by of_dma_configure() even if
  *		DMA capability is not explicitly described by firmware.
+ * @id: Optional const pointer value input id
  *
  * Try to get devices's DMA configuration from DT and update it
  * accordingly.
@@ -86,7 +87,8 @@ int of_device_add(struct platform_device *ofdev)
 * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
 * to fix up DMA configuration.
 */
-int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
+int of_dma_configure_id(struct device *dev, struct device_node *np,
+			bool force_dma, const u32 *id)
 {
 	u64 dma_addr, paddr, size = 0;
 	int ret;
@@ -160,7 +162,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
 	dev_dbg(dev, "device is%sdma coherent\n",
 		coherent ? " " : " not ");
 
-	iommu = of_iommu_configure(dev, np);
+	iommu = of_iommu_configure(dev, np, id);
 	if (PTR_ERR(iommu) == -EPROBE_DEFER)
 		return -EPROBE_DEFER;
@@ -171,7 +173,7 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
 	return 0;
 }
-EXPORT_SYMBOL_GPL(of_dma_configure);
+EXPORT_SYMBOL_GPL(of_dma_configure_id);
 
 int of_device_register(struct platform_device *pdev)
 {
......
@@ -576,55 +576,57 @@ void __init of_irq_init(const struct of_device_id *matches)
 	}
 }
 
-static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
-			    u32 rid_in)
+static u32 __of_msi_map_id(struct device *dev, struct device_node **np,
+			    u32 id_in)
 {
 	struct device *parent_dev;
-	u32 rid_out = rid_in;
+	u32 id_out = id_in;
 
 	/*
 	 * Walk up the device parent links looking for one with a
 	 * "msi-map" property.
 	 */
 	for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
-		if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
-				"msi-map-mask", np, &rid_out))
+		if (!of_map_id(parent_dev->of_node, id_in, "msi-map",
+				"msi-map-mask", np, &id_out))
 			break;
-	return rid_out;
+	return id_out;
 }
 
 /**
- * of_msi_map_rid - Map a MSI requester ID for a device.
+ * of_msi_map_id - Map a MSI ID for a device.
  * @dev: device for which the mapping is to be done.
 * @msi_np: device node of the expected msi controller.
- * @rid_in: unmapped MSI requester ID for the device.
+ * @id_in: unmapped MSI ID for the device.
 *
 * Walk up the device hierarchy looking for devices with a "msi-map"
- * property. If found, apply the mapping to @rid_in.
+ * property. If found, apply the mapping to @id_in.
 *
- * Returns the mapped MSI requester ID.
+ * Returns the mapped MSI ID.
 */
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in)
 {
-	return __of_msi_map_rid(dev, &msi_np, rid_in);
+	return __of_msi_map_id(dev, &msi_np, id_in);
 }
 
 /**
 * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
 * @dev: device for which the mapping is to be done.
- * @rid: Requester ID for the device.
+ * @id: Device ID.
+ * @bus_token: Bus token
 *
 * Walk up the device hierarchy looking for devices with a "msi-map"
 * property.
 *
 * Returns: the MSI domain for this device (or NULL on failure)
 */
-struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
+struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id,
+						u32 bus_token)
 {
 	struct device_node *np = NULL;
 
-	__of_msi_map_rid(dev, &np, rid);
-	return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
+	__of_msi_map_id(dev, &np, id);
+	return irq_find_matching_host(np, bus_token);
 }
 
 /**
......
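The msi-map walk above is what lets a device ID living several buses deep (a PCI RID or an fsl-mc ICID) still resolve to an MSI domain, with the bus token now chosen by the caller. A brief sketch of a consumer (the function name and 'hw_id' are illustrative):

#include <linux/of_irq.h>
#include <linux/irqdomain.h>

/* Translate a hardware ID through any ancestor's "msi-map" property
 * and fetch the matching MSI domain in one call. */
static struct irq_domain *example_msi_domain(struct device *dev, u32 hw_id)
{
	return of_msi_map_get_device_domain(dev, hw_id,
					    DOMAIN_BUS_FSL_MC_MSI);
}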
@@ -1535,8 +1535,8 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
 	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
 
 	of_node = irq_domain_get_of_node(domain);
-	rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
-			iort_msi_map_rid(&pdev->dev, rid);
+	rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+			iort_msi_map_id(&pdev->dev, rid);
 
 	return rid;
 }
@@ -1556,9 +1556,10 @@ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
 	u32 rid = pci_dev_id(pdev);
 
 	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
-	dom = of_msi_map_get_device_domain(&pdev->dev, rid);
+	dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
 	if (!dom)
-		dom = iort_get_device_domain(&pdev->dev, rid);
+		dom = iort_get_device_domain(&pdev->dev, rid,
+					     DOMAIN_BUS_PCI_MSI);
 	return dom;
 }
 #endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
@@ -755,8 +755,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
 		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
 	};
 
-	res_0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	smmu_pmu->reg_base = devm_ioremap_resource(dev, res_0);
+	smmu_pmu->reg_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res_0);
 	if (IS_ERR(smmu_pmu->reg_base))
 		return PTR_ERR(smmu_pmu->reg_base);
......
@@ -588,8 +588,13 @@ bool acpi_dma_supported(struct acpi_device *adev);
 enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
 int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
 		       u64 *size);
-int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
+			  const u32 *input_id);
+static inline int acpi_dma_configure(struct device *dev,
+				     enum dev_dma_attr attr)
+{
+	return acpi_dma_configure_id(dev, attr, NULL);
+}
 struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
 					   u64 address, bool check_children);
 int acpi_is_root_bridge(acpi_handle);
......
@@ -905,6 +905,13 @@ static inline int acpi_dma_configure(struct device *dev,
 	return 0;
 }
 
+static inline int acpi_dma_configure_id(struct device *dev,
+					enum dev_dma_attr attr,
+					const u32 *input_id)
+{
+	return 0;
+}
+
 #define ACPI_PTR(_ptr)	(NULL)
 
 static inline void acpi_device_set_enumerated(struct acpi_device *adev)
......
@@ -28,27 +28,29 @@ void iort_deregister_domain_token(int trans_id);
 struct fwnode_handle *iort_find_domain_token(int trans_id);
 #ifdef CONFIG_ACPI_IORT
 void acpi_iort_init(void);
-u32 iort_msi_map_rid(struct device *dev, u32 req_id);
-struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+u32 iort_msi_map_id(struct device *dev, u32 id);
+struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
+					  enum irq_domain_bus_token bus_token);
 void acpi_configure_pmsi_domain(struct device *dev);
 int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
 /* IOMMU interface */
 void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
-const struct iommu_ops *iort_iommu_configure(struct device *dev);
+const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
+						const u32 *id_in);
 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
 #else
 static inline void acpi_iort_init(void) { }
-static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
-{ return req_id; }
-static inline struct irq_domain *iort_get_device_domain(struct device *dev,
-							 u32 req_id)
+static inline u32 iort_msi_map_id(struct device *dev, u32 id)
+{ return id; }
+static inline struct irq_domain *iort_get_device_domain(
+	struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
 { return NULL; }
 static inline void acpi_configure_pmsi_domain(struct device *dev) { }
 /* IOMMU interface */
 static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
 				  u64 *size) { }
-static inline const struct iommu_ops *iort_iommu_configure(
-	struct device *dev)
+static inline const struct iommu_ops *iort_iommu_configure_id(
+	struct device *dev, const u32 *id_in)
 { return NULL; }
 static inline
 int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
......
@@ -554,7 +554,7 @@ bool of_console_check(struct device_node *dn, char *name, int index);
 
 extern int of_cpu_node_to_id(struct device_node *np);
 
-int of_map_rid(struct device_node *np, u32 rid,
+int of_map_id(struct device_node *np, u32 id,
 	       const char *map_name, const char *map_mask_name,
 	       struct device_node **target, u32 *id_out);
 
@@ -978,7 +978,7 @@ static inline int of_cpu_node_to_id(struct device_node *np)
 	return -ENODEV;
 }
 
-static inline int of_map_rid(struct device_node *np, u32 rid,
+static inline int of_map_id(struct device_node *np, u32 id,
 			     const char *map_name, const char *map_mask_name,
 			     struct device_node **target, u32 *id_out)
 {
......
@@ -55,9 +55,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
 	return of_node_get(cpu_dev->of_node);
 }
 
-int of_dma_configure(struct device *dev,
-		     struct device_node *np,
-		     bool force_dma);
+int of_dma_configure_id(struct device *dev,
+			struct device_node *np,
+			bool force_dma, const u32 *id);
+static inline int of_dma_configure(struct device *dev,
+				   struct device_node *np,
+				   bool force_dma)
+{
+	return of_dma_configure_id(dev, np, force_dma, NULL);
+}
 #else /* CONFIG_OF */
 
 static inline int of_driver_match_device(struct device *dev,
@@ -106,6 +112,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
 	return NULL;
 }
 
+static inline int of_dma_configure_id(struct device *dev,
+				      struct device_node *np,
+				      bool force_dma, const u32 *id)
+{
+	return 0;
+}
 static inline int of_dma_configure(struct device *dev,
 				   struct device_node *np,
 				   bool force_dma)
......
@@ -13,7 +13,8 @@ extern int of_get_dma_window(struct device_node *dn, const char *prefix,
 			     size_t *size);
 
 extern const struct iommu_ops *of_iommu_configure(struct device *dev,
-					struct device_node *master_np);
+					struct device_node *master_np,
+					const u32 *id);
 
 #else
 
@@ -25,7 +26,8 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
 }
 
 static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
-					 struct device_node *master_np)
+					 struct device_node *master_np,
+					 const u32 *id)
 {
 	return NULL;
 }
......
@@ -52,9 +52,10 @@ extern struct irq_domain *of_msi_get_domain(struct device *dev,
 					    struct device_node *np,
 					    enum irq_domain_bus_token token);
 extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
-							u32 rid);
+							u32 id,
+							u32 bus_token);
 extern void of_msi_configure(struct device *dev, struct device_node *np);
-u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in);
+u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
 #else
 static inline int of_irq_count(struct device_node *dev)
 {
@@ -85,17 +86,17 @@ static inline struct irq_domain *of_msi_get_domain(struct device *dev,
 	return NULL;
 }
 static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
-							      u32 rid)
+						u32 id, u32 bus_token)
 {
 	return NULL;
 }
 static inline void of_msi_configure(struct device *dev, struct device_node *np)
 {
 }
-static inline u32 of_msi_map_rid(struct device *dev,
-				 struct device_node *msi_np, u32 rid_in)
+static inline u32 of_msi_map_id(struct device *dev,
+				 struct device_node *msi_np, u32 id_in)
 {
-	return rid_in;
+	return id_in;
 }
 #endif
......
@@ -6,6 +6,34 @@
 #define LINUX_SCHED_CLOCK
 
 #ifdef CONFIG_GENERIC_SCHED_CLOCK
+/**
+ * struct clock_read_data - data required to read from sched_clock()
+ *
+ * @epoch_ns:		sched_clock() value at last update
+ * @epoch_cyc:		Clock cycle value at last update.
+ * @sched_clock_mask:	Bitmask for two's complement subtraction of non 64bit
+ *			clocks.
+ * @read_sched_clock:	Current clock source (or dummy source when suspended).
+ * @mult:		Multiplier for scaled math conversion.
+ * @shift:		Shift value for scaled math conversion.
+ *
+ * Care must be taken when updating this structure; it is read by
+ * some very hot code paths. It occupies <=40 bytes and, when combined
+ * with the seqcount used to synchronize access, comfortably fits into
+ * a 64 byte cache line.
+ */
+struct clock_read_data {
+	u64 epoch_ns;
+	u64 epoch_cyc;
+	u64 sched_clock_mask;
+	u64 (*read_sched_clock)(void);
+	u32 mult;
+	u32 shift;
+};
+
+extern struct clock_read_data *sched_clock_read_begin(unsigned int *seq);
+extern int sched_clock_read_retry(unsigned int seq);
+
 extern void generic_sched_clock_init(void);
 
 extern void sched_clock_register(u64 (*read)(void), int bits,
......
@@ -532,9 +532,10 @@ struct perf_event_mmap_page {
 				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */
 
 				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
-				cap_user_time		: 1, /* The time_* fields are used */
+				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
 				cap_user_time_zero	: 1, /* The time_zero field is used */
-				cap_____res		: 59;
+				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
+				cap_____res		: 58;
 		};
 	};
 
@@ -593,13 +594,29 @@ struct perf_event_mmap_page {
 	 *               ((rem * time_mult) >> time_shift);
 	 */
 	__u64	time_zero;
+
 	__u32	size;			/* Header size up to __reserved[] fields. */
+	__u32	__reserved_1;
+
+	/*
+	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
+	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
+	 *
+	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
+	 *
+	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
+	 *       is a correction on top of cap_usr_time, and code that doesn't
+	 *       know about cap_usr_time_short still works under the assumption
+	 *       the counter doesn't wrap.
	 */
+	__u64	time_cycles;
+	__u64	time_mask;
 
 	/*
 	 * Hole for extension of the self monitor capabilities
 	 */
 
-	__u8	__reserved[118*8+4];	/* align to 1k. */
+	__u8	__reserved[116*8];	/* align to 1k. */
 
 	/*
 	 * Control data for the mmap() data buffer.
......
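A stand-alone worked example of the cap_user_time_short correction above, with made-up values for a 56-bit counter (so time_mask = 2^56 - 1):

#include <stdint.h>
#include <stdio.h>

/* The raw counter read by userspace may have wrapped since the kernel
 * snapshotted time_cycles; the correction re-extends it to 64 bits
 * around that snapshot. Values are illustrative. */
int main(void)
{
	uint64_t time_mask = (1ULL << 56) - 1;
	uint64_t time_cycles = 0x0123456789abcdefULL; /* kernel snapshot */
	uint64_t cyc = 0x10ULL;                       /* wrapped raw read */

	cyc = time_cycles + ((cyc - time_cycles) & time_mask);
	/* Prints 0x200000000000010: one wrap past the snapshot. */
	printf("extended cyc = 0x%llx\n", (unsigned long long)cyc);
	return 0;
}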
@@ -109,6 +109,7 @@ struct vdso_data {
  * relocation, and this is what we need.
  */
 extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
+extern struct vdso_data _timens_data[CS_BASES] __attribute__((visibility("hidden")));
 
 /*
  * The generic vDSO implementation requires that gettimeofday.h
......
@@ -413,6 +413,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
 	VMCOREINFO_STRUCT_SIZE(mem_section);
 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
+	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
 #endif
 	VMCOREINFO_STRUCT_SIZE(page);
 	VMCOREINFO_STRUCT_SIZE(pglist_data);
......
@@ -19,31 +19,6 @@
 
 #include "timekeeping.h"
 
-/**
- * struct clock_read_data - data required to read from sched_clock()
- *
- * @epoch_ns:		sched_clock() value at last update
- * @epoch_cyc:		Clock cycle value at last update.
- * @sched_clock_mask:	Bitmask for two's complement subtraction of non 64bit
- *			clocks.
- * @read_sched_clock:	Current clock source (or dummy source when suspended).
- * @mult:		Multipler for scaled math conversion.
- * @shift:		Shift value for scaled math conversion.
- *
- * Care must be taken when updating this structure; it is read by
- * some very hot code paths. It occupies <=40 bytes and, when combined
- * with the seqcount used to synchronize access, comfortably fits into
- * a 64 byte cache line.
- */
-struct clock_read_data {
-	u64 epoch_ns;
-	u64 epoch_cyc;
-	u64 sched_clock_mask;
-	u64 (*read_sched_clock)(void);
-	u32 mult;
-	u32 shift;
-};
-
 /**
  * struct clock_data - all data needed for sched_clock() (including
  *                     registration of a new clock source)
@@ -93,6 +68,17 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 	return (cyc * mult) >> shift;
 }
 
+struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
+{
+	*seq = raw_read_seqcount_latch(&cd.seq);
+	return cd.read_data + (*seq & 1);
+}
+
+int sched_clock_read_retry(unsigned int seq)
+{
+	return read_seqcount_retry(&cd.seq, seq);
+}
+
 unsigned long long notrace sched_clock(void)
 {
 	u64 cyc, res;
@@ -100,13 +86,12 @@ unsigned long long notrace sched_clock(void)
 	struct clock_read_data *rd;
 
 	do {
-		seq = raw_read_seqcount(&cd.seq);
-		rd = cd.read_data + (seq & 1);
+		rd = sched_clock_read_begin(&seq);
 
 		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
 		      rd->sched_clock_mask;
 		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
-	} while (read_seqcount_retry(&cd.seq, seq));
+	} while (sched_clock_read_retry(seq));
 
 	return res;
 }
......
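The two exported helpers above give other subsystems (arm64 perf's userpage update in this series) the same latch-reader pattern sched_clock() itself uses: pick the stable copy of the latched data, read, and retry if the writer flipped the latch mid-read. A minimal sketch of a consumer (the function name and the field read are illustrative):

#include <linux/sched_clock.h>

static u64 example_read_epoch_ns(void)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	do {
		rd = sched_clock_read_begin(&seq);
		ns = rd->epoch_ns;	/* any fields read must be consistent */
	} while (sched_clock_read_retry(seq));

	return ns;
}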
@@ -532,9 +532,10 @@ struct perf_event_mmap_page {
 				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */
 
 				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
-				cap_user_time		: 1, /* The time_* fields are used */
+				cap_user_time		: 1, /* The time_{shift,mult,offset} fields are used */
 				cap_user_time_zero	: 1, /* The time_zero field is used */
-				cap_____res		: 59;
+				cap_user_time_short	: 1, /* the time_{cycle,mask} fields are used */
+				cap_____res		: 58;
 		};
 	};
 
@@ -593,13 +594,29 @@ struct perf_event_mmap_page {
 	 *               ((rem * time_mult) >> time_shift);
 	 */
 	__u64	time_zero;
+
 	__u32	size;			/* Header size up to __reserved[] fields. */
+	__u32	__reserved_1;
+
+	/*
+	 * If cap_usr_time_short, the hardware clock is less than 64bit wide
+	 * and we must compute the 'cyc' value, as used by cap_usr_time, as:
+	 *
+	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
+	 *
+	 * NOTE: this form is explicitly chosen such that cap_usr_time_short
+	 *       is a correction on top of cap_usr_time, and code that doesn't
+	 *       know about cap_usr_time_short still works under the assumption
+	 *       the counter doesn't wrap.
	 */
+	__u64	time_cycles;
+	__u64	time_mask;
 
 	/*
 	 * Hole for extension of the self monitor capabilities
 	 */
 
-	__u8	__reserved[118*8+4];	/* align to 1k. */
+	__u8	__reserved[116*8];	/* align to 1k. */
 
 	/*
 	 * Control data for the mmap() data buffer.
......