Commit 6b04014f authored by Linus Torvalds

Merge tag 'iommu-updates-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:

 - Make the dma-iommu code more generic so that it can be used outside
   of the ARM context with other IOMMU drivers. Goal is to make use of
   it on x86 too.

 - Generic IOMMU domain support for the Intel VT-d driver. This driver
   now makes more use of common IOMMU code to allocate default domains
   for the devices it handles.

 - An IOMMU fault reporting API to user space. With it, IOMMU fault
   handling can be done in user space, for example to forward the faults
   to a VM (a brief usage sketch follows this list).

 - Better handling for reserved regions requested by the firmware. These
   can be 'relaxed' now, meaning that they no longer prevent a device
   from being attached to a VM.

 - Suspend/Resume support for the Renesas IOMMU driver.

 - Added support for dumping SVA related fields of the DMAR table in the
   Intel VT-d driver via debugfs.

 - A pile of smaller fixes and cleanups.
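
A note on the new fault reporting API mentioned above: the sketch below shows
how a consumer (for example a VFIO-like backend forwarding faults to a VM)
might register a per-device fault handler. It is only illustrative; the call
signatures are those added to include/linux/iommu.h further down, while the
handler body and the my_fault_ctx structure are assumptions made here for the
example.

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical per-device context kept by the consumer. */
struct my_fault_ctx {
	struct device *dev;
};

/* Called by the IOMMU core for each fault reported on the device. */
static int my_iommu_fault_handler(struct iommu_fault *fault, void *data)
{
	struct my_fault_ctx *ctx = data;

	/* Recoverable page requests could be forwarded to user space or a VM;
	 * unrecoverable DMA faults would typically only be logged. */
	if (fault->type == IOMMU_FAULT_PAGE_REQ)
		dev_info(ctx->dev, "page request fault reported\n");
	else
		dev_warn(ctx->dev, "unrecoverable IOMMU fault reported\n");

	return 0;
}

/* Registration around device use (error handling omitted):
 *	iommu_register_device_fault_handler(dev, my_iommu_fault_handler, ctx);
 *	...
 *	iommu_unregister_device_fault_handler(dev);
 */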

* tag 'iommu-updates-v5.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (90 commits)
  iommu/omap: No need to check return value of debugfs_create functions
  iommu/arm-smmu-v3: Invalidate ATC when detaching a device
  iommu/arm-smmu-v3: Fix compilation when CONFIG_CMA=n
  iommu/vt-d: Cleanup unused variable
  iommu/amd: Flush not present cache in iommu_map_page
  iommu/amd: Only free resources once on init error
  iommu/amd: Move gart fallback to amd_iommu_init
  iommu/amd: Make iommu_disable safer
  iommu/io-pgtable: Support non-coherent page tables
  iommu/io-pgtable: Replace IO_PGTABLE_QUIRK_NO_DMA with specific flag
  iommu/io-pgtable-arm: Add support to use system cache
  iommu/arm-smmu-v3: Increase maximum size of queues
  iommu/vt-d: Silence a variable set but not used
  iommu/vt-d: Remove an unused variable "length"
  iommu: Fix integer truncation
  iommu: Add padding to struct iommu_fault
  iommu/vt-d: Consolidate domain_init() to avoid duplication
  iommu/vt-d: Cleanup after delegating DMA domain to generic iommu
  iommu/vt-d: Fix suspicious RCU usage in probe_acpi_namespace_devices()
  iommu/vt-d: Allow DMA domain attaching to rmrr locked device
  ...
parents c6b6cebb d95c3885
@@ -24,3 +24,12 @@ Description: /sys/kernel/iommu_groups/reserved_regions list IOVA
region is described on a single line: the 1st field is
the base IOVA, the second is the end IOVA and the third
field describes the type of the region.
What: /sys/kernel/iommu_groups/reserved_regions
Date: June 2019
KernelVersion: v5.3
Contact: Eric Auger <eric.auger@redhat.com>
Description: In case an RMRR is used only by graphics or USB devices
it is now exposed as "direct-relaxable" instead of "direct".
In device assignment use case, for instance, those RMRR
are considered to be relaxable and safe.
@@ -619,9 +619,9 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
pasid = ((event[0] >> 16) & 0xFFFF)
| ((event[1] << 6) & 0xF0000);
tag = event[1] & 0x03FF;
-dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x]\n",
+dev_err(dev, "Event logged [INVALID_PPR_REQUEST device=%02x:%02x.%x pasid=0x%05x address=0x%llx flags=0x%04x tag=0x%03x]\n",
PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
-pasid, address, flags);
+pasid, address, flags, tag);
break;
default:
dev_err(dev, "Event logged [UNKNOWN event[0]=0x%08x event[1]=0x%08x event[2]=0x%08x event[3]=0x%08x\n",
@@ -1295,6 +1295,16 @@ static void domain_flush_complete(struct protection_domain *domain)
}
}
/* Flush the not present cache if it exists */
static void domain_flush_np_cache(struct protection_domain *domain,
dma_addr_t iova, size_t size)
{
if (unlikely(amd_iommu_np_cache)) {
domain_flush_pages(domain, iova, size);
domain_flush_complete(domain);
}
}
/*
 * This function flushes the DTEs for all devices in domain
@@ -2377,10 +2387,7 @@ static dma_addr_t __map_single(struct device *dev,
}
address += offset;
-if (unlikely(amd_iommu_np_cache)) {
-domain_flush_pages(&dma_dom->domain, address, size);
-domain_flush_complete(&dma_dom->domain);
-}
+domain_flush_np_cache(&dma_dom->domain, address, size);
out:
return address;
@@ -2559,6 +2566,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
s->dma_length = s->length;
}
if (s)
domain_flush_np_cache(domain, s->dma_address, s->dma_length);
return nelems;
out_unmap:
@@ -2597,7 +2607,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
struct protection_domain *domain;
struct dma_ops_domain *dma_dom;
unsigned long startaddr;
-int npages = 2;
+int npages;
domain = get_domain(dev);
if (IS_ERR(domain))
@@ -3039,6 +3049,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
mutex_unlock(&domain->api_lock);
domain_flush_np_cache(domain, iova, page_size);
return ret;
}
......
@@ -406,6 +406,9 @@ static void iommu_enable(struct amd_iommu *iommu)
static void iommu_disable(struct amd_iommu *iommu)
{
if (!iommu->mmio_base)
return;
/* Disable command buffer */
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@@ -2325,15 +2328,6 @@ static void __init free_iommu_resources(void)
amd_iommu_dev_table = NULL;
free_iommu_all();
-#ifdef CONFIG_GART_IOMMU
-/*
- * We failed to initialize the AMD IOMMU - try fallback to GART
- * if possible.
- */
-gart_iommu_init();
-#endif
}
/* SB IOAPIC is always on this device in AMD systems */
@@ -2625,8 +2619,6 @@ static int __init state_next(void)
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
pr_info("AMD IOMMU disabled on kernel command-line\n");
-free_dma_resources();
-free_iommu_resources();
init_state = IOMMU_CMDLINE_DISABLED;
ret = -EINVAL;
}
@@ -2667,6 +2659,19 @@ static int __init state_next(void)
BUG();
}
if (ret) {
free_dma_resources();
if (!irq_remapping_enabled) {
disable_iommus();
free_iommu_resources();
} else {
struct amd_iommu *iommu;
uninit_device_table_dma();
for_each_iommu(iommu)
iommu_flush_all_caches(iommu);
}
}
return ret;
}
@@ -2740,17 +2745,15 @@ static int __init amd_iommu_init(void)
int ret;
ret = iommu_go_to_state(IOMMU_INITIALIZED);
-if (ret) {
-free_dma_resources();
-if (!irq_remapping_enabled) {
-disable_iommus();
-free_iommu_resources();
-} else {
-uninit_device_table_dma();
-for_each_iommu(iommu)
-iommu_flush_all_caches(iommu);
-}
-}
+#ifdef CONFIG_GART_IOMMU
+if (ret && list_empty(&amd_iommu_list)) {
+/*
+ * We failed to initialize the AMD IOMMU - try fallback
+ * to GART if possible.
+ */
+gart_iommu_init();
+}
+#endif
for_each_iommu(iommu)
amd_iommu_debugfs_setup(iommu);
......
@@ -192,6 +192,13 @@
#define Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE GENMASK(4, 0)
/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
#endif
/*
 * Stream table.
 *
@@ -289,8 +296,9 @@
FIELD_GET(ARM64_TCR_##fld, tcr))
/* Command queue */
-#define CMDQ_ENT_DWORDS 2
-#define CMDQ_MAX_SZ_SHIFT 8
+#define CMDQ_ENT_SZ_SHIFT 4
+#define CMDQ_ENT_DWORDS ((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
+#define CMDQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
#define CMDQ_CONS_ERR GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX 0
@@ -336,14 +344,16 @@
#define CMDQ_SYNC_1_MSIADDR_MASK GENMASK_ULL(51, 2)
/* Event queue */
-#define EVTQ_ENT_DWORDS 4
-#define EVTQ_MAX_SZ_SHIFT 7
+#define EVTQ_ENT_SZ_SHIFT 5
+#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
/* PRI queue */
-#define PRIQ_ENT_DWORDS 2
-#define PRIQ_MAX_SZ_SHIFT 8
+#define PRIQ_ENT_SZ_SHIFT 4
+#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
@@ -798,7 +808,7 @@ static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
-memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
+memset(cmd, 0, 1 << CMDQ_ENT_SZ_SHIFT);
cmd[0] |= FIELD_PREP(CMDQ_0_OP, ent->opcode);
switch (ent->opcode) {
@@ -1785,13 +1795,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
+.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENCY,
.tlb = &arm_smmu_gather_ops,
.iommu_dev = smmu->dev,
};
-if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
-pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
@@ -1884,9 +1892,13 @@ static int arm_smmu_enable_ats(struct arm_smmu_master *master)
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
+struct arm_smmu_cmdq_ent cmd;
if (!master->ats_enabled || !dev_is_pci(master->dev))
return;
+arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
+arm_smmu_atc_inv_master(master, &cmd);
pci_disable_ats(to_pci_dev(master->dev));
master->ats_enabled = false;
}
@@ -1906,7 +1918,6 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
master->domain = NULL;
arm_smmu_install_ste_for_dev(master);
-/* Disabling ATS invalidates all ATC entries */
arm_smmu_disable_ats(master);
}
@@ -2270,17 +2281,32 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q,
unsigned long prod_off,
unsigned long cons_off,
-size_t dwords)
+size_t dwords, const char *name)
{
-size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
+size_t qsz;
do {
qsz = ((1 << q->max_n_shift) * dwords) << 3;
q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma,
GFP_KERNEL);
if (q->base || qsz < PAGE_SIZE)
break;
q->max_n_shift--;
} while (1);
-q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
if (!q->base) {
-dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
-qsz);
+dev_err(smmu->dev,
+"failed to allocate queue (0x%zx bytes) for %s\n",
+qsz, name);
return -ENOMEM;
}
if (!WARN_ON(q->base_dma & (qsz - 1))) {
dev_info(smmu->dev, "allocated %u entries for %s\n",
1 << q->max_n_shift, name);
}
q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu);
q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu);
q->ent_dwords = dwords;
@@ -2300,13 +2326,15 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
/* cmdq */
spin_lock_init(&smmu->cmdq.lock);
ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
-ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
+ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS,
+"cmdq");
if (ret)
return ret;
/* evtq */
ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
-ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
+ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS,
+"evtq");
if (ret)
return ret;
@@ -2315,7 +2343,8 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
return 0;
return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
-ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
+ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS,
+"priq");
}
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
@@ -2879,7 +2908,7 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
return -ENXIO;
}
-/* Queue sizes, capped at 4k */
+/* Queue sizes, capped to ensure natural alignment */
smmu->cmdq.q.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
FIELD_GET(IDR1_CMDQS, reg));
if (!smmu->cmdq.q.max_n_shift) {
......
@@ -892,13 +892,11 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.pgsize_bitmap = smmu->pgsize_bitmap,
.ias = ias,
.oas = oas,
+.coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
.tlb = smmu_domain->tlb_ops,
.iommu_dev = smmu->dev,
};
-if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
-pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
if (smmu_domain->non_strict)
pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
......
@@ -14,6 +14,17 @@
#include <asm/irq_remapping.h>
#include "intel-pasid.h"
struct tbl_walk {
u16 bus;
u16 devfn;
u32 pasid;
struct root_entry *rt_entry;
struct context_entry *ctx_entry;
struct pasid_entry *pasid_tbl_entry;
};
struct iommu_regset {
int offset;
const char *regs;
@@ -131,16 +142,86 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);
-static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
-int bus)
+static inline void print_tbl_walk(struct seq_file *m)
{
-struct context_entry *context;
-int devfn;
+struct tbl_walk *tbl_wlk = m->private;
seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
tbl_wlk->ctx_entry->lo);
/*
* A legacy mode DMAR doesn't support PASID, hence default it to -1
* indicating that it's invalid. Also, default all PASID related fields
* to 0.
*/
if (!tbl_wlk->pasid_tbl_entry)
seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
(u64)0, (u64)0, (u64)0);
else
seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
tbl_wlk->pasid_tbl_entry->val[1],
tbl_wlk->pasid_tbl_entry->val[2]);
}
seq_printf(m, " Context Table Entries for Bus: %d\n", bus); static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n"); u16 dir_idx)
{
struct tbl_walk *tbl_wlk = m->private;
u8 tbl_idx;
for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
if (pasid_pte_is_present(tbl_entry)) {
tbl_wlk->pasid_tbl_entry = tbl_entry;
tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
print_tbl_walk(m);
}
tbl_entry++;
}
}
static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
u16 pasid_dir_size)
{
struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
struct pasid_entry *pasid_tbl;
u16 dir_idx;
for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
pasid_tbl = get_pasid_table_from_pde(dir_entry);
if (pasid_tbl)
pasid_tbl_walk(m, pasid_tbl, dir_idx);
dir_entry++;
}
}
static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
struct context_entry *context;
u16 devfn, pasid_dir_size;
u64 pasid_dir_ptr;
for (devfn = 0; devfn < 256; devfn++) {
struct tbl_walk tbl_wlk = {0};
/*
* Scalable mode root entry points to upper scalable mode
* context table and lower scalable mode context table. Each
* scalable mode context table has 128 context entries where as
* legacy mode context table has 256 context entries. So in
* scalable mode, the context entries for former 128 devices are
* in the lower scalable mode context table, while the latter
* 128 devices are in the upper scalable mode context table.
* In scalable mode, when devfn > 127, iommu_context_addr()
* automatically refers to upper scalable mode context table and
* hence the caller doesn't have to worry about differences
* between scalable mode and non scalable mode.
*/
context = iommu_context_addr(iommu, bus, devfn, 0);
if (!context)
return;
@@ -148,33 +229,41 @@ static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
if (!context_present(context))
continue;
-seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
-bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
-context[0].hi, context[0].lo);
+tbl_wlk.bus = bus;
+tbl_wlk.devfn = devfn;
+tbl_wlk.rt_entry = &iommu->root_entry[bus];
tbl_wlk.ctx_entry = context;
m->private = &tbl_wlk;
if (pasid_supported(iommu) && is_pasid_enabled(context)) {
pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
pasid_dir_size = get_pasid_dir_size(context);
pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
continue;
}
print_tbl_walk(m);
}
}
-static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
unsigned long flags;
-int bus;
+u16 bus;
spin_lock_irqsave(&iommu->lock, flags);
-seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
(u64)virt_to_phys(iommu->root_entry));
-seq_puts(m, "Root Table Entries:\n");
+seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");
-for (bus = 0; bus < 256; bus++) {
-if (!(iommu->root_entry[bus].lo & 1))
-continue;
-seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
-iommu->root_entry[bus].hi,
-iommu->root_entry[bus].lo);
-ctx_tbl_entry_show(m, iommu, bus);
-seq_putc(m, '\n');
-}
+/*
+ * No need to check if the root entry is present or not because
+ * iommu_context_addr() performs the same check before returning
+ * context entry.
+ */
+for (bus = 0; bus < 256; bus++)
+ctx_tbl_walk(m, iommu, bus);
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -185,7 +274,7 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
-root_tbl_entry_show(m, iommu);
+root_tbl_walk(m, iommu);
seq_putc(m, '\n');
}
rcu_read_unlock();
......
@@ -169,23 +169,6 @@ int intel_pasid_alloc_table(struct device *dev)
return 0;
}
/* Get PRESENT bit of a PASID directory entry. */
static inline bool
pasid_pde_is_present(struct pasid_dir_entry *pde)
{
return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}
/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
if (!pasid_pde_is_present(pde))
return NULL;
return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}
void intel_pasid_free_table(struct device *dev)
{
struct device_domain_info *info;
......
@@ -18,6 +18,10 @@
#define PDE_PFN_MASK PAGE_MASK
#define PASID_PDE_SHIFT 6
#define MAX_NR_PASID_BITS 20
#define PASID_TBL_ENTRIES BIT(PASID_PDE_SHIFT)
#define is_pasid_enabled(entry) (((entry)->lo >> 3) & 0x1)
#define get_pasid_dir_size(entry) (1 << ((((entry)->lo >> 9) & 0x7) + 7))
/*
 * Domain ID reserved for pasid entries programmed for first-level
@@ -49,6 +53,28 @@ struct pasid_table {
struct list_head dev; /* device list */
};
/* Get PRESENT bit of a PASID directory entry. */
static inline bool pasid_pde_is_present(struct pasid_dir_entry *pde)
{
return READ_ONCE(pde->val) & PASID_PTE_PRESENT;
}
/* Get PASID table from a PASID directory entry. */
static inline struct pasid_entry *
get_pasid_table_from_pde(struct pasid_dir_entry *pde)
{
if (!pasid_pde_is_present(pde))
return NULL;
return phys_to_virt(READ_ONCE(pde->val) & PDE_PFN_MASK);
}
/* Get PRESENT bit of a PASID table entry. */
static inline bool pasid_pte_is_present(struct pasid_entry *pte)
{
return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
}
extern u32 intel_pasid_max_id;
int intel_pasid_alloc_id(void *ptr, int start, int end, gfp_t gfp);
void intel_pasid_free_id(int pasid);
......
@@ -366,6 +366,21 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
list_add_tail(&svm->list, &global_svm_list);
} else {
/*
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
spin_lock(&iommu->lock);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
mm ? 0 : PASID_FLAG_SUPERVISOR_MODE);
spin_unlock(&iommu->lock);
if (ret) {
kfree(sdev);
goto out;
}
}
list_add_rcu(&sdev->list, &svm->devs);
......
@@ -101,7 +101,7 @@ static void init_ir_status(struct intel_iommu *iommu)
iommu->flags |= VTD_FLAG_IRQ_REMAP_PRE_ENABLED;
}
-static int alloc_irte(struct intel_iommu *iommu, int irq,
+static int alloc_irte(struct intel_iommu *iommu,
struct irq_2_iommu *irq_iommu, u16 count)
{
struct ir_table *table = iommu->ir_table;
@@ -1374,7 +1374,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
goto out_free_parent;
down_read(&dmar_global_lock);
-index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
+index = alloc_irte(iommu, &data->irq_2_iommu, nr_irqs);
up_read(&dmar_global_lock);
if (index < 0) {
pr_warn("Failed to allocate IRTE\n");
......
@@ -204,7 +204,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
}
-if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+if (table && !cfg->coherent_walk) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
@@ -238,7 +238,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
struct device *dev = cfg->iommu_dev;
size_t size = ARM_V7S_TABLE_SIZE(lvl);
-if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+if (!cfg->coherent_walk)
dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
DMA_TO_DEVICE);
if (lvl == 1)
@@ -250,7 +250,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
struct io_pgtable_cfg *cfg)
{
-if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
+if (cfg->coherent_walk)
return;
dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
@@ -716,7 +716,6 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
-IO_PGTABLE_QUIRK_NO_DMA |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -779,8 +778,11 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
/* TTBRs */
cfg->arm_v7s_cfg.ttbr[0] = virt_to_phys(data->pgd) |
ARM_V7S_TTBR_S | ARM_V7S_TTBR_NOS |
-ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
-ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA);
+(cfg->coherent_walk ?
+(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
+ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
+(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
+ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
cfg->arm_v7s_cfg.ttbr[1] = 0;
return &data->iop;
@@ -835,7 +837,8 @@ static int __init arm_v7s_do_selftests(void)
.tlb = &dummy_tlb_ops,
.oas = 32,
.ias = 32,
-.quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA,
+.coherent_walk = true,
+.quirks = IO_PGTABLE_QUIRK_ARM_NS,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
unsigned int iova, size, iova_start;
......
@@ -156,10 +156,12 @@
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
#define ARM_LPAE_MAIR_ATTR_NC 0x44
+#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA 0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC 0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2
+#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE 3
#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER BIT(2)
@@ -239,7 +241,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
return NULL;
pages = page_address(p);
-if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
+if (!cfg->coherent_walk) {
dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
@@ -265,7 +267,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
static void __arm_lpae_free_pages(void *pages, size_t size,
struct io_pgtable_cfg *cfg)
{
-if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+if (!cfg->coherent_walk)
dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
size, DMA_TO_DEVICE);
free_pages((unsigned long)pages, get_order(size));
@@ -283,7 +285,7 @@ static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
{
*ptep = pte;
-if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
+if (!cfg->coherent_walk)
__arm_lpae_sync_pte(ptep, cfg);
}
@@ -361,8 +363,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
old = cmpxchg64_relaxed(ptep, curr, new);
-if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
-(old & ARM_LPAE_PTE_SW_SYNC))
+if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
return old;
/* Even if it's not ours, there's no point waiting; just kick it */
@@ -403,8 +404,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
-} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
-!(pte & ARM_LPAE_PTE_SW_SYNC)) {
+} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
__arm_lpae_sync_pte(ptep, cfg);
}
@@ -459,6 +459,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
else if (prot & IOMMU_CACHE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+else if (prot & IOMMU_QCOM_SYS_CACHE)
+pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
+<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
}
if (prot & IOMMU_NOEXEC)
@@ -783,7 +786,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
-if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
@@ -792,9 +795,15 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
/* TCR */
-reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+if (cfg->coherent_walk) {
+reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+} else {
+reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
+}
switch (ARM_LPAE_GRANULE(data)) {
case SZ_4K:
@@ -846,7 +855,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_MAIR_ATTR_WBRWA
<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
(ARM_LPAE_MAIR_ATTR_DEVICE
-<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
+(ARM_LPAE_MAIR_ATTR_INC_OWBRWA
+<< ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));
cfg->arm_lpae_s1_cfg.mair[0] = reg;
cfg->arm_lpae_s1_cfg.mair[1] = 0;
@@ -876,8 +887,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
-if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
-IO_PGTABLE_QUIRK_NON_STRICT))
+if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -1212,7 +1222,7 @@ static int __init arm_lpae_do_selftests(void)
struct io_pgtable_cfg cfg = {
.tlb = &dummy_tlb_ops,
.oas = 48,
-.quirks = IO_PGTABLE_QUIRK_NO_DMA,
+.coherent_walk = true,
};
for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
......
@@ -236,17 +236,6 @@ DEBUG_FOPS_RO(regs);
DEFINE_SHOW_ATTRIBUTE(tlb);
DEFINE_SHOW_ATTRIBUTE(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \
{ \
struct dentry *dent; \
dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
obj, &attr##_fops); \
if (!dent) \
goto err; \
}
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
void omap_iommu_debugfs_add(struct omap_iommu *obj)
{
struct dentry *d;
@@ -254,23 +243,13 @@ void omap_iommu_debugfs_add(struct omap_iommu *obj)
if (!iommu_debug_root)
return;
-obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
-if (!obj->debug_dir)
-return;
-d = debugfs_create_u32("nr_tlb_entries", 0400, obj->debug_dir,
-&obj->nr_tlb_entries);
-if (!d)
-return;
-DEBUG_ADD_FILE_RO(regs);
-DEBUG_ADD_FILE_RO(tlb);
-DEBUG_ADD_FILE_RO(pagetable);
-return;
-err:
-debugfs_remove_recursive(obj->debug_dir);
+d = debugfs_create_dir(obj->name, iommu_debug_root);
+obj->debug_dir = d;
+debugfs_create_u32("nr_tlb_entries", 0400, d, &obj->nr_tlb_entries);
+debugfs_create_file("regs", 0400, d, obj, &regs_fops);
+debugfs_create_file("tlb", 0400, d, obj, &tlb_fops);
+debugfs_create_file("pagetable", 0400, d, obj, &pagetable_fops);
}
void omap_iommu_debugfs_remove(struct omap_iommu *obj)
@@ -284,8 +263,6 @@ void omap_iommu_debugfs_remove(struct omap_iommu *obj)
void __init omap_iommu_debugfs_init(void)
{
iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
-if (!iommu_debug_root)
-pr_err("can't create debugfs dir\n");
}
void __exit omap_iommu_debugfs_exit(void)
......
@@ -35,8 +35,7 @@
static const struct iommu_ops omap_iommu_ops;
-#define to_iommu(dev) \
-((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
+#define to_iommu(dev) ((struct omap_iommu *)dev_get_drvdata(dev))
/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
......
@@ -42,6 +42,7 @@ struct iommu_ops;
struct iommu_group;
struct iommu_fwspec;
struct dev_pin_info;
+struct iommu_param;
struct bus_attribute {
struct attribute attr;
@@ -961,6 +962,7 @@ struct dev_links_info {
 * device (i.e. the bus driver that discovered the device).
 * @iommu_group: IOMMU group the device belongs to.
 * @iommu_fwspec: IOMMU-specific properties supplied by firmware.
+ * @iommu_param: Per device generic IOMMU runtime data
 *
 * @offline_disabled: If set, the device is permanently online.
 * @offline: Set after successful invocation of bus type's .offline().
@@ -1054,6 +1056,7 @@ struct device {
void (*release)(struct device *dev);
struct iommu_group *iommu_group;
struct iommu_fwspec *iommu_fwspec;
+struct iommu_param *iommu_param;
bool offline_disabled:1;
bool offline:1;
......
@@ -5,59 +5,21 @@
#ifndef __DMA_IOMMU_H
#define __DMA_IOMMU_H
-#ifdef __KERNEL__
+#include <linux/errno.h>
#include <linux/types.h>
-#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>
-int iommu_dma_init(void);
/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
-int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
-u64 size, struct device *dev);
+void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
/* General helpers for DMA-API <-> IOMMU-API interaction */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs);
/*
* These implement the bulk of the relevant DMA mapping callbacks, but require
* the arch code to take care of attributes and cache maintenance
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma);
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot);
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot);
/*
* Arch code with no special attribute handling may use these
* directly as DMA mapping callbacks for simplicity
*/
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir, unsigned long attrs);
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs);
/* The DMA API isn't _quite_ the whole story, though... */
/*
@@ -75,16 +37,16 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc,
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
-#else
+#else /* CONFIG_IOMMU_DMA */
struct iommu_domain;
struct msi_desc;
struct msi_msg;
struct device;
-static inline int iommu_dma_init(void)
+static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base,
+u64 size)
{
-return 0;
}
static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
@@ -117,5 +79,4 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he
}
#endif /* CONFIG_IOMMU_DMA */
-#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
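
The dma-iommu.h hunk above replaces the old iommu_dma_init_domain() entry point
with iommu_setup_dma_ops(). A minimal sketch of how architecture DMA setup code
might call the new hook; the arch_setup_dma_ops() wrapper shown here is an
assumption for illustration and is not part of this diff:

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/iommu.h>

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * If the device sits behind an IOMMU with a DMA domain, let the
	 * generic dma-iommu code install the IOMMU-backed dma_map_ops.
	 * With CONFIG_IOMMU_DMA disabled this call compiles to a no-op stub.
	 */
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}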
@@ -435,6 +435,12 @@ enum {
#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
extern int intel_iommu_sm;
#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
@@ -642,7 +648,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
-struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
......
@@ -49,7 +49,7 @@ struct svm_dev_ops {
/**
 * intel_svm_bind_mm() - Bind the current process to a PASID
- * @dev: Device to be granted acccess
+ * @dev: Device to be granted access
 * @pasid: Address for allocated PASID
 * @flags: Flags. Later for requesting supervisor mode, etc.
 * @ops: Callbacks to device driver
......
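
For context, a minimal sketch of how a device driver might use the
intel_svm_bind_mm() interface documented in the hunk above (error handling
trimmed; the bind/unbind signatures are those of include/linux/intel-svm.h,
while the surrounding driver function is purely illustrative):

#include <linux/device.h>
#include <linux/intel-svm.h>

static int my_driver_enable_sva(struct device *dev)
{
	int pasid;
	int ret;

	/* Bind the current process address space; on success the device may
	 * issue DMA tagged with the returned PASID. */
	ret = intel_svm_bind_mm(dev, &pasid, 0, NULL);
	if (ret)
		return ret;

	/* ... program the device with 'pasid', run the workload ... */

	return intel_svm_unbind_mm(dev, pasid);
}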
@@ -44,6 +44,8 @@ struct iommu_gather_ops {
 * tables.
 * @ias: Input address (iova) size, in bits.
 * @oas: Output address (paddr) size, in bits.
* @coherent_walk A flag to indicate whether or not page table walks made
* by the IOMMU are coherent with the CPU caches.
 * @tlb: TLB management callbacks for this set of tables.
 * @iommu_dev: The device representing the DMA configuration for the
 * page table walker.
@@ -68,11 +70,6 @@ struct io_pgtable_cfg {
 * when the SoC is in "4GB mode" and they can only access the high
 * remap of DRAM (0x1_00000000 to 0x1_ffffffff).
 *
* IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever
* be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
* software-emulated IOMMU), such that pagetable updates need not
* be treated as explicit DMA data.
*
 * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
 * on unmap, for DMA domains using the flush queue mechanism for
 * delayed invalidation.
@@ -81,12 +78,12 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
-#define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
-#define IO_PGTABLE_QUIRK_NON_STRICT BIT(5)
+#define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
unsigned int oas;
+bool coherent_walk;
const struct iommu_gather_ops *tlb;
struct device *iommu_dev;
......
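
The io-pgtable.h hunk above replaces the IO_PGTABLE_QUIRK_NO_DMA quirk with an
explicit coherent_walk flag in struct io_pgtable_cfg. A sketch of how an IOMMU
driver might fill the config under the new scheme; the helper name, the chosen
sizes and the page-table format are illustrative assumptions, only the struct
fields and alloc_io_pgtable_ops() come from the kernel API:

#include <linux/io-pgtable.h>
#include <linux/sizes.h>

static struct io_pgtable_ops *my_alloc_pgtable(struct device *dev,
					       bool walk_is_coherent,
					       const struct iommu_gather_ops *tlb,
					       void *cookie)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		/* Replaces the old IO_PGTABLE_QUIRK_NO_DMA quirk: when false,
		 * the io-pgtable code performs DMA API cache maintenance on
		 * every page-table update. */
		.coherent_walk	= walk_is_coherent,
		.tlb		= tlb,
		.iommu_dev	= dev,
	};

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
}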
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -29,6 +30,12 @@
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV (1 << 5)
/*
* Non-coherent masters on few Qualcomm SoCs can use this page protection flag
* to set correct cacheability attributes to use an outer level of cache -
* last level cache, aka system cache.
*/
#define IOMMU_QCOM_SYS_CACHE (1 << 6)
struct iommu_ops;
struct iommu_group;
@@ -37,6 +44,7 @@ struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -46,6 +54,7 @@ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -123,6 +132,12 @@ enum iommu_attr {
enum iommu_resv_type {
/* Memory regions which must be mapped 1:1 at all times */
IOMMU_RESV_DIRECT,
/*
* Memory regions which are advertised to be 1:1 but are
* commonly considered relaxable in some conditions,
* for instance in device assignment use case (USB, Graphics)
*/
IOMMU_RESV_DIRECT_RELAXABLE,
/* Arbitrary "never map this or give it to a device" address ranges */
IOMMU_RESV_RESERVED,
/* Hardware MSI region (untranslated) */
@@ -212,6 +227,7 @@ struct iommu_sva_ops {
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 */
struct iommu_ops {
@@ -272,6 +288,10 @@ struct iommu_ops {
void (*sva_unbind)(struct iommu_sva *handle);
int (*sva_get_pasid)(struct iommu_sva *handle);
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
unsigned long pgsize_bitmap;
};
@@ -289,6 +309,48 @@ struct iommu_device {
struct device *dev;
};
/**
* struct iommu_fault_event - Generic fault event
*
* Can represent recoverable faults such as a page requests or
* unrecoverable faults such as DMA or IRQ remapping faults.
*
* @fault: fault descriptor
* @list: pending fault event list, used for tracking responses
*/
struct iommu_fault_event {
struct iommu_fault fault;
struct list_head list;
};
/**
* struct iommu_fault_param - per-device IOMMU fault data
* @handler: Callback function to handle IOMMU faults at device level
* @data: handler private data
* @faults: holds the pending faults which needs response
* @lock: protect pending faults list
*/
struct iommu_fault_param {
iommu_dev_fault_handler_t handler;
void *data;
struct list_head faults;
struct mutex lock;
};
/**
* struct iommu_param - collection of per-device IOMMU data
*
* @fault_param: IOMMU detected device fault reporting data
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
* struct iommu_fwspec *iommu_fwspec;
*/
struct iommu_param {
struct mutex lock;
struct iommu_fault_param *fault_param;
};
int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
@@ -350,6 +412,7 @@ extern void iommu_set_fault_handler(struct iommu_domain *domain,
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
enum iommu_resv_type type);
@@ -378,6 +441,17 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
void *data);
extern int iommu_unregister_device_fault_handler(struct device *dev);
extern int iommu_report_device_fault(struct device *dev,
struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
struct iommu_page_response *msg);
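On the consumer side (VFIO, or a driver that wants to service recoverable faults itself), the flow might look roughly like the sketch below. The callback signature assumed here is the iommu_dev_fault_handler_t used by this API (the fault descriptor plus the opaque cookie passed at registration); example_queue_fault() and the probe/remove context are placeholders, not part of this series.

#include <linux/iommu.h>

/* Placeholder: hand the fault to whatever will resolve it
 * (a workqueue, userspace via VFIO, ...). Returns 0 on success.
 */
static int example_queue_fault(struct device *dev, struct iommu_fault *fault)
{
	return 0;
}

static int example_fault_cb(struct iommu_fault *fault, void *cookie)
{
	struct device *dev = cookie;

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;		/* unrecoverable: nothing to answer */

	return example_queue_fault(dev, fault);	/* answer asynchronously */
}

/* Called once the queued page request has been resolved (or rejected). */
static int example_complete_fault(struct device *dev, struct iommu_fault *fault,
				  enum iommu_page_response_code code)
{
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid	 = fault->prm.grpid,
		.code	 = code,
	};

	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid  = fault->prm.pasid;
	}

	return iommu_page_response(dev, &resp);
}

static int example_enable_faults(struct device *dev)
{
	/* @dev is passed back as @cookie in example_fault_cb(). */
	return iommu_register_device_fault_handler(dev, example_fault_cb, dev);
}

static void example_disable_faults(struct device *dev)
{
	iommu_unregister_device_fault_handler(dev);
}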
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);
@@ -492,6 +566,7 @@ struct iommu_ops {};
 struct iommu_group {};
 struct iommu_fwspec {};
 struct iommu_device {};
struct iommu_fault_param {};
 static inline bool iommu_present(struct bus_type *bus)
 {
@@ -614,6 +689,11 @@ static inline int iommu_request_dm_for_dev(struct device *dev)
 	return -ENODEV;
 }
static inline int iommu_request_dma_domain_for_dev(struct device *dev)
{
return -ENODEV;
}
 static inline int iommu_attach_group(struct iommu_domain *domain,
 				     struct iommu_group *group)
 {
@@ -685,6 +765,31 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group,
 	return 0;
 }
static inline
int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
void *data)
{
return -ENODEV;
}
static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
return 0;
}
static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
return -ENODEV;
}
static inline int iommu_page_response(struct device *dev,
struct iommu_page_response *msg)
{
return -ENODEV;
}
 static inline int iommu_group_id(struct iommu_group *group)
 {
 	return -ENODEV;
...
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* IOMMU user API definitions
*/
#ifndef _UAPI_IOMMU_H
#define _UAPI_IOMMU_H
#include <linux/types.h>
#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC (1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV (1 << 3) /* privileged */
/* Generic fault types; can be expanded later, e.g. for IRQ remapping faults */
enum iommu_fault_type {
IOMMU_FAULT_DMA_UNRECOV = 1, /* unrecoverable fault */
IOMMU_FAULT_PAGE_REQ, /* page request fault */
};
enum iommu_fault_reason {
IOMMU_FAULT_REASON_UNKNOWN = 0,
/* Could not access the PASID table (fetch caused external abort) */
IOMMU_FAULT_REASON_PASID_FETCH,
/* PASID entry is invalid or has configuration errors */
IOMMU_FAULT_REASON_BAD_PASID_ENTRY,
/*
* PASID is out of range (e.g. exceeds the maximum PASID
* supported by the IOMMU) or disabled.
*/
IOMMU_FAULT_REASON_PASID_INVALID,
/*
* An external abort occurred fetching (or updating) a translation
* table descriptor
*/
IOMMU_FAULT_REASON_WALK_EABT,
/*
* Could not access the page table entry (Bad address),
* actual translation fault
*/
IOMMU_FAULT_REASON_PTE_FETCH,
/* Protection flag check failed */
IOMMU_FAULT_REASON_PERMISSION,
/* access flag check failed */
IOMMU_FAULT_REASON_ACCESS,
/* Output address of a translation stage caused Address Size fault */
IOMMU_FAULT_REASON_OOR_ADDRESS,
};
/**
* struct iommu_fault_unrecoverable - Unrecoverable fault data
* @reason: reason of the fault, from &enum iommu_fault_reason
* @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
* @pasid: Process Address Space ID
 * @perm: access permissions requested by the incoming transaction
* (IOMMU_FAULT_PERM_* values)
* @addr: offending page address
* @fetch_addr: address that caused a fetch abort, if any
*/
struct iommu_fault_unrecoverable {
__u32 reason;
#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0)
#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1)
#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2)
__u32 flags;
__u32 pasid;
__u32 perm;
__u64 addr;
__u64 fetch_addr;
};
/**
* struct iommu_fault_page_request - Page Request data
* @flags: encodes whether the corresponding fields are valid and whether this
* is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values)
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
* @addr: page address
* @private_data: device-specific private information
*/
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
__u32 flags;
__u32 pasid;
__u32 grpid;
__u32 perm;
__u64 addr;
__u64 private_data[2];
};
/**
* struct iommu_fault - Generic fault data
* @type: fault type from &enum iommu_fault_type
* @padding: reserved for future use (should be zero)
* @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
* @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
* @padding2: sets the fault size to allow for future extensions
*/
struct iommu_fault {
__u32 type;
__u32 padding;
union {
struct iommu_fault_unrecoverable event;
struct iommu_fault_page_request prm;
__u8 padding2[56];
};
};
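Because this structure is UAPI, a userspace consumer (for instance a VMM receiving faults forwarded by VFIO) can decode it directly; the sketch below assumes the sanitised kernel headers from this release are installed, and the printing is only illustrative, the delivery channel is out of scope here.

#include <stdio.h>
#include <linux/iommu.h>

/* Illustrative decode of a fault record received from the kernel. */
static void dump_iommu_fault(const struct iommu_fault *f)
{
	switch (f->type) {
	case IOMMU_FAULT_DMA_UNRECOV:
		printf("unrecoverable fault, reason %u\n", f->event.reason);
		if (f->event.flags & IOMMU_FAULT_UNRECOV_PASID_VALID)
			printf("  pasid 0x%x\n", f->event.pasid);
		if (f->event.flags & IOMMU_FAULT_UNRECOV_ADDR_VALID)
			printf("  addr  0x%llx\n",
			       (unsigned long long)f->event.addr);
		if (f->event.flags & IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID)
			printf("  fetch 0x%llx\n",
			       (unsigned long long)f->event.fetch_addr);
		break;
	case IOMMU_FAULT_PAGE_REQ:
		printf("page request, grpid %u, addr 0x%llx, perm%s%s%s%s\n",
		       f->prm.grpid, (unsigned long long)f->prm.addr,
		       f->prm.perm & IOMMU_FAULT_PERM_READ  ? " R" : "",
		       f->prm.perm & IOMMU_FAULT_PERM_WRITE ? " W" : "",
		       f->prm.perm & IOMMU_FAULT_PERM_EXEC  ? " X" : "",
		       f->prm.perm & IOMMU_FAULT_PERM_PRIV  ? " P" : "");
		break;
	default:
		printf("unknown fault type %u\n", f->type);
	}
}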
/**
* enum iommu_page_response_code - Return status of fault handlers
* @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
* populated, retry the access. This is "Success" in PCI PRI.
* @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
* this device if possible. This is "Response Failure" in PCI PRI.
* @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
* access. This is "Invalid Request" in PCI PRI.
*/
enum iommu_page_response_code {
IOMMU_PAGE_RESP_SUCCESS = 0,
IOMMU_PAGE_RESP_INVALID,
IOMMU_PAGE_RESP_FAILURE,
};
/**
* struct iommu_page_response - Generic page response information
* @version: API version of this structure
* @flags: encodes whether the corresponding fields are valid
* (IOMMU_FAULT_PAGE_RESPONSE_* values)
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @code: response code from &enum iommu_page_response_code
*/
struct iommu_page_response {
#define IOMMU_PAGE_RESP_VERSION_1 1
__u32 version;
#define IOMMU_PAGE_RESP_PASID_VALID (1 << 0)
__u32 flags;
__u32 pasid;
__u32 grpid;
__u32 code;
};
#endif /* _UAPI_IOMMU_H */
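Completing the loop, a userspace consumer that has resolved (or rejected) a page request fills in struct iommu_page_response before handing it back to the kernel; how it is handed back (e.g. a VFIO ioctl) is outside this header, so the sketch below only shows the fill-in step, with the helper name being a placeholder.

#include <string.h>
#include <linux/iommu.h>

/* Sketch: build the response for a previously received page request. */
static void fill_page_response(struct iommu_page_response *resp,
			       const struct iommu_fault_page_request *prm,
			       __u32 code)
{
	memset(resp, 0, sizeof(*resp));
	resp->version = IOMMU_PAGE_RESP_VERSION_1;
	resp->grpid   = prm->grpid;
	resp->code    = code;		/* one of enum iommu_page_response_code */

	if (prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp->flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp->pasid  = prm->pasid;
	}
}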