Commit 781ca2de authored by Tom Murphy's avatar Tom Murphy Committed by Joerg Roedel

iommu: Add gfp parameter to iommu_ops::map

Add a gfp_t parameter to the iommu_ops::map function.
Remove the needless locking in the AMD iommu driver.

The iommu_ops::map function (or the iommu_map function which calls it)
was always supposed to be sleepable (according to Joerg's comment in
this thread: https://lore.kernel.org/patchwork/patch/977520/ ) and so
should probably have had a "might_sleep()" since it was written. However
currently the dma-iommu api can call iommu_map in an atomic context,
which it shouldn't do. This doesn't cause any problems because any iommu
driver which uses the dma-iommu api uses GFP_ATOMIC in its
iommu_ops::map function. But doing this wastes the memory allocator's
atomic pools.
Signed-off-by: default avatarTom Murphy <murphyt7@tcd.ie>
Reviewed-by: default avatarRobin Murphy <robin.murphy@arm.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Signed-off-by: default avatarJoerg Roedel <jroedel@suse.de>
parent 37ec8eb8
...@@ -3106,7 +3106,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom, ...@@ -3106,7 +3106,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
} }
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
phys_addr_t paddr, size_t page_size, int iommu_prot) phys_addr_t paddr, size_t page_size, int iommu_prot,
gfp_t gfp)
{ {
struct protection_domain *domain = to_pdomain(dom); struct protection_domain *domain = to_pdomain(dom);
int prot = 0; int prot = 0;
......
...@@ -2448,7 +2448,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -2448,7 +2448,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
} }
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
......
...@@ -1159,7 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -1159,7 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
} }
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
......
...@@ -476,7 +476,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, ...@@ -476,7 +476,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
if (!iova) if (!iova)
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
if (iommu_map(domain, iova, phys - iova_off, size, prot)) { if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
iommu_dma_free_iova(cookie, iova, size); iommu_dma_free_iova(cookie, iova, size);
return DMA_MAPPING_ERROR; return DMA_MAPPING_ERROR;
} }
...@@ -611,7 +611,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size, ...@@ -611,7 +611,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
arch_dma_prep_coherent(sg_page(sg), sg->length); arch_dma_prep_coherent(sg_page(sg), sg->length);
} }
if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot) if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
< size) < size)
goto out_free_sg; goto out_free_sg;
...@@ -871,7 +871,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, ...@@ -871,7 +871,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's * We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do. * implementation - it knows better than we do.
*/ */
if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len) if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
goto out_free_iova; goto out_free_iova;
return __finalise_sg(dev, sg, nents, iova); return __finalise_sg(dev, sg, nents, iova);
......
...@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, ...@@ -1073,7 +1073,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
*/ */
static int exynos_iommu_map(struct iommu_domain *iommu_domain, static int exynos_iommu_map(struct iommu_domain *iommu_domain,
unsigned long l_iova, phys_addr_t paddr, size_t size, unsigned long l_iova, phys_addr_t paddr, size_t size,
int prot) int prot, gfp_t gfp)
{ {
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
sysmmu_pte_t *entry; sysmmu_pte_t *entry;
......
...@@ -5432,7 +5432,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain, ...@@ -5432,7 +5432,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain, static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa, unsigned long iova, phys_addr_t hpa,
size_t size, int iommu_prot) size_t size, int iommu_prot, gfp_t gfp)
{ {
struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct dmar_domain *dmar_domain = to_dmar_domain(domain);
u64 max_addr; u64 max_addr;
......
...@@ -1854,8 +1854,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, ...@@ -1854,8 +1854,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
return pgsize; return pgsize;
} }
int iommu_map(struct iommu_domain *domain, unsigned long iova, int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
const struct iommu_ops *ops = domain->ops; const struct iommu_ops *ops = domain->ops;
unsigned long orig_iova = iova; unsigned long orig_iova = iova;
...@@ -1892,8 +1892,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -1892,8 +1892,8 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n", pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize); iova, &paddr, pgsize);
ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
ret = ops->map(domain, iova, paddr, pgsize, prot);
if (ret) if (ret)
break; break;
...@@ -1913,8 +1913,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -1913,8 +1913,22 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret; return ret;
} }
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
might_sleep();
return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map); EXPORT_SYMBOL_GPL(iommu_map);
int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_atomic);
static size_t __iommu_unmap(struct iommu_domain *domain, static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size, unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather) struct iommu_iotlb_gather *iotlb_gather)
...@@ -1991,8 +2005,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain, ...@@ -1991,8 +2005,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
} }
EXPORT_SYMBOL_GPL(iommu_unmap_fast); EXPORT_SYMBOL_GPL(iommu_unmap_fast);
size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot) struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{ {
size_t len = 0, mapped = 0; size_t len = 0, mapped = 0;
phys_addr_t start; phys_addr_t start;
...@@ -2003,7 +2018,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ...@@ -2003,7 +2018,9 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
phys_addr_t s_phys = sg_phys(sg); phys_addr_t s_phys = sg_phys(sg);
if (len && s_phys != start + len) { if (len && s_phys != start + len) {
ret = iommu_map(domain, iova + mapped, start, len, prot); ret = __iommu_map(domain, iova + mapped, start,
len, prot, gfp);
if (ret) if (ret)
goto out_err; goto out_err;
...@@ -2031,8 +2048,22 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ...@@ -2031,8 +2048,22 @@ size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
return 0; return 0;
} }
size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
might_sleep();
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(iommu_map_sg); EXPORT_SYMBOL_GPL(iommu_map_sg);
size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot) phys_addr_t paddr, u64 size, int prot)
{ {
......
...@@ -724,7 +724,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain, ...@@ -724,7 +724,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
} }
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
......
...@@ -504,7 +504,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain, ...@@ -504,7 +504,7 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
} }
static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t len, int prot) phys_addr_t pa, size_t len, int prot, gfp_t gfp)
{ {
struct msm_priv *priv = to_msm_priv(domain); struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags; unsigned long flags;
......
...@@ -412,7 +412,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, ...@@ -412,7 +412,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
} }
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_domain *dom = to_mtk_domain(domain);
struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
......
...@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain, ...@@ -295,7 +295,7 @@ static void mtk_iommu_detach_device(struct iommu_domain *domain,
} }
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct mtk_iommu_domain *dom = to_mtk_domain(domain); struct mtk_iommu_domain *dom = to_mtk_domain(domain);
unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT; unsigned int page_num = size >> MT2701_IOMMU_PAGE_SHIFT;
......
...@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz) ...@@ -1339,7 +1339,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
} }
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
phys_addr_t pa, size_t bytes, int prot) phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{ {
struct omap_iommu_domain *omap_domain = to_omap_domain(domain); struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
struct device *dev = omap_domain->dev; struct device *dev = omap_domain->dev;
......
...@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de ...@@ -423,7 +423,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
} }
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
int ret; int ret;
unsigned long flags; unsigned long flags;
......
...@@ -757,7 +757,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, ...@@ -757,7 +757,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
} }
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct rk_iommu_domain *rk_domain = to_rk_domain(domain); struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
......
...@@ -265,7 +265,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain, ...@@ -265,7 +265,7 @@ static int s390_iommu_update_trans(struct s390_domain *s390_domain,
} }
static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct s390_domain *s390_domain = to_s390_domain(domain); struct s390_domain *s390_domain = to_s390_domain(domain);
int flags = ZPCI_PTE_VALID, rc = 0; int flags = ZPCI_PTE_VALID, rc = 0;
......
...@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova, ...@@ -178,7 +178,7 @@ static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
} }
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot) phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
{ {
struct gart_device *gart = gart_handle; struct gart_device *gart = gart_handle;
int ret; int ret;
......
...@@ -650,7 +650,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, ...@@ -650,7 +650,7 @@ static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
} }
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
struct tegra_smmu_as *as = to_smmu_as(domain); struct tegra_smmu_as *as = to_smmu_as(domain);
dma_addr_t pte_dma; dma_addr_t pte_dma;
......
...@@ -713,7 +713,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -713,7 +713,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
} }
static int viommu_map(struct iommu_domain *domain, unsigned long iova, static int viommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{ {
int ret; int ret;
u32 flags; u32 flags;
......
...@@ -256,7 +256,7 @@ struct iommu_ops { ...@@ -256,7 +256,7 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev); int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova, int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot); phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather); size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain); void (*flush_iotlb_all)(struct iommu_domain *domain);
...@@ -421,6 +421,8 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev); ...@@ -421,6 +421,8 @@ extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev); extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova, extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot); phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size); size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain, extern size_t iommu_unmap_fast(struct iommu_domain *domain,
...@@ -428,6 +430,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain, ...@@ -428,6 +430,9 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather); struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot); struct scatterlist *sg,unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain, extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token); iommu_fault_handler_t handler, void *token);
...@@ -662,6 +667,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -662,6 +667,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV; return -ENODEV;
} }
static inline int iommu_map_atomic(struct iommu_domain *domain,
unsigned long iova, phys_addr_t paddr,
size_t size, int prot)
{
return -ENODEV;
}
static inline size_t iommu_unmap(struct iommu_domain *domain, static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size) unsigned long iova, size_t size)
{ {
...@@ -682,6 +694,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain, ...@@ -682,6 +694,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return 0; return 0;
} }
static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
{
return 0;
}
static inline void iommu_flush_tlb_all(struct iommu_domain *domain) static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{ {
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment