Commit 2dcebc7d authored by Jacob Pan, committed by Joerg Roedel

iommu: Move global PASID allocation from SVA to core

Intel ENQCMD requires a single PASID to be shared between multiple
devices, as the PASID is stored in a single per-process MSR and
userspace can use only that one PASID.

This means that the PASID allocation for any device driver that uses
ENQCMD must always come from a shared global pool, regardless of what
kind of domain the PASID will be used with.

Split the code for the global PASID allocator into
iommu_alloc/free_global_pasid() so that drivers can attach non-SVA
domains to PASIDs as well.

Move the global PASID allocation APIs from the SVA code into the IOMMU
core. Reserved PASIDs, currently only RID_PASID, are excluded from the
global PASID allocation.

It is expected that device drivers will use the allocated PASIDs to
attach to appropriate IOMMU domains for use.
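
As an illustration only (not part of this patch), a driver wanting a
non-SVA domain on a PASID could consume the new APIs roughly as in the
sketch below. example_enable_pasid() and the chosen error codes are
placeholders; iommu_attach_device_pasid() is the existing attach path:

  /* Hypothetical driver-side sketch, not part of this patch. */
  static int example_enable_pasid(struct device *dev,
                                  struct iommu_domain *domain,
                                  ioasid_t *out_pasid)
  {
          ioasid_t pasid;
          int ret;

          /* PASID comes from the shared global pool, so ENQCMD users
           * see a single PASID space across devices.
           */
          pasid = iommu_alloc_global_pasid(dev);
          if (pasid == IOMMU_PASID_INVALID)
                  return -ENOSPC;

          ret = iommu_attach_device_pasid(domain, dev, pasid);
          if (ret) {
                  iommu_free_global_pasid(pasid);
                  return ret;
          }

          *out_pasid = pasid;
          return 0;
  }

On teardown such a driver would call iommu_detach_device_pasid() and
then iommu_free_global_pasid() to return the PASID to the pool.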
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Jacob Pan <jacob.jun.pan@linux.intel.com>
Link: https://lore.kernel.org/r/20230802212427.1497170-3-jacob.jun.pan@linux.intel.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 42987801
drivers/iommu/iommu-sva.c
@@ -10,34 +10,30 @@
 #include "iommu-sva.h"
 
 static DEFINE_MUTEX(iommu_sva_lock);
-static DEFINE_IDA(iommu_global_pasid_ida);
 
-/* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max)
+static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
 {
+	ioasid_t pasid;
 	int ret = 0;
 
-	if (min == IOMMU_PASID_INVALID ||
-	    max == IOMMU_PASID_INVALID ||
-	    min == 0 || max < min)
-		return -EINVAL;
-
 	if (!arch_pgtable_dma_compat(mm))
 		return -EBUSY;
 
 	mutex_lock(&iommu_sva_lock);
 	/* Is a PASID already associated with this mm? */
 	if (mm_valid_pasid(mm)) {
-		if (mm->pasid < min || mm->pasid > max)
+		if (mm->pasid >= dev->iommu->max_pasids)
 			ret = -EOVERFLOW;
 		goto out;
 	}
 
-	ret = ida_alloc_range(&iommu_global_pasid_ida, min, max, GFP_KERNEL);
-	if (ret < 0)
+	pasid = iommu_alloc_global_pasid(dev);
+	if (pasid == IOMMU_PASID_INVALID) {
+		ret = -ENOSPC;
 		goto out;
-	mm->pasid = ret;
+	}
+	mm->pasid = pasid;
 	ret = 0;
 out:
 	mutex_unlock(&iommu_sva_lock);
@@ -64,15 +60,10 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 {
 	struct iommu_domain *domain;
 	struct iommu_sva *handle;
-	ioasid_t max_pasids;
 	int ret;
 
-	max_pasids = dev->iommu->max_pasids;
-	if (!max_pasids)
-		return ERR_PTR(-EOPNOTSUPP);
-
 	/* Allocate mm->pasid if necessary. */
-	ret = iommu_sva_alloc_pasid(mm, 1, max_pasids - 1);
+	ret = iommu_sva_alloc_pasid(mm, dev);
 	if (ret)
 		return ERR_PTR(ret);
@@ -217,5 +208,5 @@ void mm_pasid_drop(struct mm_struct *mm)
 	if (likely(!mm_valid_pasid(mm)))
 		return;
 
-	ida_free(&iommu_global_pasid_ida, mm->pasid);
+	iommu_free_global_pasid(mm->pasid);
 }
drivers/iommu/iommu.c
@@ -39,6 +39,7 @@
 
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
+static DEFINE_IDA(iommu_global_pasid_ida);
 
 static unsigned int iommu_def_domain_type __read_mostly;
 static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
@@ -3400,3 +3401,30 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
 
 	return domain;
 }
+
+ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+	int ret;
+
+	/* max_pasids == 0 means that the device does not support PASID */
+	if (!dev->iommu->max_pasids)
+		return IOMMU_PASID_INVALID;
+
+	/*
+	 * max_pasids is set up by vendor driver based on number of PASID bits
+	 * supported but the IDA allocation is inclusive.
+	 */
+	ret = ida_alloc_range(&iommu_global_pasid_ida, IOMMU_FIRST_GLOBAL_PASID,
+			      dev->iommu->max_pasids - 1, GFP_KERNEL);
+	return ret < 0 ? IOMMU_PASID_INVALID : ret;
+}
+EXPORT_SYMBOL_GPL(iommu_alloc_global_pasid);
+
+void iommu_free_global_pasid(ioasid_t pasid)
+{
+	if (WARN_ON(pasid == IOMMU_PASID_INVALID))
+		return;
+
+	ida_free(&iommu_global_pasid_ida, pasid);
+}
+EXPORT_SYMBOL_GPL(iommu_free_global_pasid);
include/linux/iommu.h
@@ -197,6 +197,7 @@ enum iommu_dev_features {
 };
 
 #define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
 #define IOMMU_PASID_INVALID	(-1U)
 typedef unsigned int ioasid_t;
@@ -728,6 +729,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
 struct iommu_domain *
 iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
 			       unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
 #else /* CONFIG_IOMMU_API */
 struct iommu_ops {};
@@ -1089,6 +1092,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
 {
 	return NULL;
 }
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+	return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
 #endif /* CONFIG_IOMMU_API */
 
 /**