Commit cade075f authored by Kirti Wankhede, committed by Alex Williamson

vfio iommu: Cache pgsize_bitmap in struct vfio_iommu

Calculate and cache pgsize_bitmap whenever iommu->domain_list is updated
or iommu->external_domain is set for an mdev device.
Hold iommu->lock whenever the cached pgsize_bitmap is accessed.
Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
Reviewed-by: Neo Jia <cjia@nvidia.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 65817085
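
For context: the bitmap cached here is what userspace reads back through VFIO_IOMMU_GET_INFO (last hunk below). A minimal userspace sketch of that query, assuming `container_fd` is an already-configured VFIO container file descriptor (illustrative only, not part of this patch):

```c
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static void print_iova_pgsizes(int container_fd)
{
	struct vfio_iommu_type1_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);

	/* After this patch, the kernel fills iova_pgsizes from the
	 * cached iommu->pgsize_bitmap, read under iommu->lock. */
	if (ioctl(container_fd, VFIO_IOMMU_GET_INFO, &info))
		return;

	if (info.flags & VFIO_IOMMU_INFO_PGSIZES)
		printf("supported IOVA page sizes: 0x%llx\n",
		       (unsigned long long)info.iova_pgsizes);
}
```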
@@ -69,6 +69,7 @@ struct vfio_iommu {
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
+	uint64_t		pgsize_bitmap;
 	bool			v2;
 	bool			nesting;
 };
@@ -835,15 +836,14 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	iommu->dma_avail++;
 }
 
-static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
+static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
 {
 	struct vfio_domain *domain;
-	unsigned long bitmap = ULONG_MAX;
 
-	mutex_lock(&iommu->lock);
+	iommu->pgsize_bitmap = ULONG_MAX;
 	list_for_each_entry(domain, &iommu->domain_list, next)
-		bitmap &= domain->domain->pgsize_bitmap;
-	mutex_unlock(&iommu->lock);
+		iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
 
 	/*
	 * In case the IOMMU supports page sizes smaller than PAGE_SIZE
@@ -853,12 +853,10 @@ static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
 	 * granularity while iommu driver can use the sub-PAGE_SIZE size
 	 * to map the buffer.
 	 */
-	if (bitmap & ~PAGE_MASK) {
-		bitmap &= PAGE_MASK;
-		bitmap |= PAGE_SIZE;
+	if (iommu->pgsize_bitmap & ~PAGE_MASK) {
+		iommu->pgsize_bitmap &= PAGE_MASK;
+		iommu->pgsize_bitmap |= PAGE_SIZE;
 	}
-
-	return bitmap;
 }
 
 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
@@ -869,19 +867,28 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 	size_t unmapped = 0;
 	int ret = 0, retries = 0;
 
-	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
+	mutex_lock(&iommu->lock);
+
+	mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
+
+	if (unmap->iova & mask) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+	if (!unmap->size || unmap->size & mask) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
-	if (unmap->iova & mask)
-		return -EINVAL;
-	if (!unmap->size || unmap->size & mask)
-		return -EINVAL;
 	if (unmap->iova + unmap->size - 1 < unmap->iova ||
-	    unmap->size > SIZE_MAX)
-		return -EINVAL;
+	    unmap->size > SIZE_MAX) {
+		ret = -EINVAL;
+		goto unlock;
+	}
 
 	WARN_ON(mask & PAGE_MASK);
 again:
-	mutex_lock(&iommu->lock);
 
 	/*
 	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
@@ -960,6 +967,7 @@ static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
 			blocking_notifier_call_chain(&iommu->notifier,
 						     VFIO_IOMMU_NOTIFY_DMA_UNMAP,
 						     &nb_unmap);
+			mutex_lock(&iommu->lock);
 			goto again;
 		}
 		unmapped += dma->size;
@@ -1075,24 +1083,28 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
 		return -EINVAL;
 
-	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
-
-	WARN_ON(mask & PAGE_MASK);
-
 	/* READ/WRITE from device perspective */
 	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
 		prot |= IOMMU_WRITE;
 	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
 		prot |= IOMMU_READ;
 
-	if (!prot || !size || (size | iova | vaddr) & mask)
-		return -EINVAL;
+	mutex_lock(&iommu->lock);
 
-	/* Don't allow IOVA or virtual address wrap */
-	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
-		return -EINVAL;
+	mask = ((uint64_t)1 << __ffs(iommu->pgsize_bitmap)) - 1;
 
-	mutex_lock(&iommu->lock);
+	WARN_ON(mask & PAGE_MASK);
+
+	if (!prot || !size || (size | iova | vaddr) & mask) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	/* Don't allow IOVA or virtual address wrap */
+	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
 	if (vfio_find_dma(iommu, iova, size)) {
 		ret = -EEXIST;
@@ -1698,6 +1710,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 			if (!iommu->external_domain) {
 				INIT_LIST_HEAD(&domain->group_list);
 				iommu->external_domain = domain;
+				vfio_update_pgsize_bitmap(iommu);
 			} else {
 				kfree(domain);
 			}
@@ -1823,6 +1836,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	}
 
 	list_add(&domain->next, &iommu->domain_list);
+	vfio_update_pgsize_bitmap(iommu);
 done:
 	/* Delete the old one and insert new iova list */
 	vfio_iommu_iova_insert_copy(iommu, &iova_copy);
@@ -2034,6 +2048,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 			list_del(&domain->next);
 			kfree(domain);
 			vfio_iommu_aper_expand(iommu, &iova_copy);
+			vfio_update_pgsize_bitmap(iommu);
 		}
 		break;
 	}
@@ -2166,8 +2181,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	size_t size;
 	int iovas = 0, i = 0, ret;
 
-	mutex_lock(&iommu->lock);
-
 	list_for_each_entry(iova, &iommu->iova_list, list)
 		iovas++;
 
@@ -2176,17 +2189,14 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 		 * Return 0 as a container with a single mdev device
 		 * will have an empty list
 		 */
-		ret = 0;
-		goto out_unlock;
+		return 0;
 	}
 
 	size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges));
 
 	cap_iovas = kzalloc(size, GFP_KERNEL);
-	if (!cap_iovas) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (!cap_iovas)
+		return -ENOMEM;
 
 	cap_iovas->nr_iovas = iovas;
 
@@ -2199,8 +2209,6 @@ static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
 	ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size);
 	kfree(cap_iovas);
 
-out_unlock:
-	mutex_unlock(&iommu->lock);
 	return ret;
 }
@@ -2245,11 +2253,13 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 			info.cap_offset = 0; /* output, no-recopy necessary */
 		}
 
+		mutex_lock(&iommu->lock);
 		info.flags = VFIO_IOMMU_INFO_PGSIZES;
-		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
+		info.iova_pgsizes = iommu->pgsize_bitmap;
 
 		ret = vfio_iommu_iova_build_caps(iommu, &caps);
+		mutex_unlock(&iommu->lock);
+
 		if (ret)
 			return ret;
......
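
To illustrate vfio_update_pgsize_bitmap()'s arithmetic: the cache starts at ULONG_MAX, is AND-ed with every domain's pgsize_bitmap, and any surviving bits below PAGE_SIZE are collapsed into a single PAGE_SIZE bit, since vfio pins and accounts memory at PAGE_SIZE granularity. A standalone sketch with made-up domain bitmaps (not kernel code):

```c
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	/* made-up examples: one domain supports 1K|4K|2M|1G,
	 * the other 1K|4K|2M */
	unsigned long domains[] = { 0x40201400UL, 0x00201400UL };
	unsigned long bitmap = ~0UL;
	size_t i;

	/* intersection across all domains, as in the list walk above */
	for (i = 0; i < sizeof(domains) / sizeof(domains[0]); i++)
		bitmap &= domains[i];

	/* collapse sub-PAGE_SIZE sizes (here the 1K bit, 0x400)
	 * into PAGE_SIZE itself */
	if (bitmap & ~PAGE_MASK) {
		bitmap &= PAGE_MASK;
		bitmap |= PAGE_SIZE;
	}

	printf("pgsize_bitmap = 0x%lx\n", bitmap);	/* 0x201000: 4K|2M */
	return 0;
}
```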