Commit a68a7509 authored by Linus Torvalds

Merge tag 'vfio-v3.16-rc1' of git://github.com/awilliam/linux-vfio into next

Pull VFIO updates from Alex Williamson:
 "A handful of VFIO bug fixes for v3.16"

* tag 'vfio-v3.16-rc1' of git://github.com/awilliam/linux-vfio:
  drivers/vfio/pci: Fix wrong MSI interrupt count
  drivers/vfio: Rework offsetofend()
  vfio/iommu_type1: Avoid overflow
  vfio/pci: Fix unchecked return value
  vfio/pci: Fix sizing of DPA and THP express capabilities
parents 639b4ac6 fd49c81f
@@ -57,7 +57,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
 
         ret = vfio_config_init(vdev);
         if (ret) {
-                pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state);
+                kfree(vdev->pci_saved_state);
+                vdev->pci_saved_state = NULL;
                 pci_disable_device(pdev);
                 return ret;
         }
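
Note on the hunk above ("vfio/pci: Fix unchecked return value"): instead of calling pci_load_and_free_saved_state() and ignoring its return value, the error path now frees the saved-state copy directly and clears the pointer so nothing can restore or free it again later. A minimal userspace sketch of that cleanup pattern (the struct and helper below are illustrative stand-ins, not the kernel API):

#include <stdlib.h>

/* Illustrative stand-in for a device that keeps a heap copy of saved
 * config state; not the real struct vfio_pci_device. */
struct fake_vdev {
        void *pci_saved_state;
};

/* Error-path cleanup matching the pattern in the hunk: free the copy and
 * clear the pointer so later teardown cannot free or restore it twice. */
static void discard_saved_state(struct fake_vdev *vdev)
{
        free(vdev->pci_saved_state);
        vdev->pci_saved_state = NULL;
}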
@@ -196,8 +197,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
                 if (pos) {
                         pci_read_config_word(vdev->pdev,
                                              pos + PCI_MSI_FLAGS, &flags);
-                        return 1 << (flags & PCI_MSI_FLAGS_QMASK);
+                        return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
                 }
         } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
                 u8 pos;
......
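
A note on the QMASK hunk just above: the Multiple Message Capable field sits in bits 3:1 of the MSI Message Control word, so the masked value must be shifted down by one before it can serve as a power-of-two exponent; without the shift the exponent is twice as large as it should be. A self-contained sketch of the corrected calculation (the mask value mirrors PCI_MSI_FLAGS_QMASK from the kernel's pci_regs.h; the helper name and example flags value are made up):

#include <stdint.h>
#include <stdio.h>

/* Mirrors include/uapi/linux/pci_regs.h: Multiple Message Capable,
 * bits 3:1 of the MSI Message Control register. */
#define PCI_MSI_FLAGS_QMASK     0x000e

/* Hypothetical helper: how many MSI vectors a device advertises. */
static int msi_vector_count(uint16_t flags)
{
        /* The field encodes log2(vectors); shift it down to bit 0 before
         * using it as an exponent. */
        return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
}

int main(void)
{
        uint16_t flags = 0x0086;        /* example: 64-bit capable, MMC = 3 */

        /* Old code: 1 << (0x0086 & 0xe)        = 1 << 6 = 64 (wrong)
         * Fixed:    1 << ((0x0086 & 0xe) >> 1) = 1 << 3 = 8            */
        printf("vectors advertised: %d\n", msi_vector_count(flags));
        return 0;
}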
@@ -1126,8 +1126,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
                         return pcibios_err_to_errno(ret);
 
                 byte &= PCI_DPA_CAP_SUBSTATE_MASK;
-                byte = round_up(byte + 1, 4);
-                return PCI_DPA_BASE_SIZEOF + byte;
+                return PCI_DPA_BASE_SIZEOF + byte + 1;
         case PCI_EXT_CAP_ID_TPH:
                 ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
                 if (ret)
@@ -1136,9 +1135,9 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
                 if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
                         int sts;
 
-                        sts = byte & PCI_TPH_CAP_ST_MASK;
+                        sts = dword & PCI_TPH_CAP_ST_MASK;
                         sts >>= PCI_TPH_CAP_ST_SHIFT;
-                        return PCI_TPH_BASE_SIZEOF + round_up(sts * 2, 4);
+                        return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
                 }
                 return PCI_TPH_BASE_SIZEOF;
         default:
......
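
On the two sizing hunks above: the DPA Substate Max field (PCI_DPA_CAP_SUBSTATE_MASK) holds the highest substate number, and each substate adds a one-byte Substate Power Allocation register, so the capability spans the base registers plus substate_max + 1 bytes. The TPH ST Table Size field is the number of two-byte steering-tag entries minus one, so an in-capability table adds sts * 2 + 2 bytes; the old code also read that field from the stale byte variable instead of dword. A small sketch of the corrected arithmetic with made-up register values (the *_BASE_SIZEOF placeholders stand in for the driver's own constants; the mask and shift values mirror pci_regs.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative base sizes (fixed registers before the variable arrays);
 * the vfio-pci driver defines its own PCI_DPA_BASE_SIZEOF /
 * PCI_TPH_BASE_SIZEOF constants for this. */
#define DPA_BASE_SIZEOF         16
#define TPH_BASE_SIZEOF         12

/* These three mirror include/uapi/linux/pci_regs.h. */
#define PCI_DPA_CAP_SUBSTATE_MASK       0x1f            /* Substate Max, bits 4:0 */
#define PCI_TPH_CAP_ST_MASK             0x07ff0000      /* ST Table Size, bits 26:16 */
#define PCI_TPH_CAP_ST_SHIFT            16

int main(void)
{
        uint8_t dpa_cap = 0x03;         /* example: Substate Max = 3 */
        uint32_t tph_cap = 0x003f0000;  /* example: ST Table Size = 63 */

        /* One 1-byte Substate Power Allocation register per substate, and
         * Substate Max is the highest substate number, so there are
         * (substate_max + 1) of them: 16 + 3 + 1 = 20 bytes here. */
        unsigned int dpa_len = DPA_BASE_SIZEOF +
                               (dpa_cap & PCI_DPA_CAP_SUBSTATE_MASK) + 1;

        /* ST Table Size is entries - 1, each entry is 2 bytes:
         * 12 + 63 * 2 + 2 = 140 bytes here. */
        unsigned int sts = (tph_cap & PCI_TPH_CAP_ST_MASK) >> PCI_TPH_CAP_ST_SHIFT;
        unsigned int tph_len = TPH_BASE_SIZEOF + (sts * 2) + 2;

        printf("DPA length: %u, TPH length: %u\n", dpa_len, tph_len);
        return 0;
}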
@@ -524,7 +524,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 static int vfio_dma_do_map(struct vfio_iommu *iommu,
                            struct vfio_iommu_type1_dma_map *map)
 {
-        dma_addr_t end, iova;
+        dma_addr_t iova = map->iova;
         unsigned long vaddr = map->vaddr;
         size_t size = map->size;
         long npage;
@@ -533,39 +533,30 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
         struct vfio_dma *dma;
         unsigned long pfn;
 
-        end = map->iova + map->size;
+        /* Verify that none of our __u64 fields overflow */
+        if (map->size != size || map->vaddr != vaddr || map->iova != iova)
+                return -EINVAL;
 
         mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
 
+        WARN_ON(mask & PAGE_MASK);
+
         /* READ/WRITE from device perspective */
         if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
                 prot |= IOMMU_WRITE;
         if (map->flags & VFIO_DMA_MAP_FLAG_READ)
                 prot |= IOMMU_READ;
 
-        if (!prot)
-                return -EINVAL; /* No READ/WRITE? */
-
-        if (vaddr & mask)
-                return -EINVAL;
-        if (map->iova & mask)
-                return -EINVAL;
-        if (!map->size || map->size & mask)
-                return -EINVAL;
-
-        WARN_ON(mask & PAGE_MASK);
-
-        /* Don't allow IOVA wrap */
-        if (end && end < map->iova)
+        if (!prot || !size || (size | iova | vaddr) & mask)
                 return -EINVAL;
 
-        /* Don't allow virtual address wrap */
-        if (vaddr + map->size && vaddr + map->size < vaddr)
+        /* Don't allow IOVA or virtual address wrap */
+        if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
                 return -EINVAL;
 
         mutex_lock(&iommu->lock);
 
-        if (vfio_find_dma(iommu, map->iova, map->size)) {
+        if (vfio_find_dma(iommu, iova, size)) {
                 mutex_unlock(&iommu->lock);
                 return -EEXIST;
         }
@@ -576,17 +567,17 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                 return -ENOMEM;
         }
 
-        dma->iova = map->iova;
-        dma->vaddr = map->vaddr;
+        dma->iova = iova;
+        dma->vaddr = vaddr;
         dma->prot = prot;
 
         /* Insert zero-sized and grow as we map chunks of it */
         vfio_link_dma(iommu, dma);
 
-        for (iova = map->iova; iova < end; iova += size, vaddr += size) {
+        while (size) {
                 /* Pin a contiguous chunk of memory */
-                npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT,
-                                       prot, &pfn);
+                npage = vfio_pin_pages(vaddr + dma->size,
+                                       size >> PAGE_SHIFT, prot, &pfn);
                 if (npage <= 0) {
                         WARN_ON(!npage);
                         ret = (int)npage;
@@ -594,14 +585,14 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
                 }
 
                 /* Map it! */
-                ret = vfio_iommu_map(iommu, iova, pfn, npage, prot);
+                ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
                 if (ret) {
                         vfio_unpin_pages(pfn, npage, prot, true);
                         break;
                 }
 
-                size = npage << PAGE_SHIFT;
-                dma->size += size;
+                size -= npage << PAGE_SHIFT;
+                dma->size += npage << PAGE_SHIFT;
         }
 
         if (ret)
......
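
The rework above validates the user-supplied map before doing any arithmetic on it: the __u64 iova/vaddr/size must survive assignment into dma_addr_t, unsigned long and size_t (which can be narrower than 64 bits), all three must be aligned to the smallest supported IOMMU page size, and neither iova + size nor vaddr + size may wrap, tested as x + size - 1 < x so that a range ending exactly at the top of the address space is still allowed. A self-contained sketch of the same checks, with deliberately 32-bit stand-ins for the kernel types so the truncation case is visible:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for the kernel types on a hypothetical 32-bit build, where
 * dma_addr_t / size_t can be narrower than the ioctl's __u64 fields. */
typedef uint32_t fake_dma_addr_t;
typedef uint32_t fake_size_t;

/* Validate a map request the same way the reworked code does:
 * 1. the 64-bit user values must survive assignment to narrower types,
 * 2. everything must be aligned to the IOMMU page mask,
 * 3. neither range may wrap around the end of the address space. */
static bool map_request_ok(uint64_t u_iova, uint64_t u_vaddr, uint64_t u_size,
                           uint64_t mask)
{
        fake_dma_addr_t iova = u_iova;
        fake_dma_addr_t vaddr = u_vaddr;
        fake_size_t size = u_size;

        if (u_size != size || u_vaddr != vaddr || u_iova != iova)
                return false;           /* __u64 field overflowed local type */

        if (!size || (size | iova | vaddr) & mask)
                return false;           /* zero-sized or unaligned */

        if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
                return false;           /* IOVA or vaddr range wraps */

        return true;
}

int main(void)
{
        uint64_t mask = 0xfff;          /* 4K IOMMU pages */

        /* Fits, aligned, no wrap: accepted. */
        printf("%d\n", map_request_ok(0x100000, 0x200000, 0x10000, mask));
        /* 64-bit size silently truncates to 0 in a 32-bit size_t: rejected. */
        printf("%d\n", map_request_ok(0x100000, 0x200000, 0x100000000ULL, mask));
        /* Range wraps past the top of the 32-bit address space: rejected. */
        printf("%d\n", map_request_ok(0xfffff000u, 0x200000, 0x2000, mask));
        return 0;
}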
@@ -86,9 +86,8 @@ extern void vfio_unregister_iommu_driver(
  * from user space.  This allows us to easily determine if the provided
  * structure is sized to include various fields.
  */
-#define offsetofend(TYPE, MEMBER) ({ \
-        TYPE tmp; \
-        offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \
+#define offsetofend(TYPE, MEMBER) \
+        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
 
 /*
  * External user API
......
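
The offsetofend() rework above replaces the GCC statement expression, which declares a temporary and therefore only works inside a function body, with a plain constant expression built on the ((TYPE *)0)->MEMBER idiom (it also drops the stray trailing line continuation). A userspace sketch of the macro and the argsz-style check VFIO uses it for (the structure here is a simplified stand-in, not the real UAPI layout):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the reworked macro: offset of the end of MEMBER. */
#define offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Simplified stand-in for a variable-sized ioctl argument; not the real
 * VFIO UAPI structure. */
struct fake_dma_map {
        uint32_t argsz;         /* caller fills in the size it knows about */
        uint32_t flags;
        uint64_t vaddr;
        uint64_t iova;
        uint64_t size;
};

int main(void)
{
        struct fake_dma_map map = { .argsz = sizeof(map) };

        /* Because the macro is now a constant expression, it can size
         * arrays, appear in static initializers, and so on. */
        static const size_t minsz = offsetofend(struct fake_dma_map, size);

        /* Typical use: reject callers whose structure is too short to
         * contain every field we need. */
        if (map.argsz < minsz) {
                fprintf(stderr, "argsz %u too small (need %zu)\n",
                        map.argsz, minsz);
                return 1;
        }

        printf("minsz = %zu\n", minsz);
        return 0;
}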