Commit a68a7509 authored by Linus Torvalds

Merge tag 'vfio-v3.16-rc1' of git://github.com/awilliam/linux-vfio into next

Pull VFIO updates from Alex Williamson:
 "A handful of VFIO bug fixes for v3.16"

* tag 'vfio-v3.16-rc1' of git://github.com/awilliam/linux-vfio:
  drivers/vfio/pci: Fix wrong MSI interrupt count
  drivers/vfio: Rework offsetofend()
  vfio/iommu_type1: Avoid overflow
  vfio/pci: Fix unchecked return value
  vfio/pci: Fix sizing of DPA and THP express capabilities
parents 639b4ac6 fd49c81f
...@@ -57,7 +57,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) ...@@ -57,7 +57,8 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
ret = vfio_config_init(vdev); ret = vfio_config_init(vdev);
if (ret) { if (ret) {
pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state); kfree(vdev->pci_saved_state);
vdev->pci_saved_state = NULL;
pci_disable_device(pdev); pci_disable_device(pdev);
return ret; return ret;
} }
...@@ -196,8 +197,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) ...@@ -196,8 +197,7 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
if (pos) { if (pos) {
pci_read_config_word(vdev->pdev, pci_read_config_word(vdev->pdev,
pos + PCI_MSI_FLAGS, &flags); pos + PCI_MSI_FLAGS, &flags);
return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
return 1 << (flags & PCI_MSI_FLAGS_QMASK);
} }
} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) { } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
u8 pos; u8 pos;
......
...@@ -1126,8 +1126,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos) ...@@ -1126,8 +1126,7 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
return pcibios_err_to_errno(ret); return pcibios_err_to_errno(ret);
byte &= PCI_DPA_CAP_SUBSTATE_MASK; byte &= PCI_DPA_CAP_SUBSTATE_MASK;
byte = round_up(byte + 1, 4); return PCI_DPA_BASE_SIZEOF + byte + 1;
return PCI_DPA_BASE_SIZEOF + byte;
case PCI_EXT_CAP_ID_TPH: case PCI_EXT_CAP_ID_TPH:
ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword); ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword);
if (ret) if (ret)
...@@ -1136,9 +1135,9 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos) ...@@ -1136,9 +1135,9 @@ static int vfio_ext_cap_len(struct vfio_pci_device *vdev, u16 ecap, u16 epos)
if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) { if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) {
int sts; int sts;
sts = byte & PCI_TPH_CAP_ST_MASK; sts = dword & PCI_TPH_CAP_ST_MASK;
sts >>= PCI_TPH_CAP_ST_SHIFT; sts >>= PCI_TPH_CAP_ST_SHIFT;
return PCI_TPH_BASE_SIZEOF + round_up(sts * 2, 4); return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2;
} }
return PCI_TPH_BASE_SIZEOF; return PCI_TPH_BASE_SIZEOF;
default: default:
......
...@@ -524,7 +524,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, ...@@ -524,7 +524,7 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
static int vfio_dma_do_map(struct vfio_iommu *iommu, static int vfio_dma_do_map(struct vfio_iommu *iommu,
struct vfio_iommu_type1_dma_map *map) struct vfio_iommu_type1_dma_map *map)
{ {
dma_addr_t end, iova; dma_addr_t iova = map->iova;
unsigned long vaddr = map->vaddr; unsigned long vaddr = map->vaddr;
size_t size = map->size; size_t size = map->size;
long npage; long npage;
...@@ -533,39 +533,30 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, ...@@ -533,39 +533,30 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
struct vfio_dma *dma; struct vfio_dma *dma;
unsigned long pfn; unsigned long pfn;
end = map->iova + map->size; /* Verify that none of our __u64 fields overflow */
if (map->size != size || map->vaddr != vaddr || map->iova != iova)
return -EINVAL;
mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
WARN_ON(mask & PAGE_MASK);
/* READ/WRITE from device perspective */ /* READ/WRITE from device perspective */
if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
prot |= IOMMU_WRITE; prot |= IOMMU_WRITE;
if (map->flags & VFIO_DMA_MAP_FLAG_READ) if (map->flags & VFIO_DMA_MAP_FLAG_READ)
prot |= IOMMU_READ; prot |= IOMMU_READ;
if (!prot) if (!prot || !size || (size | iova | vaddr) & mask)
return -EINVAL; /* No READ/WRITE? */
if (vaddr & mask)
return -EINVAL;
if (map->iova & mask)
return -EINVAL;
if (!map->size || map->size & mask)
return -EINVAL;
WARN_ON(mask & PAGE_MASK);
/* Don't allow IOVA wrap */
if (end && end < map->iova)
return -EINVAL; return -EINVAL;
/* Don't allow virtual address wrap */ /* Don't allow IOVA or virtual address wrap */
if (vaddr + map->size && vaddr + map->size < vaddr) if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
return -EINVAL; return -EINVAL;
mutex_lock(&iommu->lock); mutex_lock(&iommu->lock);
if (vfio_find_dma(iommu, map->iova, map->size)) { if (vfio_find_dma(iommu, iova, size)) {
mutex_unlock(&iommu->lock); mutex_unlock(&iommu->lock);
return -EEXIST; return -EEXIST;
} }
...@@ -576,17 +567,17 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, ...@@ -576,17 +567,17 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
return -ENOMEM; return -ENOMEM;
} }
dma->iova = map->iova; dma->iova = iova;
dma->vaddr = map->vaddr; dma->vaddr = vaddr;
dma->prot = prot; dma->prot = prot;
/* Insert zero-sized and grow as we map chunks of it */ /* Insert zero-sized and grow as we map chunks of it */
vfio_link_dma(iommu, dma); vfio_link_dma(iommu, dma);
for (iova = map->iova; iova < end; iova += size, vaddr += size) { while (size) {
/* Pin a contiguous chunk of memory */ /* Pin a contiguous chunk of memory */
npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT, npage = vfio_pin_pages(vaddr + dma->size,
prot, &pfn); size >> PAGE_SHIFT, prot, &pfn);
if (npage <= 0) { if (npage <= 0) {
WARN_ON(!npage); WARN_ON(!npage);
ret = (int)npage; ret = (int)npage;
...@@ -594,14 +585,14 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, ...@@ -594,14 +585,14 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
} }
/* Map it! */ /* Map it! */
ret = vfio_iommu_map(iommu, iova, pfn, npage, prot); ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
if (ret) { if (ret) {
vfio_unpin_pages(pfn, npage, prot, true); vfio_unpin_pages(pfn, npage, prot, true);
break; break;
} }
size = npage << PAGE_SHIFT; size -= npage << PAGE_SHIFT;
dma->size += size; dma->size += npage << PAGE_SHIFT;
} }
if (ret) if (ret)
......
...@@ -86,9 +86,8 @@ extern void vfio_unregister_iommu_driver( ...@@ -86,9 +86,8 @@ extern void vfio_unregister_iommu_driver(
* from user space. This allows us to easily determine if the provided * from user space. This allows us to easily determine if the provided
* structure is sized to include various fields. * structure is sized to include various fields.
*/ */
#define offsetofend(TYPE, MEMBER) ({ \ #define offsetofend(TYPE, MEMBER) \
TYPE tmp; \ (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \
/* /*
* External user API * External user API
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment