Commit 260782bc authored by Weidong Han, committed by Joerg Roedel

KVM: use the new intel iommu APIs

The Intel IOMMU APIs have been updated; switch KVM over to the new APIs.

In addition, change kvm_iommu_map_guest() to just create the IOMMU domain, and let kvm_assign_device() do the device assignment.
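For callers, this splits the old one-shot call into a per-VM setup call and a per-device attach call. A minimal sketch of the new calling convention, condensed from the kvm_vm_ioctl_assign_device() hunk in this commit (locking and the out_list_del unwinding are elided; this is an illustration, not code from the commit):

/* Sketch: the dmar domain is created lazily on the first assignment
 * and then shared by every subsequently assigned device. */
static int example_assign(struct kvm *kvm,
			  struct kvm_assigned_dev_kernel *match)
{
	int r;

	if (!kvm->arch.intel_iommu_domain) {
		/* per-VM setup: allocate the domain and map all memslots */
		r = kvm_iommu_map_guest(kvm);	/* no device argument now */
		if (r)
			return r;
	}

	/* per-device attach; called once per assigned device */
	return kvm_assign_device(kvm, match);
}

A second assigned device skips the kvm_iommu_map_guest() step entirely and simply attaches to the existing domain and its guest-memory mappings.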
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent fe40f1e0
@@ -330,9 +330,10 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
 #ifdef CONFIG_DMAR
 int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
 			unsigned long npages);
-int kvm_iommu_map_guest(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *assigned_dev);
+int kvm_iommu_map_guest(struct kvm *kvm);
 int kvm_iommu_unmap_guest(struct kvm *kvm);
+int kvm_assign_device(struct kvm *kvm,
+		      struct kvm_assigned_dev_kernel *assigned_dev);
 #else /* CONFIG_DMAR */
 static inline int kvm_iommu_map_pages(struct kvm *kvm,
 				      gfn_t base_gfn,
@@ -341,9 +342,7 @@ static inline int kvm_iommu_map_pages(struct kvm *kvm,
 	return 0;
 }
 
-static inline int kvm_iommu_map_guest(struct kvm *kvm,
-				      struct kvm_assigned_dev_kernel
-				      *assigned_dev)
+static inline int kvm_iommu_map_guest(struct kvm *kvm)
 {
 	return -ENODEV;
 }
@@ -352,6 +351,12 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 {
 	return 0;
 }
+
+static inline int kvm_assign_device(struct kvm *kvm,
+				    struct kvm_assigned_dev_kernel *assigned_dev)
+{
+	return 0;
+}
 #endif /* CONFIG_DMAR */
 
 static inline void kvm_guest_enter(void)
...
@@ -503,7 +503,12 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
 	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-		r = kvm_iommu_map_guest(kvm, match);
+		if (!kvm->arch.intel_iommu_domain) {
+			r = kvm_iommu_map_guest(kvm);
+			if (r)
+				goto out_list_del;
+		}
+		r = kvm_assign_device(kvm, match);
 		if (r)
 			goto out_list_del;
 	}
...
@@ -45,20 +45,18 @@ int kvm_iommu_map_pages(struct kvm *kvm,
 
 	for (i = 0; i < npages; i++) {
 		/* check if already mapped */
-		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-						     gfn_to_gpa(gfn));
-		if (pfn)
+		if (intel_iommu_iova_to_phys(domain,
+					     gfn_to_gpa(gfn)))
 			continue;
 
 		pfn = gfn_to_pfn(kvm, gfn);
-		r = intel_iommu_page_mapping(domain,
-					     gfn_to_gpa(gfn),
-					     pfn_to_hpa(pfn),
-					     PAGE_SIZE,
-					     DMA_PTE_READ |
-					     DMA_PTE_WRITE);
+		r = intel_iommu_map_address(domain,
+					    gfn_to_gpa(gfn),
+					    pfn_to_hpa(pfn),
+					    PAGE_SIZE,
+					    DMA_PTE_READ | DMA_PTE_WRITE);
 		if (r) {
-			printk(KERN_ERR "kvm_iommu_map_pages:"
+			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%lx\n", pfn);
 			goto unmap_pages;
 		}
@@ -86,50 +84,55 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 	return r;
 }
 
-int kvm_iommu_map_guest(struct kvm *kvm,
-			struct kvm_assigned_dev_kernel *assigned_dev)
+int kvm_assign_device(struct kvm *kvm,
+		      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	struct pci_dev *pdev = NULL;
+	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
 	int r;
 
-	if (!intel_iommu_found()) {
-		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
+	/* check if iommu exists and in use */
+	if (!domain)
+		return 0;
+
+	pdev = assigned_dev->dev;
+	if (pdev == NULL)
 		return -ENODEV;
+
+	r = intel_iommu_attach_device(domain, pdev);
+	if (r) {
+		printk(KERN_ERR "assign device %x:%x.%x failed",
+			pdev->bus->number,
+			PCI_SLOT(pdev->devfn),
+			PCI_FUNC(pdev->devfn));
+		return r;
 	}
 
-	printk(KERN_DEBUG "VT-d direct map: host bdf = %x:%x:%x\n",
+	printk(KERN_DEBUG "assign device: host bdf = %x:%x:%x\n",
 	       assigned_dev->host_busnr,
 	       PCI_SLOT(assigned_dev->host_devfn),
 	       PCI_FUNC(assigned_dev->host_devfn));
 
-	pdev = assigned_dev->dev;
+	return 0;
+}
 
-	if (pdev == NULL) {
-		if (kvm->arch.intel_iommu_domain) {
-			intel_iommu_domain_exit(kvm->arch.intel_iommu_domain);
-			kvm->arch.intel_iommu_domain = NULL;
-		}
+int kvm_iommu_map_guest(struct kvm *kvm)
+{
+	int r;
+
+	if (!intel_iommu_found()) {
+		printk(KERN_ERR "%s: intel iommu not found\n", __func__);
 		return -ENODEV;
 	}
 
-	kvm->arch.intel_iommu_domain = intel_iommu_domain_alloc(pdev);
+	kvm->arch.intel_iommu_domain = intel_iommu_alloc_domain();
 	if (!kvm->arch.intel_iommu_domain)
-		return -ENODEV;
+		return -ENOMEM;
 
 	r = kvm_iommu_map_memslots(kvm);
 	if (r)
 		goto out_unmap;
 
-	intel_iommu_detach_dev(kvm->arch.intel_iommu_domain,
-			       pdev->bus->number, pdev->devfn);
-
-	r = intel_iommu_context_mapping(kvm->arch.intel_iommu_domain,
-					pdev);
-	if (r) {
-		printk(KERN_ERR "Domain context map for %s failed",
-		       pci_name(pdev));
-		goto out_unmap;
-	}
-
 	return 0;
 
 out_unmap:
@@ -138,19 +141,29 @@ int kvm_iommu_map_guest(struct kvm *kvm,
 }
 
 static void kvm_iommu_put_pages(struct kvm *kvm,
 				gfn_t base_gfn, unsigned long npages)
 {
 	gfn_t gfn = base_gfn;
 	pfn_t pfn;
 	struct dmar_domain *domain = kvm->arch.intel_iommu_domain;
-	int i;
+	unsigned long i;
+	u64 phys;
+
+	/* check if iommu exists and in use */
+	if (!domain)
+		return;
 
 	for (i = 0; i < npages; i++) {
-		pfn = (pfn_t)intel_iommu_iova_to_pfn(domain,
-						     gfn_to_gpa(gfn));
+		phys = intel_iommu_iova_to_phys(domain,
+						gfn_to_gpa(gfn));
+		pfn = phys >> PAGE_SHIFT;
 		kvm_release_pfn_clean(pfn);
 		gfn++;
 	}
+
+	intel_iommu_unmap_address(domain,
+				  gfn_to_gpa(base_gfn),
+				  PAGE_SIZE * npages);
 }
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
@@ -182,10 +195,9 @@ int kvm_iommu_unmap_guest(struct kvm *kvm)
 		       PCI_FUNC(entry->host_devfn));
 
 		/* detach kvm dmar domain */
-		intel_iommu_detach_dev(domain, entry->host_busnr,
-				       entry->host_devfn);
+		intel_iommu_detach_device(domain, entry->dev);
 	}
 
 	kvm_iommu_unmap_memslots(kvm);
-	intel_iommu_domain_exit(domain);
+	intel_iommu_free_domain(domain);
 	return 0;
 }
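Read together, the setup and teardown paths under the new API are symmetric. A condensed sketch of the resulting lifecycle, using only names that appear in the diff above (function bodies, error handling, and the per-device list iteration are elided; an illustration, not code from the commit):

/* Per-VM setup/teardown vs. per-device attach under the new API. */
static void example_lifecycle(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *entry)
{
	/* per-VM setup, first assignment only */
	kvm_iommu_map_guest(kvm);	/* intel_iommu_alloc_domain() +
					 * kvm_iommu_map_memslots() */

	/* per-device attach, once per assigned device */
	kvm_assign_device(kvm, entry);	/* intel_iommu_attach_device() */

	/* per-VM teardown */
	kvm_iommu_unmap_guest(kvm);	/* intel_iommu_detach_device() per
					 * device, kvm_iommu_unmap_memslots(),
					 * then intel_iommu_free_domain() */
}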