Commit 04bfdd84 authored by Joerg Roedel

x86/amd-iommu: Flush domains if address space size was increased

This patch introduces the update_domain() function, which
propagates the larger address space of a protection domain
to the device table and flushes all relevant DTEs and the
domain TLB.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 407d733e
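
Note (not part of the patch): a minimal sketch of how the new "updated" flag is meant to be used. grow_address_space() and after_page_table_change() below are hypothetical stand-ins for illustration; in this driver the page table is presumably grown by increase_address_space(), and the mapping paths then call update_domain() as shown in the hunks below.

/*
 * Illustrative sketch, not part of this patch.  grow_address_space()
 * stands in for the driver code that adds a page-table level.
 */
static void grow_address_space(struct protection_domain *domain, u64 *new_root)
{
	domain->pt_root  = new_root;	/* new top-level page table    */
	domain->mode    += 1;		/* one more paging level       */
	domain->updated  = true;	/* request a full domain flush */
}

static void after_page_table_change(struct protection_domain *domain)
{
	/*
	 * A no-op unless domain->updated is set; otherwise every DTE
	 * that references this domain is rewritten, those DTEs are
	 * flushed, and the domain's IOTLB is flushed so devices see
	 * the new pt_root and paging mode.
	 */
	update_domain(domain);
}
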
@@ -235,6 +235,7 @@ struct protection_domain {
 	int mode;		/* paging mode (0-6 levels) */
 	u64 *pt_root;		/* page table root pointer */
 	unsigned long flags;	/* flags to find out type of domain */
+	bool updated;		/* complete domain flush required */
 	unsigned dev_cnt;	/* devices assigned to this domain */
 	void *priv;		/* private data */
 };
...
@@ -63,6 +63,7 @@ static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned int pages);
 static u64 *fetch_pte(struct protection_domain *domain,
 		      unsigned long address);
+static void update_domain(struct protection_domain *domain);
 
 #ifndef BUS_NOTIFY_UNBOUND_DRIVER
 #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
@@ -546,6 +547,8 @@ static int iommu_map_page(struct protection_domain *dom,
 	*pte = __pte;
 
+	update_domain(dom);
+
 	return 0;
 }
@@ -762,9 +765,13 @@ static int alloc_new_range(struct amd_iommu *iommu,
 		dma_ops_reserve_addresses(dma_dom, i << PAGE_SHIFT, 1);
 	}
 
+	update_domain(&dma_dom->domain);
+
 	return 0;
 
 out_free:
+	update_domain(&dma_dom->domain);
+
 	free_page((unsigned long)dma_dom->aperture[index]->bitmap);
 
 	kfree(dma_dom->aperture[index]);
@@ -1294,6 +1301,29 @@ static int get_device_resources(struct device *dev,
 	return 1;
 }
 
+static void update_device_table(struct protection_domain *domain)
+{
+	int i;
+
+	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (amd_iommu_pd_table[i] != domain)
+			continue;
+		set_dte_entry(i, domain);
+	}
+}
+
+static void update_domain(struct protection_domain *domain)
+{
+	if (!domain->updated)
+		return;
+
+	update_device_table(domain);
+	flush_devices_by_domain(domain);
+	iommu_flush_domain(domain->id);
+
+	domain->updated = false;
+}
+
 /*
  * If the pte_page is not yet allocated this function is called
  */
@@ -1351,6 +1381,8 @@ static u64* dma_ops_get_pte(struct dma_ops_domain *dom,
 	} else
 		pte += IOMMU_PTE_L0_INDEX(address);
 
+	update_domain(&dom->domain);
+
 	return pte;
 }
...