Commit ec487d1a authored by Joerg Roedel, committed by Ingo Molnar

x86, AMD IOMMU: add domain allocation and deallocation functions

This patch adds the functions to allocate and free protection domains for the
IOMMU.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: iommu@lists.linux-foundation.org
Cc: bhavna.sarathy@amd.com
Cc: Sebastian.Biemueller@amd.com
Cc: robert.richter@amd.com
Cc: joro@8bytes.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d3086444
@@ -314,3 +314,150 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
        iommu_area_free(dom->bitmap, address, pages);
}
static u16 domain_id_alloc(void)
{
        unsigned long flags;
        int id;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
        BUG_ON(id == 0);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __set_bit(id, amd_iommu_pd_alloc_bitmap);
        else
                id = 0;
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

        return id;
}
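Only the allocation side of the domain-id bitmap is added here. A minimal sketch of the symmetric release helper, assuming the id is simply cleared in the same bitmap under the device-table lock (the name domain_id_free is illustrative and not part of this patch):

/* Hypothetical counterpart -- illustration only, not added by this patch. */
static void domain_id_free(int id)
{
        unsigned long flags;

        write_lock_irqsave(&amd_iommu_devtable_lock, flags);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __clear_bit(id, amd_iommu_pd_alloc_bitmap);
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
}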
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
                                      unsigned long start_page,
                                      unsigned int pages)
{
        unsigned int last_page = dom->aperture_size >> PAGE_SHIFT;

        if (start_page + pages > last_page)
                pages = last_page - start_page;

        set_bit_string(dom->bitmap, start_page, pages);
}
static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
{
        int i, j;
        u64 *p1, *p2, *p3;

        p1 = dma_dom->domain.pt_root;

        if (!p1)
                return;

        for (i = 0; i < 512; ++i) {
                if (!IOMMU_PTE_PRESENT(p1[i]))
                        continue;

                p2 = IOMMU_PTE_PAGE(p1[i]);
                for (j = 0; j < 512; ++j) {
                        if (!IOMMU_PTE_PRESENT(p2[j]))
                                continue;

                        p3 = IOMMU_PTE_PAGE(p2[j]);
                        free_page((unsigned long)p3);
                }

                free_page((unsigned long)p2);
        }

        free_page((unsigned long)p1);
}
static void dma_ops_domain_free(struct dma_ops_domain *dom)
{
        if (!dom)
                return;

        dma_ops_free_pagetable(dom);

        kfree(dom->pte_pages);
        kfree(dom->bitmap);
        kfree(dom);
}
static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
                                                   unsigned order)
{
        struct dma_ops_domain *dma_dom;
        unsigned i, num_pte_pages;
        u64 *l2_pde;
        u64 address;

        /*
         * Currently the DMA aperture must be between 32 MB and 1 GB in size
         */
        if ((order < 25) || (order > 30))
                return NULL;

        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
        if (!dma_dom)
                return NULL;

        spin_lock_init(&dma_dom->domain.lock);

        dma_dom->domain.id = domain_id_alloc();
        if (dma_dom->domain.id == 0)
                goto free_dma_dom;
        dma_dom->domain.mode = PAGE_MODE_3_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.priv = dma_dom;
        if (!dma_dom->domain.pt_root)
                goto free_dma_dom;
        dma_dom->aperture_size = (1ULL << order);
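        /*
         * The allocation bitmap needs one bit per PAGE_SIZE page of the
         * aperture: aperture_size / PAGE_SIZE bits, i.e.
         * aperture_size / (PAGE_SIZE * 8) bytes. For order 28 (256 MB)
         * that is an 8 KB bitmap.
         */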
        dma_dom->bitmap = kzalloc(dma_dom->aperture_size / (PAGE_SIZE * 8),
                                  GFP_KERNEL);
        if (!dma_dom->bitmap)
                goto free_dma_dom;
        /*
         * Mark the first page as allocated so we never return 0 as
         * a valid dma-address; this way 0 can be used as an error value.
         */
        dma_dom->bitmap[0] = 1;
        dma_dom->next_bit = 0;

        if (iommu->exclusion_start &&
            iommu->exclusion_start < dma_dom->aperture_size) {
                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                int pages = to_pages(iommu->exclusion_start,
                                     iommu->exclusion_length);
                dma_ops_reserve_addresses(dma_dom, startpage, pages);
        }
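        /*
         * One leaf page table page holds 512 entries and thus maps
         * 512 * PAGE_SIZE = 2 MB of the aperture, so num_pte_pages is
         * aperture_size / 2 MB. At the maximum order of 30 (1 GB) this
         * is 512 pages, exactly filling one L2 page directory, which is
         * why the order is capped above.
         */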
        num_pte_pages = dma_dom->aperture_size / (PAGE_SIZE * 512);
        dma_dom->pte_pages = kzalloc(num_pte_pages * sizeof(void *),
                                     GFP_KERNEL);
        if (!dma_dom->pte_pages)
                goto free_dma_dom;

        l2_pde = (u64 *)get_zeroed_page(GFP_KERNEL);
        if (l2_pde == NULL)
                goto free_dma_dom;

        dma_dom->domain.pt_root[0] = IOMMU_L2_PDE(virt_to_phys(l2_pde));

        for (i = 0; i < num_pte_pages; ++i) {
                dma_dom->pte_pages[i] = (u64 *)get_zeroed_page(GFP_KERNEL);
                if (!dma_dom->pte_pages[i])
                        goto free_dma_dom;
                address = virt_to_phys(dma_dom->pte_pages[i]);
                l2_pde[i] = IOMMU_L1_PDE(address);
        }

        return dma_dom;

free_dma_dom:
        dma_ops_domain_free(dma_dom);

        return NULL;
}
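This patch only adds the allocator/destructor pair; nothing calls them yet. A minimal caller sketch, assuming an initialized struct amd_iommu (the function name and the order value 28 below are purely illustrative, not part of this patch):

/* Hypothetical caller -- illustration only, not added by this patch. */
static struct dma_ops_domain *example_init_dma_domain(struct amd_iommu *iommu)
{
        struct dma_ops_domain *dma_dom;

        /* order 28 -> 256 MB aperture; must be within the 25..30 window */
        dma_dom = dma_ops_domain_alloc(iommu, 28);
        if (!dma_dom)
                return NULL;    /* out of memory or out of domain ids */

        /* ... program dma_dom->domain.id into the device table ... */

        return dma_dom;
}

On teardown the counterpart is simply dma_ops_domain_free(dma_dom), which also tolerates a NULL pointer.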