Commit 171e7b37 authored by Joerg Roedel

x86/amd-iommu: Rearrange dma_ops related functions

This patch rearranges two dma_ops related functions so that
their forward declarations are no longer necessary.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 308973d3
...@@ -57,11 +57,6 @@ struct iommu_cmd { ...@@ -57,11 +57,6 @@ struct iommu_cmd {
u32 data[4]; u32 data[4];
}; };
static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
struct unity_map_entry *e);
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
unsigned long start_page,
unsigned int pages);
static void reset_iommu_command_buffer(struct amd_iommu *iommu); static void reset_iommu_command_buffer(struct amd_iommu *iommu);
static void update_domain(struct protection_domain *domain); static void update_domain(struct protection_domain *domain);
...@@ -822,28 +817,6 @@ static int iommu_for_unity_map(struct amd_iommu *iommu, ...@@ -822,28 +817,6 @@ static int iommu_for_unity_map(struct amd_iommu *iommu,
return 0; return 0;
} }
/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default domain DMA of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	/* Walk the global list of firmware-requested unity mappings */
	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		/* Skip entries that do not apply to this IOMMU */
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		/* Apply the entry to this IOMMU's default dma_ops domain */
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			/* Propagate the first mapping failure to the caller */
			return ret;
	}

	return 0;
}
/* /*
* This function actually applies the mapping to the page table of the * This function actually applies the mapping to the page table of the
* dma_ops domain. * dma_ops domain.
...@@ -872,6 +845,28 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, ...@@ -872,6 +845,28 @@ static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
return 0; return 0;
} }
/*
 * Init the unity mappings for a specific IOMMU in the system
 *
 * Basically iterates over all unity mapping entries and applies them to
 * the default domain DMA of that IOMMU if necessary.
 */
static int iommu_init_unity_mappings(struct amd_iommu *iommu)
{
	struct unity_map_entry *entry;
	int ret;

	/* Walk the global list of firmware-requested unity mappings */
	list_for_each_entry(entry, &amd_iommu_unity_map, list) {
		/* Skip entries that do not apply to this IOMMU */
		if (!iommu_for_unity_map(iommu, entry))
			continue;
		/* Apply the entry to this IOMMU's default dma_ops domain */
		ret = dma_ops_unity_map(iommu->default_dom, entry);
		if (ret)
			/* Propagate the first mapping failure to the caller */
			return ret;
	}

	return 0;
}
/* /*
* Inits the unity mappings required for a specific device * Inits the unity mappings required for a specific device
*/ */
...@@ -908,6 +903,26 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, ...@@ -908,6 +903,26 @@ static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
* called with domain->lock held * called with domain->lock held
*/ */
/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	/* Total number of pages the aperture currently covers */
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	/* Clamp the requested range so it does not run past the aperture */
	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		/*
		 * The aperture is split into fixed-size ranges, each with
		 * its own allocation bitmap; locate range and bit within it.
		 */
		int index = i / APERTURE_RANGE_PAGES;
		int page = i % APERTURE_RANGE_PAGES;
		/* Mark the page as allocated so the allocator skips it */
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}
/* /*
* This function is used to add a new aperture range to an existing * This function is used to add a new aperture range to an existing
* aperture in case of dma_ops domain allocation or address allocation * aperture in case of dma_ops domain allocation or address allocation
...@@ -1166,26 +1181,6 @@ static void domain_id_free(int id) ...@@ -1166,26 +1181,6 @@ static void domain_id_free(int id)
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
} }
/*
 * Used to reserve address ranges in the aperture (e.g. for exclusion
 * ranges).
 */
static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
				      unsigned long start_page,
				      unsigned int pages)
{
	/* Total number of pages the aperture currently covers */
	unsigned int i, last_page = dom->aperture_size >> PAGE_SHIFT;

	/* Clamp the requested range so it does not run past the aperture */
	if (start_page + pages > last_page)
		pages = last_page - start_page;

	for (i = start_page; i < start_page + pages; ++i) {
		/*
		 * The aperture is split into fixed-size ranges, each with
		 * its own allocation bitmap; locate range and bit within it.
		 */
		int index = i / APERTURE_RANGE_PAGES;
		int page = i % APERTURE_RANGE_PAGES;
		/* Mark the page as allocated so the allocator skips it */
		__set_bit(page, dom->aperture[index]->bitmap);
	}
}
static void free_pagetable(struct protection_domain *domain) static void free_pagetable(struct protection_domain *domain)
{ {
int i, j; int i, j;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment