Commit 57bdeb72 authored by Joerg Roedel, committed by Jiri Slaby

iommu/amd: Return the pte page-size in fetch_pte

commit 3039ca1b upstream.

Extend the fetch_pte function to also return the page-size
that is mapped by the returned pte.
Tested-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Jiri Slaby <jslaby@suse.cz>
parent 8efd02c4
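For context, a minimal sketch of the new calling convention, assuming the shape of the call sites below: the helper name example_iova_to_phys() is hypothetical and not from this patch; fetch_pte(), IOMMU_PTE_PRESENT() and PM_ADDR_MASK are the driver's own. Every caller now passes an extra out-parameter that receives the size of the mapping the returned PTE covers.

/*
 * Illustrative only: how a caller can consume the new out-parameter.
 * example_iova_to_phys() is a hypothetical helper, not part of this
 * patch.
 */
static phys_addr_t example_iova_to_phys(struct protection_domain *domain,
					unsigned long iova)
{
	unsigned long pte_pgsize;	/* filled in by fetch_pte() */
	u64 *pte, __pte;

	pte = fetch_pte(domain, iova, &pte_pgsize);
	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	/*
	 * pte_pgsize is a power of two, so pte_pgsize - 1 masks off the
	 * offset of iova within the mapped range.
	 */
	__pte = *pte & PM_ADDR_MASK;
	return (__pte & ~((u64)pte_pgsize - 1)) | (iova & (pte_pgsize - 1));
}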
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1376,7 +1376,9 @@ static u64 *alloc_pte(struct protection_domain *domain,
  * This function checks if there is a PTE for a given dma address. If
  * there is one, it returns the pointer to it.
  */
-static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
+static u64 *fetch_pte(struct protection_domain *domain,
+		      unsigned long address,
+		      unsigned long *page_size)
 {
 	int level;
 	u64 *pte;
@@ -1384,8 +1386,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 	if (address > PM_LEVEL_SIZE(domain->mode))
 		return NULL;
 
 	level = domain->mode - 1;
 	pte   = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
+	*page_size = PTE_LEVEL_PAGE_SIZE(level);
 
 	while (level > 0) {
@@ -1394,19 +1397,9 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 			return NULL;
 
 		/* Large PTE */
-		if (PM_PTE_LEVEL(*pte) == 0x07) {
-			unsigned long pte_mask, __pte;
-
-			/*
-			 * If we have a series of large PTEs, make
-			 * sure to return a pointer to the first one.
-			 */
-			pte_mask = PTE_PAGE_SIZE(*pte);
-			pte_mask = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
-			__pte    = ((unsigned long)pte) & pte_mask;
-
-			return (u64 *)__pte;
-		}
+		if (PM_PTE_LEVEL(*pte) == 7 ||
+		    PM_PTE_LEVEL(*pte) == 0)
+			break;
 
 		/* No level skipping support yet */
 		if (PM_PTE_LEVEL(*pte) != level)
@@ -1415,8 +1408,21 @@ static u64 *fetch_pte(struct protection_domain *domain, unsigned long address)
 		level -= 1;
 
 		/* Walk to the next level */
 		pte = IOMMU_PTE_PAGE(*pte);
 		pte = &pte[PM_LEVEL_INDEX(level, address)];
+		*page_size = PTE_LEVEL_PAGE_SIZE(level);
+	}
+
+	if (PM_PTE_LEVEL(*pte) == 0x07) {
+		unsigned long pte_mask;
+
+		/*
+		 * If we have a series of large PTEs, make
+		 * sure to return a pointer to the first one.
+		 */
+		*page_size = pte_mask = PTE_PAGE_SIZE(*pte);
+		pte_mask   = ~((PAGE_SIZE_PTE_COUNT(pte_mask) << 3) - 1);
+		pte        = (u64 *)(((unsigned long)pte) & pte_mask);
 	}
 
 	return pte;
@@ -1474,6 +1480,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 			       unsigned long page_size)
 {
 	unsigned long long unmap_size, unmapped;
+	unsigned long pte_pgsize;
 	u64 *pte;
 
 	BUG_ON(!is_power_of_2(page_size));
@@ -1482,7 +1489,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 	while (unmapped < page_size) {
 
-		pte = fetch_pte(dom, bus_addr);
+		pte = fetch_pte(dom, bus_addr, &pte_pgsize);
 
 		if (!pte) {
 			/*
@@ -1725,7 +1732,8 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 	for (i = dma_dom->aperture[index]->offset;
 	     i < dma_dom->aperture_size;
 	     i += PAGE_SIZE) {
-		u64 *pte = fetch_pte(&dma_dom->domain, i);
+		unsigned long pte_pgsize;
+		u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
 		if (!pte || !IOMMU_PTE_PRESENT(*pte))
 			continue;
@@ -3439,14 +3447,14 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 					  dma_addr_t iova)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long offset_mask;
+	unsigned long offset_mask, pte_pgsize;
 	phys_addr_t paddr;
 	u64 *pte, __pte;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return iova;
 
-	pte = fetch_pte(domain, iova);
+	pte = fetch_pte(domain, iova, &pte_pgsize);
 
 	if (!pte || !IOMMU_PTE_PRESENT(*pte))
 		return 0;
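The pointer masking in the relocated large-PTE block deserves a note: a mapping smaller than a level's default size is encoded as a run of PAGE_SIZE_PTE_COUNT() identical 8-byte PTEs, so clearing the low bits of the pte pointer rewinds to the first entry of that run. A standalone sketch of the arithmetic follows; the two macros are copied from amd_iommu_types.h, the ffz()/__ffs() stand-ins and all addresses are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel bit helpers (assumptions): */
#define ffz(x)   __builtin_ctzll(~(x))	/* index of first zero bit */
#define __ffs(x) __builtin_ctzll(x)	/* index of first set bit  */

/* Copied from drivers/iommu/amd_iommu_types.h: */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
#define PAGE_SIZE_PTE_COUNT(pgsize) \
	(1ULL << ((__ffs(pgsize) - 12) % 9))

int main(void)
{
	/*
	 * A 32K mapping sets address bits 12..13 in each PTE of the
	 * series (the size encoding for 2^15); 0x3000 below is only
	 * that encoding, not a real physical address.
	 */
	uint64_t pte      = 0x3000;			/* hypothetical large PTE  */
	uint64_t pgsize   = PTE_PAGE_SIZE(pte);		/* -> 0x8000 (32K)         */
	uint64_t count    = PAGE_SIZE_PTE_COUNT(pgsize);/* -> 8 consecutive PTEs   */
	uint64_t pte_mask = ~((count << 3) - 1);	/* -> ~0x3f (64-byte run)  */

	/* Any pointer inside the 8-entry series rewinds to its start: */
	uintptr_t pte_ptr = 0x1030;	/* made-up address of the 7th entry */
	printf("pgsize=%#llx count=%llu first=%#lx\n",
	       (unsigned long long)pgsize, (unsigned long long)count,
	       (unsigned long)(pte_ptr & pte_mask));	/* first=0x1000 */
	return 0;
}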
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -276,6 +276,12 @@
 #define PTE_PAGE_SIZE(pte) \
 	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
 
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level)	\
+	(1ULL << (12 + (9 * (level))))
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
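Worked values for the new macro, since each page-table level indexes 9 address bits on top of the 4K base page (a sanity check, not part of the patch):

#define PTE_LEVEL_PAGE_SIZE(level)	(1ULL << (12 + (9 * (level))))

_Static_assert(PTE_LEVEL_PAGE_SIZE(0) ==     0x1000ULL, "level 0: 4K");
_Static_assert(PTE_LEVEL_PAGE_SIZE(1) ==   0x200000ULL, "level 1: 2M");
_Static_assert(PTE_LEVEL_PAGE_SIZE(2) == 0x40000000ULL, "level 2: 1G");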