Commit d7b3de91 authored by Ben Widawsky, committed by Daniel Vetter

drm/i915: page table abstractions

When we move to dynamic page allocation, keeping page_directory and page tables as
separate structures will help to break actions into simpler tasks.

To help transition the code nicely there is some wasted space in gen6/7.
This will be ameliorated shortly.

Following the x86 pagetable terminology:
PDPE = struct i915_page_directory_pointer_entry.
PDE = struct i915_page_directory_entry [page_directory].
PTE = struct i915_page_table_entry [page_tables].

v2: fixed mismatches after clean-up/rebase.

v3: Clarify the names of the multiple levels of page tables (Daniel)

v4: Addressing Mika's review comments.
s/gen8_free_page_directories/gen8_free_page_directory and free the
page tables for the directory there.
In gen8_ppgtt_allocate_page_directories, do not leak previously allocated
pt in case the page_directory alloc fails.
Update error return handling in gen8_ppgtt_alloc.

v5: Do not leak pt on error in gen6_ppgtt_allocate_page_tables. (Mika)

v6: s/page_tables/page_table/. (Mika)

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 7ad14a29
......@@ -338,7 +338,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
struct i915_page_directory_entry *pd = &ppgtt->pdp.page_directory[pdpe];
struct page *page_table = pd->page_table[pde].page;
last_pte = pte + num_entries;
if (last_pte > GEN8_PTES_PER_PAGE)
......@@ -382,8 +383,12 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
break;
if (pt_vaddr == NULL)
pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
if (pt_vaddr == NULL) {
struct i915_page_directory_entry *pd = &ppgtt->pdp.page_directory[pdpe];
struct page *page_table = pd->page_table[pde].page;
pt_vaddr = kmap_atomic(page_table);
}
pt_vaddr[pte] =
gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
......@@ -407,29 +412,33 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
}
}
static void gen8_free_page_tables(struct page **pt_pages)
static void gen8_free_page_tables(struct i915_page_directory_entry *pd)
{
int i;
if (pt_pages == NULL)
if (pd->page_table == NULL)
return;
for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
if (pt_pages[i])
__free_pages(pt_pages[i], 0);
if (pd->page_table[i].page)
__free_page(pd->page_table[i].page);
}
/*
 * Free all memory owned by one gen8 page directory entry: the backing
 * pages of its page tables, the page_table entry array itself, and the
 * directory's own backing page.
 */
static void gen8_free_page_directory(struct i915_page_directory_entry *pd)
{
/* Free the per-table backing pages before dropping the array that tracks them. */
gen8_free_page_tables(pd);
kfree(pd->page_table); /* the i915_page_table_entry array */
__free_page(pd->page); /* the directory's own page of PDEs */
}
static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
kfree(ppgtt->gen8_pt_pages[i]);
gen8_free_page_directory(&ppgtt->pdp.page_directory[i]);
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
__free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
......@@ -464,86 +473,77 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
gen8_ppgtt_free(ppgtt);
}
static struct page **__gen8_alloc_page_tables(void)
static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
struct page **pt_pages;
int i;
pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
if (!pt_pages)
return ERR_PTR(-ENOMEM);
for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
pt_pages[i] = alloc_page(GFP_KERNEL);
if (!pt_pages[i])
goto bail;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
return -ENOMEM;
}
return pt_pages;
bail:
gen8_free_page_tables(pt_pages);
kfree(pt_pages);
return ERR_PTR(-ENOMEM);
return 0;
}
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
struct page **pt_pages[GEN8_LEGACY_PDPES];
int i, ret;
int i, j;
for (i = 0; i < max_pdp; i++) {
pt_pages[i] = __gen8_alloc_page_tables();
if (IS_ERR(pt_pages[i])) {
ret = PTR_ERR(pt_pages[i]);
for (i = 0; i < ppgtt->num_pd_pages; i++) {
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
struct i915_page_table_entry *pt = &ppgtt->pdp.page_directory[i].page_table[j];
pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pt->page)
goto unwind_out;
}
}
/* NB: Avoid touching gen8_pt_pages until last to keep the allocation,
* "atomic" - for cleanup purposes.
*/
for (i = 0; i < max_pdp; i++)
ppgtt->gen8_pt_pages[i] = pt_pages[i];
return 0;
unwind_out:
while (i--) {
gen8_free_page_tables(pt_pages[i]);
kfree(pt_pages[i]);
}
while (i--)
gen8_free_page_tables(&ppgtt->pdp.page_directory[i]);
return ret;
return -ENOMEM;
}
static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
{
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
sizeof(dma_addr_t),
GFP_KERNEL);
if (!ppgtt->gen8_pt_dma_addr[i])
return -ENOMEM;
}
for (i = 0; i < max_pdp; i++) {
struct i915_page_table_entry *pt;
return 0;
}
pt = kcalloc(GEN8_PDES_PER_PAGE, sizeof(*pt), GFP_KERNEL);
if (!pt)
goto unwind_out;
static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
const int max_pdp)
{
ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
if (!ppgtt->pd_pages)
return -ENOMEM;
ppgtt->pdp.page_directory[i].page = alloc_page(GFP_KERNEL);
if (!ppgtt->pdp.page_directory[i].page) {
kfree(pt);
goto unwind_out;
}
ppgtt->pdp.page_directory[i].page_table = pt;
}
ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
ppgtt->num_pd_pages = max_pdp;
BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
return 0;
unwind_out:
while (i--) {
kfree(ppgtt->pdp.page_directory[i].page_table);
__free_page(ppgtt->pdp.page_directory[i].page);
}
return -ENOMEM;
}
static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
......@@ -555,18 +555,20 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
if (ret)
return ret;
ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
if (ret) {
__free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
return ret;
}
ret = gen8_ppgtt_allocate_page_tables(ppgtt);
if (ret)
goto err_out;
ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
ret = gen8_ppgtt_allocate_dma(ppgtt);
if (ret)
gen8_ppgtt_free(ppgtt);
goto err_out;
return 0;
err_out:
gen8_ppgtt_free(ppgtt);
return ret;
}
......@@ -577,7 +579,7 @@ static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
int ret;
pd_addr = pci_map_page(ppgtt->base.dev->pdev,
&ppgtt->pd_pages[pd], 0,
ppgtt->pdp.page_directory[pd].page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
......@@ -597,7 +599,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
struct page *p;
int ret;
p = ppgtt->gen8_pt_pages[pd][pt];
p = ppgtt->pdp.page_directory[pd].page_table[pt].page;
pt_addr = pci_map_page(ppgtt->base.dev->pdev,
p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
......@@ -658,7 +660,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
*/
for (i = 0; i < max_pdp; i++) {
gen8_ppgtt_pde_t *pd_vaddr;
pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i].page);
for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
......@@ -721,7 +723,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde].page);
for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) {
unsigned long va =
(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
......@@ -936,7 +938,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt].page);
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
......@@ -965,7 +967,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
pt_vaddr = NULL;
for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
if (pt_vaddr == NULL)
pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt].page);
pt_vaddr[act_pte] =
vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
......@@ -1000,8 +1002,9 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
if (ppgtt->pd.page_table[i].page)
__free_page(ppgtt->pd.page_table[i].page);
kfree(ppgtt->pd.page_table);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
......@@ -1058,17 +1061,18 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
struct i915_page_table_entry *pt;
int i;
ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
GFP_KERNEL);
if (!ppgtt->pt_pages)
pt = kcalloc(ppgtt->num_pd_entries, sizeof(*pt), GFP_KERNEL);
if (!pt)
return -ENOMEM;
ppgtt->pd.page_table = pt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i]) {
pt[i].page = alloc_page(GFP_KERNEL);
if (!pt->page) {
gen6_ppgtt_free(ppgtt);
return -ENOMEM;
}
......@@ -1108,9 +1112,11 @@ static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
struct page *page;
dma_addr_t pt_addr;
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
page = ppgtt->pd.page_table[i].page;
pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
......
......@@ -187,6 +187,20 @@ struct i915_vma {
u32 flags);
};
/*
 * Page-table abstraction structs, following x86 pagetable terminology:
 * PTE level = i915_page_table_entry, PDE level = i915_page_directory_entry,
 * PDPE level = i915_page_directory_pointer_entry.
 */
struct i915_page_table_entry {
struct page *page; /* backing page holding this table's PTEs */
};
struct i915_page_directory_entry {
struct page *page; /* NULL for GEN6-GEN7 */
/*
 * Array of page tables: GEN8_PDES_PER_PAGE entries on gen8,
 * num_pd_entries entries on gen6/7 (see the respective allocators).
 */
struct i915_page_table_entry *page_table;
};
struct i915_page_directory_pointer_entry {
/* struct page *page; */
struct i915_page_directory_entry page_directory[GEN8_LEGACY_PDPES];
};
struct i915_address_space {
struct drm_mm mm;
struct drm_device *dev;
......@@ -271,11 +285,6 @@ struct i915_hw_ppgtt {
struct drm_mm_node node;
unsigned num_pd_entries;
unsigned num_pd_pages; /* gen8+ */
union {
struct page **pt_pages;
struct page **gen8_pt_pages[GEN8_LEGACY_PDPES];
};
struct page *pd_pages;
union {
uint32_t pd_offset;
dma_addr_t pd_dma_addr[GEN8_LEGACY_PDPES];
......@@ -284,6 +293,10 @@ struct i915_hw_ppgtt {
dma_addr_t *pt_dma_addr;
dma_addr_t *gen8_pt_dma_addr[GEN8_LEGACY_PDPES];
};
union {
struct i915_page_directory_pointer_entry pdp;
struct i915_page_directory_entry pd;
};
struct drm_i915_file_private *file_priv;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment