Commit 66df1014 authored by Chris Wilson

drm/i915: Keep a small stash of preallocated WC pages

We use WC pages for coherent writes into the ppGTT on !llc
architectures. However, creating a WC page requires a stop_machine(),
i.e. it is very slow. To compensate we currently keep a per-vm cache of
recently freed pages, but we still see the slow startup of new contexts.
We can amortize that cost slightly by allocating WC pages in small
batches (PAGEVEC_SIZE == 14), and since creating a WC page implies a
stop_machine() there is no penalty for keeping that stash global.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170822173828.5932-1-chris@chris-wilson.co.uk
parent eb371933
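
The stash-and-refill scheme is worth seeing in isolation before reading the diff. The following is a minimal, self-contained userspace C sketch of the pattern, with hypothetical names throughout: make_wc() stands in for the expensive set_pages_array_wc()/stop_machine() step, STASH_SIZE for PAGEVEC_SIZE, and malloc()/free() for the page allocator. It is an illustration of the amortization idea, not the kernel code that follows:

#include <stdio.h>
#include <stdlib.h>

#define STASH_SIZE 14	/* stands in for PAGEVEC_SIZE */

/* Hypothetical stand-in for set_pages_array_wc(): one expensive,
 * machine-wide operation that converts a whole batch of pages at once. */
static void make_wc(void **pages, unsigned int nr)
{
	printf("expensive conversion of %u page(s) in one batch\n", nr);
}

struct stash {
	unsigned int nr;
	void *pages[STASH_SIZE];
};

/* Take a WC page from the stash, batch-refilling it when empty. */
static void *stash_alloc(struct stash *s)
{
	if (s->nr)
		return s->pages[--s->nr];

	/* Empty: allocate a full batch, then convert it in one go,
	 * amortizing the cost of make_wc() over up to STASH_SIZE pages. */
	while (s->nr < STASH_SIZE) {
		void *page = malloc(4096);
		if (!page)
			break;
		s->pages[s->nr++] = page;
	}
	if (!s->nr)
		return NULL;

	make_wc(s->pages, s->nr);
	return s->pages[--s->nr];
}

/* Return a page to the stash; release the overflow if the stash is full. */
static void stash_free(struct stash *s, void *page)
{
	if (s->nr < STASH_SIZE)
		s->pages[s->nr++] = page;
	else
		free(page);
}

int main(void)
{
	struct stash s = { 0 };
	void *a = stash_alloc(&s);	/* triggers one batched conversion */
	void *b = stash_alloc(&s);	/* served from the stash, no conversion */

	stash_free(&s, b);
	stash_free(&s, a);

	/* Drain the stash on teardown. */
	while (s.nr)
		free(s.pages[--s.nr]);
	return 0;
}

The expensive conversion runs once per batch refill instead of once per allocation, which is exactly the amortization the commit message describes; the kernel version additionally shares the stash across all VMs, since stop_machine() is machine-wide anyway.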
drivers/gpu/drm/i915/i915_drv.h
@@ -1465,6 +1465,11 @@ struct i915_gem_mm {
 	struct llist_head free_list;
 	struct work_struct free_work;
 
+	/**
+	 * Small stash of WC pages
+	 */
+	struct pagevec wc_stash;
+
 	/** Usable portion of the GTT for GEM */
 	dma_addr_t stolen_base; /* limited to low memory (32-bit) */
drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -356,39 +356,86 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	struct page *page;
+	struct pagevec *pvec = &vm->free_pages;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
 
-	if (vm->free_pages.nr)
-		return vm->free_pages.pages[--vm->free_pages.nr];
+	if (likely(pvec->nr))
+		return pvec->pages[--pvec->nr];
+
+	if (!vm->pt_kmap_wc)
+		return alloc_page(gfp);
+
+	/* A placeholder for a specific mutex to guard the WC stash */
+	lockdep_assert_held(&vm->i915->drm.struct_mutex);
+
+	/* Look in our global stash of WC pages... */
+	pvec = &vm->i915->mm.wc_stash;
+	if (likely(pvec->nr))
+		return pvec->pages[--pvec->nr];
 
-	page = alloc_page(gfp);
-	if (!page)
-		return NULL;
+	/* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
+	do {
+		struct page *page;
 
-	if (vm->pt_kmap_wc)
-		set_pages_array_wc(&page, 1);
+		page = alloc_page(gfp);
+		if (unlikely(!page))
+			break;
 
-	return page;
+		pvec->pages[pvec->nr++] = page;
+	} while (pagevec_space(pvec));
+	if (unlikely(!pvec->nr))
+		return NULL;
+
+	set_pages_array_wc(pvec->pages, pvec->nr);
+
+	return pvec->pages[--pvec->nr];
 }
 
-static void vm_free_pages_release(struct i915_address_space *vm)
+static void vm_free_pages_release(struct i915_address_space *vm,
+				  bool immediate)
 {
-	GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+	struct pagevec *pvec = &vm->free_pages;
 
-	if (vm->pt_kmap_wc)
-		set_pages_array_wb(vm->free_pages.pages,
-				   pagevec_count(&vm->free_pages));
+	GEM_BUG_ON(!pagevec_count(pvec));
+
+	if (vm->pt_kmap_wc) {
+		struct pagevec *stash = &vm->i915->mm.wc_stash;
+
+		/* When we use WC, first fill up the global stash and then
+		 * only if full immediately free the overflow.
+		 */
+		lockdep_assert_held(&vm->i915->drm.struct_mutex);
+		if (pagevec_space(stash)) {
+			do {
+				stash->pages[stash->nr++] =
+					pvec->pages[--pvec->nr];
+				if (!pvec->nr)
+					return;
+			} while (pagevec_space(stash));
+
+			/* As we have made some room in the VM's free_pages,
+			 * we can wait for it to fill again. Unless we are
+			 * inside i915_address_space_fini() and must
+			 * immediately release the pages!
+			 */
+			if (!immediate)
+				return;
+		}
+
+		set_pages_array_wb(pvec->pages, pvec->nr);
+	}
 
-	__pagevec_release(&vm->free_pages);
+	__pagevec_release(pvec);
 }
 
 static void vm_free_page(struct i915_address_space *vm, struct page *page)
 {
 	if (!pagevec_add(&vm->free_pages, page))
-		vm_free_pages_release(vm);
+		vm_free_pages_release(vm, false);
 }
 
 static int __setup_page_dma(struct i915_address_space *vm,
@@ -452,12 +499,31 @@ static void fill_page_dma_32(struct i915_address_space *vm,
 static int
 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
 {
-	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
+	struct page *page;
+	dma_addr_t addr;
+
+	page = alloc_page(gfp | __GFP_ZERO);
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
+			    PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(vm->dma, addr))) {
+		__free_page(page);
+		return -ENOMEM;
+	}
+
+	vm->scratch_page.page = page;
+	vm->scratch_page.daddr = addr;
+	return 0;
 }
 
 static void cleanup_scratch_page(struct i915_address_space *vm)
 {
-	cleanup_page_dma(vm, &vm->scratch_page);
+	struct i915_page_dma *p = &vm->scratch_page;
+
+	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	__free_page(p->page);
 }
 
 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -1337,18 +1403,18 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 			1ULL << 48 :
 			1ULL << 32;
 
-	ret = gen8_init_scratch(&ppgtt->base);
-	if (ret) {
-		ppgtt->base.total = 0;
-		return ret;
-	}
-
 	/* There are only few exceptions for gen >=6. chv and bxt.
 	 * And we are not sure about the latter so play safe for now.
 	 */
 	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
 		ppgtt->base.pt_kmap_wc = true;
 
+	ret = gen8_init_scratch(&ppgtt->base);
+	if (ret) {
+		ppgtt->base.total = 0;
+		return ret;
+	}
+
 	if (use_4lvl(vm)) {
 		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
 		if (ret)
@@ -1872,7 +1938,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 static void i915_address_space_fini(struct i915_address_space *vm)
 {
 	if (pagevec_count(&vm->free_pages))
-		vm_free_pages_release(vm);
+		vm_free_pages_release(vm, true);
 
 	i915_gem_timeline_fini(&vm->timeline);
 	drm_mm_takedown(&vm->mm);
@@ -2598,6 +2664,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma, *vn;
+	struct pagevec *pvec;
 
 	ggtt->base.closed = true;
@@ -2621,6 +2688,13 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 	}
 
 	ggtt->base.cleanup(&ggtt->base);
+
+	pvec = &dev_priv->mm.wc_stash;
+	if (pvec->nr) {
+		set_pages_array_wb(pvec->pages, pvec->nr);
+		__pagevec_release(pvec);
+	}
+
 	mutex_unlock(&dev_priv->drm.struct_mutex);
 
 	arch_phys_wc_del(ggtt->mtrr);