Commit 7f5f2280 authored by Chris Wilson

drm/i915/gtt: Avoid overflowing the WC stash

An interesting issue cropped up when we made the pagetables be allocated and
freed concurrently (i.e. removed their grandiose struct_mutex guard): we
could overflow the page stash. This happens when multiple allocators grab
WC pages and fill the vm's local page stash, so that when we then free
another page, the stash is already full and we overflow it.

The fix is quite simple: check for a full page stash before adding another
page. This results in us keeping a vm-local page stash around for much
longer, which is both a blessing and a curse.
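
In other words, we go from "add, then release on failure" to "make room,
then add". A minimal standalone sketch of the pattern (the stash type,
STASH_SIZE and stash_drain() below are stand-ins for illustration, not the
driver's pagestash/pagevec machinery):

	#define STASH_SIZE 15

	struct stash {
		unsigned int nr;
		void *slots[STASH_SIZE];
	};

	/* stand-in drain helper: empty the stash */
	static void stash_drain(struct stash *s)
	{
		s->nr = 0;
	}

	static void stash_push(struct stash *s, void *page)
	{
		/* make room *before* adding, so the add can never overflow */
		while (s->nr >= STASH_SIZE)
			stash_drain(s);
		s->slots[s->nr++] = page;
	}
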
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190529093407.31697-1-chris@chris-wilson.co.uk
parent a10f361d
@@ -341,11 +341,11 @@ static struct page *stash_pop_page(struct pagestash *stash)
 
 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
 {
-	int nr;
+	unsigned int nr;
 
 	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
 
-	nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
+	nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
 	memcpy(stash->pvec.pages + stash->pvec.nr,
 	       pvec->pages + pvec->nr - nr,
 	       sizeof(pvec->pages[0]) * nr);
@@ -399,7 +399,8 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 	page = stack.pages[--stack.nr];
 
 	/* Merge spare WC pages to the global stash */
-	stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
+	if (stack.nr)
+		stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
 
 	/* Push any surplus WC pages onto the local VM stash */
 	if (stack.nr)
@@ -469,8 +470,10 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 	 */
 	might_sleep();
 	spin_lock(&vm->free_pages.lock);
-	if (!pagevec_add(&vm->free_pages.pvec, page))
+	while (!pagevec_space(&vm->free_pages.pvec))
 		vm_free_pages_release(vm, false);
+	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
+	pagevec_add(&vm->free_pages.pvec, page);
 	spin_unlock(&vm->free_pages.lock);
 }