
drm/i915: Protect WC stash allocation against direct reclaim

As we attempt to allocate pages for use in a new WC stash, direct
reclaim may run underneath us and fill up the WC stash. We have to be
careful then not to overflow the pvec.

Fixes: 66df1014ef ("drm/i915: Keep a small stash of preallocated WC pages")
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103109
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180121173143.17090-1-chris@chris-wilson.co.uk
(cherry picked from commit 073cd78166)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Chris Wilson 2018-01-21 17:31:43 +00:00 committed by Rodrigo Vivi
parent c5bd1fc9a6
commit 124804c4c4


@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 {
 	struct pagevec *pvec = &vm->free_pages;
+	struct pagevec stash;
 
 	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
 		i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 	if (likely(pvec->nr))
 		return pvec->pages[--pvec->nr];
 
-	/* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */
+	/*
+	 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
+	 *
+	 * We have to be careful as page allocation may trigger the shrinker
+	 * (via direct reclaim) which will fill up the WC stash underneath us.
+	 * So we add our WB pages into a temporary pvec on the stack and merge
+	 * them into the WC stash after all the allocations are complete.
+	 */
+	pagevec_init(&stash);
 	do {
 		struct page *page;
 
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
 		if (unlikely(!page))
 			break;
 
-		pvec->pages[pvec->nr++] = page;
-	} while (pagevec_space(pvec));
+		stash.pages[stash.nr++] = page;
+	} while (stash.nr < pagevec_space(pvec));
 
-	if (unlikely(!pvec->nr))
-		return NULL;
+	if (stash.nr) {
+		int nr = min_t(int, stash.nr, pagevec_space(pvec));
+		struct page **pages = stash.pages + stash.nr - nr;
 
-	set_pages_array_wc(pvec->pages, pvec->nr);
+		if (nr && !set_pages_array_wc(pages, nr)) {
+			memcpy(pvec->pages + pvec->nr,
+			       pages, sizeof(pages[0]) * nr);
+			pvec->nr += nr;
+			stash.nr -= nr;
+		}
 
-	return pvec->pages[--pvec->nr];
+		pagevec_release(&stash);
+	}
+
+	return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
 }
 
 static void vm_free_pages_release(struct i915_address_space *vm,
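
For readers following along outside the kernel tree, below is a minimal userspace sketch of the pattern the patch adopts: allocate into a private, on-stack stash, then merge only as many entries as the shared stash has room for, because something else (direct reclaim, in the kernel case) may have refilled it underneath us. The names struct stash, stash_space, merge_bounded and STASH_CAPACITY are illustrative stand-ins, not kernel APIs; the real code uses struct pagevec, pagevec_space() and pagevec_release() as shown in the diff above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STASH_CAPACITY 15	/* stand-in for PAGEVEC_SIZE */

struct stash {
	unsigned int nr;
	void *pages[STASH_CAPACITY];
};

static unsigned int stash_space(const struct stash *s)
{
	return STASH_CAPACITY - s->nr;
}

/*
 * Merge a temporary, on-stack stash into a shared one that may have been
 * refilled behind our back: copy at most stash_space(shared) entries so the
 * shared array can never overflow, and release whatever does not fit.
 */
static void merge_bounded(struct stash *shared, struct stash *local)
{
	unsigned int nr = local->nr;

	if (nr > stash_space(shared))
		nr = stash_space(shared);

	memcpy(shared->pages + shared->nr,
	       local->pages + local->nr - nr, sizeof(local->pages[0]) * nr);
	shared->nr += nr;
	local->nr -= nr;

	while (local->nr)			/* drop the overflow */
		free(local->pages[--local->nr]);
}

int main(void)
{
	struct stash shared = { .nr = 10 };	/* pretend "reclaim" already filled 10 slots */
	struct stash local = { 0 };
	int i;

	for (i = 0; i < 8; i++)			/* batch "allocation" into the local stash */
		local.pages[local.nr++] = malloc(4096);

	merge_bounded(&shared, &local);		/* only 5 of the 8 entries fit */
	printf("shared holds %u/%d entries, local holds %u\n",
	       shared.nr, STASH_CAPACITY, local.nr);
	return 0;
}

The key design point mirrored from the patch is that the bound is computed against the shared stash's free space at merge time, not at allocation time, so a concurrent refill can never cause the fixed-size array to overflow.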