
drm/ttm: stop dangerous caching attribute change

When we swap a BO out or in we try to change the caching
attributes of the pages before/after doing the copy.

On x86 this is done by calling set_pages_uc(),
set_memory_wc() or set_pages_wb() on non-highmem pages
to update the linear mapping of the page.
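For a single non-highmem page, that removed x86 path boils down to
something like the following sketch (illustrative only, not code from
this patch; set_memory_wc(), PageHighMem() and page_address() are the
real kernel interfaces named above):

#include <linux/mm.h>
#include <asm/set_memory.h>

/* Sketch: flip the kernel linear mapping of one page to
 * write-combined. Highmem pages have no permanent linear
 * mapping, so there is nothing to change for them. */
static int sketch_set_page_wc(struct page *p)
{
        unsigned long addr;

        if (PageHighMem(p))
                return 0;

        addr = (unsigned long)page_address(p);
        return set_memory_wc(addr, 1);
}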

On all other platforms we do exactly nothing.

Now on x86 this is unnecessary, because copy_highpage() will
either create a temporary mapping of the page, which is wb
anyway and is destroyed again immediately, or use the linear
mapping with the correct caching attributes.
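For reference, the generic copy_highpage() in include/linux/highmem.h
at the time of this patch is essentially the following kmap/copy/kunmap
sequence; the temporary kmap_atomic() mapping is always write-back, so
the copy never depends on the TT caching state:

static inline void copy_highpage(struct page *to, struct page *from)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(from);      /* temporary wb (or linear) mapping */
        vto = kmap_atomic(to);
        copy_page(vto, vfrom);
        kunmap_atomic(vto);
        kunmap_atomic(vfrom);
}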

So stop this nonsense: just keep the caching as it is, and
return an error when a driver tries to change the caching of
an already populated TT object.
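From a driver's point of view the new contract looks like this
hypothetical caller sketch (not part of the patch;
ttm_tt_set_placement_caching() is the existing entry point visible in
the second hunk below):

/* Hypothetical: request write-combined caching for a TT. After this
 * patch the request only succeeds while the TT is still unpopulated;
 * afterwards ttm_tt_set_caching() returns -EINVAL instead of
 * rewriting page attributes behind the driver's back. */
static int sketch_request_wc(struct ttm_tt *ttm)
{
        return ttm_tt_set_placement_caching(ttm, TTM_PL_FLAG_WC);
}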

This is much more defensive, since changing caching
attributes is platform- and driver-specific and usually
doesn't work after the pages were initially allocated.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/391293/
commit db9c1734ad (parent 4127a62049)
Christian König, 2020-09-18 16:34:49 +02:00
3 changed files with 5 additions and 93 deletions

drivers/gpu/drm/ttm/ttm_bo.c

@@ -1551,14 +1551,13 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
          * Move to system cached
          */
-        if (bo->mem.mem_type != TTM_PL_SYSTEM ||
-            bo->ttm->caching_state != tt_cached) {
+        if (bo->mem.mem_type != TTM_PL_SYSTEM) {
                 struct ttm_operation_ctx ctx = { false, false };
                 struct ttm_resource evict_mem;
 
                 evict_mem = bo->mem;
                 evict_mem.mm_node = NULL;
-                evict_mem.placement = TTM_PL_FLAG_CACHED;
+                evict_mem.placement = TTM_PL_MASK_CACHING;
                 evict_mem.mem_type = TTM_PL_SYSTEM;
 
                 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
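The flag swap is the behavioral core of this hunk: TTM_PL_MASK_CACHING
accepts any caching state, so swapout no longer forces a transition to
cached memory. For reference, the mask is defined in
include/drm/ttm/ttm_placement.h (as of this era) as:

#define TTM_PL_MASK_CACHING     (TTM_PL_FLAG_CACHED | \
                                 TTM_PL_FLAG_UNCACHED | \
                                 TTM_PL_FLAG_WC)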

drivers/gpu/drm/ttm/ttm_tt.c

@@ -38,7 +38,6 @@
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
 
 /**
  * Allocates a ttm structure for the given BO.
@@ -115,81 +114,19 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
         return 0;
 }
 
-static int ttm_tt_set_page_caching(struct page *p,
-                                   enum ttm_caching_state c_old,
-                                   enum ttm_caching_state c_new)
-{
-        int ret = 0;
-
-        if (PageHighMem(p))
-                return 0;
-
-        if (c_old != tt_cached) {
-                /* p isn't in the default caching state, set it to
-                 * writeback first to free its current memtype. */
-
-                ret = ttm_set_pages_wb(p, 1);
-                if (ret)
-                        return ret;
-        }
-
-        if (c_new == tt_wc)
-                ret = ttm_set_pages_wc(p, 1);
-        else if (c_new == tt_uncached)
-                ret = ttm_set_pages_uc(p, 1);
-
-        return ret;
-}
-
-/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
- */
-
 static int ttm_tt_set_caching(struct ttm_tt *ttm,
                               enum ttm_caching_state c_state)
 {
-        int i, j;
-        struct page *cur_page;
-        int ret;
-
         if (ttm->caching_state == c_state)
                 return 0;
 
-        if (!ttm_tt_is_populated(ttm)) {
-                /* Change caching but don't populate */
-                ttm->caching_state = c_state;
-                return 0;
-        }
-
-        if (ttm->caching_state == tt_cached)
-                drm_clflush_pages(ttm->pages, ttm->num_pages);
-
-        for (i = 0; i < ttm->num_pages; ++i) {
-                cur_page = ttm->pages[i];
-                if (likely(cur_page != NULL)) {
-                        ret = ttm_tt_set_page_caching(cur_page,
-                                                      ttm->caching_state,
-                                                      c_state);
-                        if (unlikely(ret != 0))
-                                goto out_err;
-                }
-        }
+        /* Can't change the caching state after TT is populated */
+        if (WARN_ON_ONCE(ttm_tt_is_populated(ttm)))
+                return -EINVAL;
 
         ttm->caching_state = c_state;
 
         return 0;
-
-out_err:
-        for (j = 0; j < i; ++j) {
-                cur_page = ttm->pages[j];
-                if (likely(cur_page != NULL)) {
-                        (void)ttm_tt_set_page_caching(cur_page, c_state,
-                                                      ttm->caching_state);
-                }
-        }
-
-        return ret;
 }
 
 int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
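Applying the hunk, the surviving ttm_tt_set_caching() reduces to a
guard plus an assignment (reconstructed from the diff above):

static int ttm_tt_set_caching(struct ttm_tt *ttm,
                              enum ttm_caching_state c_state)
{
        if (ttm->caching_state == c_state)
                return 0;

        /* Can't change the caching state after TT is populated */
        if (WARN_ON_ONCE(ttm_tt_is_populated(ttm)))
                return -EINVAL;

        ttm->caching_state = c_state;

        return 0;
}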
@@ -358,8 +295,6 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev,
         int i;
         int ret = -ENOMEM;
 
-        BUG_ON(ttm->caching_state != tt_cached);
-
         if (!persistent_swap_storage) {
                 swap_storage = shmem_file_setup("ttm swap",
                                                 ttm->num_pages << PAGE_SHIFT,

include/drm/ttm/ttm_set_memory.h

@@ -57,18 +57,6 @@ static inline int ttm_set_pages_wb(struct page *page, int numpages)
         return set_pages_wb(page, numpages);
 }
 
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
-        unsigned long addr = (unsigned long)page_address(page);
-
-        return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
-        return set_pages_uc(page, numpages);
-}
-
 #else /* for CONFIG_X86 */
 
 static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
@@ -91,16 +79,6 @@ static inline int ttm_set_pages_wb(struct page *page, int numpages)
         return 0;
 }
 
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
-        return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
-        return 0;
-}
-
 #endif /* for CONFIG_X86 */
 
 #endif