drm/ttm: move swapout logic around v3

Move the iteration of the global lru into the new function
ttm_global_swapout() and use that instead in drivers.

v2: consistently return int
v3: fix build fail

Signed-off-by: Christian König <christian.koenig@amd.com>
Tested-by: Nirmoy Das <nirmoy.das@amd.com>
Reviewed-by: Huang Rui <ray.huang@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/424008/
commit ebd59851c7 (parent 976677b595)
Christian König, 2020-10-06 13:35:32 +02:00
7 changed files with 53 additions and 45 deletions
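
At a glance, this is the split the hunks below implement (a reader's sketch, not part of the patch; the prototypes are taken from the diff, the summary comments are mine):

/* New in ttm_device.c: walk the global swap LRU in priority order and try
 * each BO until one can be swapped out.  Returns the number of pages
 * swapped (> 0), 0 if nothing on the LRU was swappable, or a negative
 * error code. */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);

/* Now takes the BO as an explicit parameter and swaps out only that
 * object: 0 on success, -EBUSY if it cannot be reserved or swapped. */
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
                   gfp_t gfp_flags);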

drivers/gpu/drm/ttm/ttm_bo.c

@@ -1193,56 +1193,35 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
-/*
- * A buffer object shrink method that tries to swap out the first
- * buffer object on the bo_global::swap_lru list.
- */
-int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
+int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+                   gfp_t gfp_flags)
 {
         struct ttm_global *glob = &ttm_glob;
-        struct ttm_buffer_object *bo;
-        int ret = -EBUSY;
         bool locked;
-        unsigned i;
+        int ret;
 
-        spin_lock(&glob->lru_lock);
-        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
-                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
-                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
-                                                            NULL))
-                                continue;
+        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked, NULL))
+                return -EBUSY;
 
-                        if (!ttm_bo_get_unless_zero(bo)) {
-                                if (locked)
-                                        dma_resv_unlock(bo->base.resv);
-                                continue;
-                        }
-
-                        ret = 0;
-                        break;
-                }
-                if (!ret)
-                        break;
-        }
-
-        if (ret) {
-                spin_unlock(&glob->lru_lock);
-                return ret;
+        if (!ttm_bo_get_unless_zero(bo)) {
+                if (locked)
+                        dma_resv_unlock(bo->base.resv);
+                return -EBUSY;
         }
 
         if (bo->deleted) {
-                ret = ttm_bo_cleanup_refs(bo, false, false, locked);
+                ttm_bo_cleanup_refs(bo, false, false, locked);
                 ttm_bo_put(bo);
-                return ret;
+                return 0;
         }
 
         ttm_bo_del_from_lru(bo);
+        /* TODO: Cleanup the locking */
         spin_unlock(&glob->lru_lock);
 
-        /**
+        /*
          * Move to system cached
          */
-
         if (bo->mem.mem_type != TTM_PL_SYSTEM) {
                 struct ttm_operation_ctx ctx = { false, false };
                 struct ttm_resource evict_mem;
@@ -1262,29 +1241,26 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
                 }
         }
 
-        /**
+        /*
          * Make sure BO is idle.
          */
-
         ret = ttm_bo_wait(bo, false, false);
         if (unlikely(ret != 0))
                 goto out;
 
         ttm_bo_unmap_virtual(bo);
 
-        /**
+        /*
          * Swap out. Buffer will be swapped in again as soon as
          * anyone tries to access a ttm page.
          */
-
         if (bo->bdev->funcs->swap_notify)
                 bo->bdev->funcs->swap_notify(bo);
 
         ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
 out:
 
-        /**
-         *
+        /*
          * Unreserve without putting on LRU to avoid swapping out an
          * already swapped buffer.
          */
@@ -1293,7 +1269,6 @@ out:
         ttm_bo_put(bo);
         return ret;
 }
-EXPORT_SYMBOL(ttm_bo_swapout);
 
 void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
 {

drivers/gpu/drm/ttm/ttm_device.c

@@ -102,6 +102,35 @@ out:
         return ret;
 }
 
+/**
+ * A buffer object shrink method that tries to swap out the first
+ * buffer object on the global::swap_lru list.
+ */
+int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
+{
+        struct ttm_global *glob = &ttm_glob;
+        struct ttm_buffer_object *bo;
+        unsigned i;
+        int ret;
+
+        spin_lock(&glob->lru_lock);
+        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+                list_for_each_entry(bo, &glob->swap_lru[i], swap) {
+                        uint32_t num_pages = bo->ttm->num_pages;
+
+                        ret = ttm_bo_swapout(bo, ctx, gfp_flags);
+                        /* ttm_bo_swapout has dropped the lru_lock */
+                        if (!ret)
+                                return num_pages;
+                        if (ret != -EBUSY)
+                                return ret;
+                }
+        }
+        spin_unlock(&glob->lru_lock);
+        return 0;
+}
+EXPORT_SYMBOL(ttm_global_swapout);
+
 static void ttm_init_sysman(struct ttm_device *bdev)
 {
         struct ttm_resource_manager *man = &bdev->sysman;
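
As a usage sketch (hypothetical driver snippet, not from this patch): because ttm_global_swapout() reports the number of pages it freed, a caller can simply loop until the LRU is exhausted, which is exactly what the vmwgfx hunk further below does in one line:

        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        int freed;

        /* Swap until nothing swappable is left (0) or an error occurs (< 0). */
        do {
                freed = ttm_global_swapout(&ctx, GFP_KERNEL);
        } while (freed > 0);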

drivers/gpu/drm/ttm/ttm_tt.c

@@ -369,7 +369,7 @@ static unsigned long ttm_tt_shrinker_scan(struct shrinker *shrink,
         };
         int ret;
 
-        ret = ttm_bo_swapout(&ctx, GFP_NOFS);
+        ret = ttm_global_swapout(&ctx, GFP_NOFS);
         return ret < 0 ? SHRINK_EMPTY : ret;
 }
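
For context, the shrinker contract this hunk relies on (my summary, not part of the patch): .scan_objects must return the number of objects freed during the call, or SHRINK_EMPTY when there is nothing left to reclaim. The new return convention maps onto that directly:

        /* > 0: pages actually swapped out -> reported as objects freed.
         *   0: swap LRU had nothing swappable -> reported as 0 freed.
         * < 0: error -> tell the core there is nothing to get (SHRINK_EMPTY). */
        ret = ttm_global_swapout(&ctx, GFP_NOFS);
        return ret < 0 ? SHRINK_EMPTY : ret;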

drivers/gpu/drm/ttm/ttm_memory.c

@@ -38,6 +38,7 @@
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
+#include <drm/ttm/ttm_device.h>
 
 #include "ttm_memory.h"

@@ -277,7 +278,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
         while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
                 spin_unlock(&glob->lock);
-                ret = ttm_bo_swapout(ctx, GFP_KERNEL);
+                ret = ttm_global_swapout(ctx, GFP_KERNEL);
                 spin_lock(&glob->lock);
                 if (unlikely(ret < 0))
                         break;

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -1371,7 +1371,7 @@ static int vmw_pm_freeze(struct device *kdev)
         vmw_execbuf_release_pinned_bo(dev_priv);
         vmw_resource_evict_all(dev_priv);
         vmw_release_device_early(dev_priv);
-        while (ttm_bo_swapout(&ctx, GFP_KERNEL) > 0);
+        while (ttm_global_swapout(&ctx, GFP_KERNEL) > 0);
         if (dev_priv->enable_fb)
                 vmw_fifo_resource_dec(dev_priv);
         if (atomic_read(&dev_priv->num_fifo_resources) != 0) {

include/drm/ttm/ttm_bo_api.h

@@ -560,7 +560,8 @@ ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
                   const char __user *wbuf, char __user *rbuf,
                   size_t count, loff_t *f_pos, bool write);
 
-int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
+int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+                   gfp_t gfp_flags);
 
 /**
  * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
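
One subtlety of the new per-BO prototype that the header does not spell out (my reading of the ttm_bo.c and ttm_device.c hunks above): the function is entered with the global lru_lock held, and the lock state on return depends on the result:

        /* Implied calling convention of ttm_bo_swapout(bo, ctx, gfp_flags):
         *   -EBUSY: the BO could not be reserved/swapped; lru_lock is still
         *           held, so ttm_global_swapout() can try the next BO.
         *   other:  the lock has been dropped inside ttm_bo_swapout() (see
         *           the "has dropped the lru_lock" comment in ttm_device.c). */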

include/drm/ttm/ttm_device.h

@@ -297,6 +297,8 @@ struct ttm_device {
         struct delayed_work wq;
 };
 
+int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
+
 static inline struct ttm_resource_manager *
 ttm_manager_type(struct ttm_device *bdev, int mem_type)
 {