
drm/i915: Register a shrinker to free inactive lists under memory pressure

This should help GEM handle memory pressure situations more gracefully.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Author: Chris Wilson, 2009-09-14 16:50:28 +01:00; committed by Jesse Barnes
parent 725ceaa08a
commit 31169714fc
3 changed files with 159 additions and 0 deletions
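For context, this hooks into the slab-shrinker interface the kernel had at the time: a shrinker is a callback plus a seeks cost hint, registered with register_shrinker(). When invoked with nr_to_scan == 0 the callback only reports how many objects it could free; otherwise it frees up to nr_to_scan objects and returns the count still reclaimable, or -1 when it cannot take its locks. A minimal sketch of that contract follows; pool_lock, pool_count and pool_free_some() are hypothetical stand-ins, not part of this patch.

#include <linux/mm.h>		/* struct shrinker, register_shrinker() */
#include <linux/spinlock.h>

/* Hypothetical object pool guarded by a spinlock. */
static DEFINE_SPINLOCK(pool_lock);
static int pool_count;

static int pool_free_some(int nr)
{
	/* ... drop up to nr cached objects, return how many went ... */
	return nr < pool_count ? nr : pool_count;
}

static int pool_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	int remaining;

	if (!spin_trylock(&pool_lock))
		return -1;	/* locks contended: tell the VM to retry later */

	if (nr_to_scan)
		pool_count -= pool_free_some(nr_to_scan);
	remaining = pool_count;

	spin_unlock(&pool_lock);
	return remaining;	/* objects still available to reclaim */
}

static struct shrinker pool_shrinker = {
	.shrink = pool_shrink,
	.seeks	= DEFAULT_SEEKS,
};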

drivers/gpu/drm/i915/i915_drv.c

@@ -362,6 +362,8 @@ static int __init i915_init(void)
{
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
* explicitly disabled with the module parameter.
@@ -388,6 +390,7 @@ static int __init i915_init(void)
static void __exit i915_exit(void)
{
i915_gem_shrinker_exit();
drm_exit(&driver);
}

drivers/gpu/drm/i915/i915_drv.h

@@ -368,6 +368,15 @@ typedef struct drm_i915_private {
struct io_mapping *gtt_mapping;
int gtt_mtrr;
/**
* Membership on list of all loaded devices, used to evict
* inactive buffers under memory pressure.
*
* Modifications should only be done whilst holding the
* shrink_list_lock spinlock.
*/
struct list_head shrink_list;
/**
* List of objects currently involved in rendering from the
* ringbuffer.
@@ -741,6 +750,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
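The shrink_list member above is an embedded list node: each device's private structure is threaded onto the file-scope shrink_list, and iteration recovers the containing drm_i915_private from the node via container_of(), which list_for_each_entry() wraps. A toy illustration of the pattern, with hypothetical names (dev_ctx, all_devs, walk_devs):

#include <linux/kernel.h>
#include <linux/list.h>

struct dev_ctx {
	int id;
	struct list_head link;	/* embedded node, like mm.shrink_list */
};

static LIST_HEAD(all_devs);	/* like the file-scope shrink_list */

static void walk_devs(void)
{
	struct dev_ctx *ctx;

	/* list_for_each_entry() applies container_of() to each embedded
	 * 'link' node to get back the enclosing dev_ctx. */
	list_for_each_entry(ctx, &all_devs, link)
		pr_info("dev %d\n", ctx->id);
}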

drivers/gpu/drm/i915/i915_gem.c

@@ -53,6 +53,9 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
unsigned long end)
{
@@ -4265,6 +4268,10 @@ i915_gem_load(struct drm_device *dev)
i915_gem_retire_work_handler);
dev_priv->mm.next_gem_seqno = 1;
spin_lock(&shrink_list_lock);
list_add(&dev_priv->mm.shrink_list, &shrink_list);
spin_unlock(&shrink_list_lock);
/* Old X drivers will take 0-2 for front, back, depth buffers */
dev_priv->fence_reg_start = 3;
@@ -4482,3 +4489,140 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
list_del_init(i915_file_priv->mm.request_list.next);
mutex_unlock(&dev->struct_mutex);
}
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_gem_object *obj)
{
struct inode *inode;
inode = obj->filp->f_path.dentry->d_inode;
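/* GEM objects are backed by shmemfs; dropping every page in the
 * inode's mapping releases the backing store to the allocator now,
 * rather than when the object is finally freed. */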
mutex_lock(&inode->i_mutex);
truncate_inode_pages(inode->i_mapping, 0);
mutex_unlock(&inode->i_mutex);
}
static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
{
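/* An object that has never been dirtied holds no data that must be
 * preserved, so its backing pages can be discarded safely. */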
return !obj_priv->dirty;
}
static int
i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
drm_i915_private_t *dev_priv, *next_dev;
struct drm_i915_gem_object *obj_priv, *next_obj;
int cnt = 0;
int would_deadlock = 1;
/* "fast-path" to count number of available objects */
if (nr_to_scan == 0) {
spin_lock(&shrink_list_lock);
list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
struct drm_device *dev = dev_priv->dev;
if (mutex_trylock(&dev->struct_mutex)) {
list_for_each_entry(obj_priv,
&dev_priv->mm.inactive_list,
list)
cnt++;
mutex_unlock(&dev->struct_mutex);
}
}
spin_unlock(&shrink_list_lock);
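/* Scale the raw object count by vfs_cache_pressure (default 100),
 * the same weighting the dentry and inode caches apply. */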
return (cnt / 100) * sysctl_vfs_cache_pressure;
}
spin_lock(&shrink_list_lock);
/* first scan for clean buffers */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
struct drm_device *dev = dev_priv->dev;
if (!mutex_trylock(&dev->struct_mutex))
continue;
spin_unlock(&shrink_list_lock);
i915_gem_retire_requests(dev);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
list) {
if (i915_gem_object_is_purgeable(obj_priv)) {
struct drm_gem_object *obj = obj_priv->obj;
i915_gem_object_unbind(obj);
i915_gem_object_truncate(obj);
if (--nr_to_scan <= 0)
break;
}
}
spin_lock(&shrink_list_lock);
mutex_unlock(&dev->struct_mutex);
if (nr_to_scan <= 0)
break;
}
/* second pass, evict/count anything still on the inactive list */
list_for_each_entry_safe(dev_priv, next_dev,
&shrink_list, mm.shrink_list) {
struct drm_device *dev = dev_priv->dev;
if (!mutex_trylock(&dev->struct_mutex))
continue;
spin_unlock(&shrink_list_lock);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
list) {
if (nr_to_scan > 0) {
struct drm_gem_object *obj = obj_priv->obj;
i915_gem_object_unbind(obj);
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
nr_to_scan--;
} else
cnt++;
}
spin_lock(&shrink_list_lock);
mutex_unlock(&dev->struct_mutex);
would_deadlock = 0;
}
spin_unlock(&shrink_list_lock);
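/* If every per-device mutex was contended we touched nothing;
 * returning -1 tells the VM to skip this shrinker for now. */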
if (would_deadlock)
return -1;
else if (cnt > 0)
return (cnt / 100) * sysctl_vfs_cache_pressure;
else
return 0;
}
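For a sense of how the callback above is driven: under pressure the VM's shrink_slab() loop first queries each registered shrinker with nr_to_scan == 0, then calls it again in batches until enough has been freed or no progress is made. A simplified, hypothetical sketch of that calling convention (the real shrink_slab() adds seeks-based weighting; its batch size, SHRINK_BATCH, is 128):

/* Illustrative caller only; drive_shrinker() is not a kernel function. */
static void drive_shrinker(struct shrinker *s, gfp_t gfp_mask)
{
	int remaining = s->shrink(0, gfp_mask);	/* query pass: count only */

	while (remaining > 0) {
		int left = s->shrink(128, gfp_mask);	/* scan pass */

		if (left == -1)		/* shrinker could not take its locks */
			break;
		if (left >= remaining)	/* no forward progress; stop */
			break;
		remaining = left;
	}
}
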
static struct shrinker shrinker = {
.shrink = i915_gem_shrink,
.seeks = DEFAULT_SEEKS,
};
__init void
i915_gem_shrinker_init(void)
{
register_shrinker(&shrinker);
}
__exit void
i915_gem_shrinker_exit(void)
{
unregister_shrinker(&shrinker);
}