drm/i915: Move dev_priv->mm.[un]bound_list to its own lock
Remove the struct_mutex requirement around dev_priv->mm.bound_list and
dev_priv->mm.unbound_list by giving them their own spinlock. This reduces
one more requirement for struct_mutex and in the process gives us
slightly more accurate unbound_list tracking, which should improve the
shrinker - but the drawback is that we drop the retirement before
counting, so i915_gem_object_is_active() may be stale and lead us to
underestimate the number of objects that may be shrunk (see commit
bed50aea61 ("drm/i915/shrinker: Flush active on objects before counting")).
v2: Crosslink the spinlock to the lists it protects, and btw this changes s/obj->global_link/obj->mm.link/
v3: Fix decoupling of old links in i915_gem_object_attach_phys()
v3.1: Fix the fix, only unlink if it was linked
v3.2: Use a local for to_i915(obj->base.dev)->mm.obj_lock
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171016114037.5556-1-chris@chris-wilson.co.uk
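For readers outside the kernel tree, the pattern this patch applies is small and self-contained: list membership moves under a dedicated spinlock, so updating or walking the bound/unbound lists no longer needs struct_mutex. Below is a minimal userspace sketch of that pattern, with a pthread spinlock standing in for spinlock_t and a hand-rolled list_move_tail standing in for the kernel's; obj_lock, bound_list, unbound_list and the link member mirror the names in the patch, everything else is illustrative only and not kernel code.

/* Minimal userspace sketch of the obj_lock pattern introduced here: a
 * dedicated spinlock guards only which list an object is linked on, so
 * list moves never take the heavyweight lock. Illustrative only.
 * Build: cc -pthread sketch.c -o sketch
 */
#include <pthread.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

/* Unlink an entry; a no-op for self-linked (freshly initialised) nodes. */
static void list_del(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

/* Userspace equivalent of the kernel's list_move_tail(): unlink, then
 * append at the tail of the destination list. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
        list_del(e);
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

struct object {
        struct list_head link;  /* plays the role of obj->mm.link */
        int bind_count;         /* number of VMAs bound, as in the patch */
};

static pthread_spinlock_t obj_lock;     /* plays the role of i915->mm.obj_lock */
static struct list_head bound_list, unbound_list;

/* Mirrors the tail of i915_gem_object_bump_inactive_ggtt(): choose the
 * destination from bind_count and move the object while holding only
 * the dedicated spinlock. */
static void update_lists(struct object *obj)
{
        pthread_spin_lock(&obj_lock);
        list_move_tail(&obj->link,
                       obj->bind_count ? &bound_list : &unbound_list);
        pthread_spin_unlock(&obj_lock);
}

int main(void)
{
        struct object obj = { .bind_count = 0 };

        pthread_spin_init(&obj_lock, PTHREAD_PROCESS_PRIVATE);
        list_init(&bound_list);
        list_init(&unbound_list);
        list_init(&obj.link);

        update_lists(&obj);     /* object starts life on unbound_list */
        obj.bind_count++;       /* first VMA bound... */
        update_lists(&obj);     /* ...so it migrates to bound_list */

        printf("on bound_list: %s\n",
               bound_list.next == &obj.link ? "yes" : "no");
        return 0;
}

The hold time under obj_lock is a handful of pointer writes, which is the property the patch relies on when, for example, the shrinker drops and retakes the lock around unsafe_drop_pages().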
parent 3d574a6bbb
commit f2123818ff
@@ -271,7 +271,9 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		goto out;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		if (count == total)
 			break;
 
@@ -283,7 +285,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
 	}
 
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		if (count == total)
 			break;
 
@@ -293,6 +295,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
 		objects[count++] = obj;
 		total_obj_size += obj->base.size;
 	}
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
 
@@ -454,7 +457,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	mapped_size = mapped_count = 0;
 	purgeable_size = purgeable_count = 0;
 	huge_size = huge_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		size += obj->base.size;
 		++count;
 
@@ -477,7 +482,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);
 
 	size = count = dpy_size = dpy_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		size += obj->base.size;
 		++count;
 
@@ -502,6 +507,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 			page_sizes |= obj->mm.page_sizes.sg;
 		}
 	}
+	spin_unlock(&dev_priv->mm.obj_lock);
+
 	seq_printf(m, "%u bound objects, %llu bytes\n",
 		   count, size);
 	seq_printf(m, "%u purgeable objects, %llu bytes\n",
@@ -568,28 +575,46 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = m->private;
 	struct drm_i915_private *dev_priv = node_to_i915(node);
 	struct drm_device *dev = &dev_priv->drm;
+	struct drm_i915_gem_object **objects;
 	struct drm_i915_gem_object *obj;
 	u64 total_obj_size, total_gtt_size;
+	unsigned long nobject, n;
 	int count, ret;
 
+	nobject = READ_ONCE(dev_priv->mm.object_count);
+	objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
+	if (!objects)
+		return -ENOMEM;
+
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+	count = 0;
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
+		objects[count++] = obj;
+		if (count == nobject)
+			break;
+	}
+	spin_unlock(&dev_priv->mm.obj_lock);
+
+	total_obj_size = total_gtt_size = 0;
+	for (n = 0; n < count; n++) {
+		obj = objects[n];
+
 		seq_puts(m, "   ");
 		describe_obj(m, obj);
 		seq_putc(m, '\n');
 		total_obj_size += obj->base.size;
 		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
-		count++;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
 		   count, total_obj_size, total_gtt_size);
+	kvfree(objects);
 
 	return 0;
 }
@@ -1490,6 +1490,9 @@ struct i915_gem_mm {
 	 * always the inner lock when overlapping with struct_mutex. */
 	struct mutex stolen_lock;
 
+	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
+	spinlock_t obj_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 *  mappings on resume */
 	struct list_head bound_list;
@@ -1537,6 +1537,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	struct list_head *list;
 	struct i915_vma *vma;
 
+	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!i915_vma_is_ggtt(vma))
 			break;
@@ -1551,8 +1553,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 	}
 
 	i915 = to_i915(obj->base.dev);
+	spin_lock(&i915->mm.obj_lock);
 	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-	list_move_tail(&obj->global_link, list);
+	list_move_tail(&obj->mm.link, list);
+	spin_unlock(&i915->mm.obj_lock);
 }
 
 /**
@@ -2253,6 +2257,7 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 				 enum i915_mm_subclass subclass)
 {
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;
 
 	if (i915_gem_object_has_pinned_pages(obj))
@@ -2273,6 +2278,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 	pages = fetch_and_zero(&obj->mm.pages);
 	GEM_BUG_ON(!pages);
 
+	spin_lock(&i915->mm.obj_lock);
+	list_del(&obj->mm.link);
+	spin_unlock(&i915->mm.obj_lock);
+
 	if (obj->mm.mapping) {
 		void *ptr;
 
@@ -2507,7 +2516,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	obj->mm.pages = pages;
 
 	if (i915_gem_object_is_tiled(obj) &&
-	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
 		GEM_BUG_ON(obj->mm.quirked);
 		__i915_gem_object_pin_pages(obj);
 		obj->mm.quirked = true;
@@ -2529,8 +2538,11 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 		if (obj->mm.page_sizes.phys & ~0u << i)
 			obj->mm.page_sizes.sg |= BIT(i);
 	}
 
 	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+	spin_lock(&i915->mm.obj_lock);
+	list_add(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 }
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
@@ -4324,7 +4336,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
 	mutex_init(&obj->mm.lock);
 
-	INIT_LIST_HEAD(&obj->global_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->lut_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4496,7 +4507,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 		GEM_BUG_ON(!list_empty(&obj->vma_list));
 		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
 
-		list_del(&obj->global_link);
+		/* This serializes freeing with the shrinker. Since the free
+		 * is delayed, first by RCU then by the workqueue, we want the
+		 * shrinker to be able to free pages of unreferenced objects,
+		 * or else we may oom whilst there are plenty of deferred
+		 * freed objects.
+		 */
+		if (i915_gem_object_has_pages(obj)) {
+			spin_lock(&i915->mm.obj_lock);
+			list_del_init(&obj->mm.link);
+			spin_unlock(&i915->mm.obj_lock);
+		}
+
 	}
 	intel_runtime_pm_put(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -5035,11 +5057,14 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 		goto err_priorities;
 
 	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+	spin_lock_init(&dev_priv->mm.obj_lock);
 	init_llist_head(&dev_priv->mm.free_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
 	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
 			  i915_gem_retire_work_handler);
 	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -5133,12 +5158,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
 	i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
 	i915_gem_drain_freed_objects(dev_priv);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
+	spin_lock(&dev_priv->mm.obj_lock);
 	for (p = phases; *p; p++) {
-		list_for_each_entry(obj, *p, global_link)
+		list_for_each_entry(obj, *p, mm.link)
 			__start_cpu_write(obj);
 	}
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return 0;
 }
@@ -5457,7 +5482,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 		goto err_unlock;
 	}
 
-	pages = obj->mm.pages;
+	pages = fetch_and_zero(&obj->mm.pages);
+	if (pages) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+		__i915_gem_object_reset_page_iter(obj);
+
+		spin_lock(&i915->mm.obj_lock);
+		list_del(&obj->mm.link);
+		spin_unlock(&i915->mm.obj_lock);
+	}
+
 	obj->ops = &i915_gem_phys_ops;
 
 	err = ____i915_gem_object_get_pages(obj);
@@ -3594,8 +3594,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
 
 	/* clflush objects bound into the GGTT and rebind them. */
-	list_for_each_entry_safe(obj, on,
-				 &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
 		bool ggtt_bound = false;
 		struct i915_vma *vma;
 
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
 
 	/** Stolen memory for this object, instead of being backed by shmem. */
 	struct drm_mm_node *stolen;
-	struct list_head global_link;
 	union {
 		struct rcu_head rcu;
 		struct llist_node freed;
@@ -208,6 +207,12 @@ struct drm_i915_gem_object {
 			struct mutex lock; /* protects this cache */
 		} get_page;
 
+		/**
+		 * Element within i915->mm.unbound_list or i915->mm.bound_list,
+		 * locked by i915->mm.obj_lock.
+		 */
+		struct list_head link;
+
 		/**
 		 * Advice: are the backing pages purgeable?
 		 */
@@ -78,9 +78,6 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
-	if (!i915_gem_object_has_pages(obj))
-		return false;
-
 	/* Consider only shrinkable ojects. */
 	if (!i915_gem_object_is_shrinkable(obj))
 		return false;
@@ -102,7 +99,7 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * To simplify the scan, and to avoid walking the list of vma under the
 	 * object, we just check the count of its permanently pinned.
 	 */
-	if (obj->pin_global)
+	if (READ_ONCE(obj->pin_global))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
@@ -204,15 +201,20 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
+
+		/*
+		 * We serialize our access to unreferenced objects through
+		 * the use of the struct_mutex. While the objects are not
+		 * yet freed (due to RCU then a workqueue) we still want
+		 * to be able to shrink their pages, so they remain on
+		 * the unbound/bound list until actually freed.
+		 */
+		spin_lock(&dev_priv->mm.obj_lock);
 		while (count < target &&
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
-						       global_link))) {
-			list_move_tail(&obj->global_link, &still_in_list);
-			if (!obj->mm.pages) {
-				list_del_init(&obj->global_link);
-				continue;
-			}
+						       mm.link))) {
+			list_move_tail(&obj->mm.link, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
 			    obj->mm.madv != I915_MADV_DONTNEED)
@@ -230,20 +232,24 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if (!can_release_pages(obj))
 				continue;
 
+			spin_unlock(&dev_priv->mm.obj_lock);
+
 			if (unsafe_drop_pages(obj)) {
 				/* May arrive from get_pages on another bo */
 				mutex_lock_nested(&obj->mm.lock,
 						  I915_MM_SHRINKER);
 				if (!i915_gem_object_has_pages(obj)) {
 					__i915_gem_object_invalidate(obj);
-					list_del_init(&obj->global_link);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
 				mutex_unlock(&obj->mm.lock);
 				scanned += obj->base.size >> PAGE_SHIFT;
 			}
+
+			spin_lock(&dev_priv->mm.obj_lock);
 		}
 		list_splice_tail(&still_in_list, phase->list);
+		spin_unlock(&dev_priv->mm.obj_lock);
 	}
 
 	if (flags & I915_SHRINK_BOUND)
@@ -292,25 +298,17 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 	struct drm_i915_private *dev_priv =
 		container_of(shrinker, struct drm_i915_private, mm.shrinker);
 	struct drm_i915_gem_object *obj;
-	unsigned long count;
-	bool unlock;
-
-	if (!shrinker_lock(dev_priv, &unlock))
-		return 0;
-
-	i915_gem_retire_requests(dev_priv);
+	unsigned long count = 0;
 
-	count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link)
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link)
 		if (can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link)
 		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
-	}
-
-	shrinker_unlock(dev_priv, unlock);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return count;
 }
@@ -387,10 +385,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
 	struct drm_i915_gem_object *obj;
 	unsigned long unevictable, bound, unbound, freed_pages;
-	bool unlock;
-
-	if (!shrinker_lock_uninterruptible(dev_priv, &unlock, 5000))
-		return NOTIFY_DONE;
 
 	freed_pages = i915_gem_shrink_all(dev_priv);
 
@@ -399,26 +393,20 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * being pointed to by hardware.
 	 */
 	unbound = bound = unevictable = 0;
-	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
-		if (!i915_gem_object_has_pages(obj))
-			continue;
-
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			unbound += obj->base.size >> PAGE_SHIFT;
 	}
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
-		if (!i915_gem_object_has_pages(obj))
-			continue;
-
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
 		if (!can_release_pages(obj))
 			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
 			bound += obj->base.size >> PAGE_SHIFT;
 	}
-
-	shrinker_unlock(dev_priv, unlock);
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	if (freed_pages || unbound || bound)
 		pr_info("Purging GPU memory, %lu pages freed, "
@@ -724,8 +724,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	return obj;
@@ -58,8 +58,10 @@ i915_vma_retire(struct i915_gem_active *active,
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
+	spin_lock(&rq->i915->mm.obj_lock);
 	if (obj->bind_count)
-		list_move_tail(&obj->global_link, &rq->i915->mm.bound_list);
+		list_move_tail(&obj->mm.link, &rq->i915->mm.bound_list);
+	spin_unlock(&rq->i915->mm.obj_lock);
 
 	obj->mm.dirty = true; /* be paranoid */
@@ -563,9 +565,13 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+
+	spin_lock(&dev_priv->mm.obj_lock);
+	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
 	obj->bind_count++;
+	spin_unlock(&dev_priv->mm.obj_lock);
 
 	GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
 
 	return 0;
@@ -580,6 +586,7 @@ err_unpin:
 static void
 i915_vma_remove(struct i915_vma *vma)
 {
+	struct drm_i915_private *i915 = vma->vm->i915;
 	struct drm_i915_gem_object *obj = vma->obj;
 
 	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
@@ -593,9 +600,10 @@ i915_vma_remove(struct i915_vma *vma)
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist.
 	 */
+	spin_lock(&i915->mm.obj_lock);
 	if (--obj->bind_count == 0)
-		list_move_tail(&obj->global_link,
-			       &to_i915(obj->base.dev)->mm.unbound_list);
+		list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
+	spin_unlock(&i915->mm.obj_lock);
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -417,7 +417,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
 	if (err)
 		return err;
 
-	list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+	list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
 		struct i915_vma *vma;
 
 		vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
@@ -50,7 +50,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
 
 	if (!list_empty(&i915->mm.unbound_list)) {
 		size = 0;
-		list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+		list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
 			size++;
 
 		pr_err("Found %lld objects unbound!\n", size);
@@ -77,10 +77,10 @@ static void cleanup_objects(struct drm_i915_private *i915)
 {
 	struct drm_i915_gem_object *obj, *on;
 
-	list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+	list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
 		i915_gem_object_put(obj);
 
-	list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+	list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
 		i915_gem_object_put(obj);
 
 	mutex_unlock(&i915->drm.struct_mutex);
@@ -152,8 +152,6 @@ static int igt_overcommit(void *arg)
 		goto cleanup;
 	}
 
-	list_move(&obj->global_link, &i915->mm.unbound_list);
-
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 	if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
 		pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));