drm/i915: Be more careful when unbinding vma

When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it at the encompassing object), then there is a potential for the
obj->vma_list to be modified for other elements upon i915_vma_unbind(). As
a result, if we walk over the object list and call i915_vma_unbind(), we
need to be prepared for that list to change.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470293567-10811-8-git-send-email-chris@chris-wilson.co.uk

commit aa653a685d
parent 15717de219
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2016-08-04 07:52:27 +01:00

4 changed files with 46 additions and 25 deletions
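
Before the per-file diffs, it may help to see the shape of the fix in isolation. i915_gem_object_unbind() (added below in i915_gem.c) drains obj->vma_list by detaching the first element onto a private still_in_list before unbinding it, so that any retirement triggered by i915_vma_unbind() cannot shuffle that element back under the iterator; the private list is spliced back at the end. The following is a minimal userspace sketch of that drain pattern, using the BSD <sys/queue.h> TAILQ macros in place of the kernel's list.h; node, demo_process() and drain() are illustrative names, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

/* Stand-in element type; in the driver this is struct i915_vma,
 * linked into obj->vma_list via vma->obj_link. */
struct node {
	int id;
	TAILQ_ENTRY(node) link;
};

TAILQ_HEAD(node_list, node);

/* Stand-in for i915_vma_unbind(): in the driver this step may retire
 * requests and thereby mutate the source list, which is why a plain
 * list_for_each_entry_safe() walk is unsafe there. */
static int demo_process(struct node *n)
{
	printf("processing %d\n", n->id);
	return 0;
}

static int drain(struct node_list *list)
{
	struct node_list still_in_list;
	struct node *n;
	int ret = 0;

	TAILQ_INIT(&still_in_list);
	while ((n = TAILQ_FIRST(list)) != NULL) {
		/* the list_move_tail(&vma->obj_link, &still_in_list) step */
		TAILQ_REMOVE(list, n, link);
		TAILQ_INSERT_TAIL(&still_in_list, n, link);

		ret = demo_process(n);
		if (ret)
			break;
	}
	/* the list_splice(&still_in_list, &obj->vma_list) step, modulo ordering */
	TAILQ_CONCAT(list, &still_in_list, link);
	return ret;
}

int main(void)
{
	struct node_list list;
	int i;

	TAILQ_INIT(&list);
	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->id = i;
		TAILQ_INSERT_TAIL(&list, n, link);
	}
	return drain(&list);
}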

drivers/gpu/drm/i915/i915_drv.h

@@ -3052,6 +3052,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

drivers/gpu/drm/i915/i915_gem.c

@@ -283,18 +283,38 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.release = i915_gem_object_release_phys,
 };
 
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	LIST_HEAD(still_in_list);
+	int ret;
+
+	/* The vma will only be freed if it is marked as closed, and if we wait
+	 * upon rendering to the vma, we may unbind anything in the list.
+	 */
+	while ((vma = list_first_entry_or_null(&obj->vma_list,
+					       struct i915_vma,
+					       obj_link))) {
+		list_move_tail(&vma->obj_link, &still_in_list);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			break;
+	}
+	list_splice(&still_in_list, &obj->vma_list);
+
+	return ret;
+}
+
 static int
 drop_pages(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *next;
 	int ret;
 
 	i915_gem_object_get(obj);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
-
-	ret = i915_gem_object_put_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == 0)
+		ret = i915_gem_object_put_pages(obj);
 	i915_gem_object_put(obj);
 
 	return ret;
@@ -3450,8 +3470,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma, *next;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3462,7 +3481,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3471,11 +3491,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			return -EBUSY;
 		}
 
-		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-		}
+		if (i915_gem_valid_gtt_space(vma, cache_level))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+
+		/* As unbinding may affect other elements in the
+		 * obj->vma_list (due to side-effects from retiring
+		 * an active vma), play safe and restart the iterator.
+		 */
+		goto restart;
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -3494,7 +3521,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
 			/* Access to snoopable pages through the GTT is
 			 * incoherent and on some machines causes a hard
 			 * lockup. Relinquish the CPU mmaping to force
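
The i915_gem_object_set_cache_level() change above takes the other approach to the same hazard: rather than draining onto a private list, it restarts the walk from the head of obj->vma_list after every unbind, relying on the i915_gem_valid_gtt_space() check to skip already-conforming VMAs cheaply on each pass. A minimal sketch of that restart shape follows, reusing struct node / node_list from the earlier sketch; is_valid() and fix_up() are hypothetical stand-ins for i915_gem_valid_gtt_space() and i915_vma_unbind().

#include <stdbool.h>

static bool is_valid(struct node *n);	/* hypothetical predicate */
static int fix_up(struct node *n);	/* hypothetical; may mutate the list */

static int fix_all(struct node_list *list)
{
	struct node *n;
	int ret;

restart:
	TAILQ_FOREACH(n, list, link) {
		if (is_valid(n))
			continue;

		ret = fix_up(n);
		if (ret)
			return ret;

		/* The list may have changed behind us, so the iterator's
		 * cached successor of n cannot be trusted; start over from
		 * the head.  This terminates provided fix_up() leaves n
		 * valid (or removes it), as each pass shrinks the set of
		 * invalid nodes. */
		goto restart;
	}
	return 0;
}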

drivers/gpu/drm/i915/i915_gem_shrinker.c

@@ -172,8 +172,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	       (obj = list_first_entry_or_null(phase->list,
 					       typeof(*obj),
 					       global_list))) {
-			struct i915_vma *vma, *v;
-
 		list_move_tail(&obj->global_list, &still_in_list);
 
 		if (flags & I915_SHRINK_PURGEABLE &&
@@ -193,11 +191,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		i915_gem_object_get(obj);
 
 		/* For the unbound phase, this should be a no-op! */
-		list_for_each_entry_safe(vma, v,
-					 &obj->vma_list, obj_link)
-			if (i915_vma_unbind(vma))
-				break;
-
+		i915_gem_object_unbind(obj);
 		if (i915_gem_object_put_pages(obj) == 0)
 			count += obj->base.size >> PAGE_SHIFT;
 

drivers/gpu/drm/i915/i915_gem_userptr.c

@@ -104,7 +104,6 @@ static void cancel_userptr(struct work_struct *work)
 	if (obj->pages != NULL) {
 		struct drm_i915_private *dev_priv = to_i915(dev);
-		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
 		wait_rendering(obj);
 
@@ -112,8 +111,7 @@ static void cancel_userptr(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-			WARN_ON(i915_vma_unbind(vma));
+		WARN_ON(i915_gem_object_unbind(obj));
 		WARN_ON(i915_gem_object_put_pages(obj));
 		dev_priv->mm.interruptible = was_interruptible;
 	}