drm/i915/gem: Drop lru bumping on display unpinning
Simplify the frontbuffer unpin by removing the lock requirement. The LRU
bumping was primarily there to protect the GTT from eviction and to keep
frontbuffers from being eagerly shrunk. Now we protect frontbuffers from
the shrinker and avoid accidentally evicting from the GTT, so the LRU
bump no longer buys us anything and we save time by skipping it.

Reported-and-tested-by: Matti Hämäläinen <ccr@tnsp.org>
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2905
Fixes: c1793ba86a ("drm/i915: Add ww locking to pin_to_display_plane, v2.")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210119214336.1463-6-chris@chris-wilson.co.uk
(cherry picked from commit 14ca83eece)
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: <stable@vger.kernel.org> # v5.10+
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
commit 761c70a525
parent e4747cb3ec
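
For orientation before the diff: once the object lock and the LRU bump are dropped, unpinning a framebuffer vma reduces to releasing the fence, the pin and the reference. A condensed sketch of the reworked helper, reconstructed from the hunks below (a sketch only, not a complete file; kernel headers and callers omitted):

/*
 * Sketch only, condensed from the diff below: intel_unpin_fb_vma() no
 * longer takes the object lock or calls
 * i915_gem_object_unpin_from_display_plane(); it simply undoes the pin.
 */
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
{
	if (flags & PLANE_HAS_FENCE)
		i915_vma_unpin_fence(vma);	/* undo i915_vma_pin_fence() from pin time */
	i915_vma_unpin(vma);			/* drop the display pin; no GGTT LRU bump */
	i915_vma_put(vma);			/* drop the vma reference held for the fb */
}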
@@ -2309,7 +2309,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
 		 */
 		ret = i915_vma_pin_fence(vma);
 		if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
-			i915_gem_object_unpin_from_display_plane(vma);
+			i915_vma_unpin(vma);
 			vma = ERR_PTR(ret);
 			goto err;
 		}
@@ -2327,12 +2327,9 @@ err:
 
 void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
 {
-	i915_gem_object_lock(vma->obj, NULL);
 	if (flags & PLANE_HAS_FENCE)
 		i915_vma_unpin_fence(vma);
-	i915_gem_object_unpin_from_display_plane(vma);
-	i915_gem_object_unlock(vma->obj);
-
+	i915_vma_unpin(vma);
 	i915_vma_put(vma);
 }
 
@@ -359,7 +359,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
 	intel_frontbuffer_flip_complete(overlay->i915,
 					INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
 
-	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_unpin(vma);
 	i915_vma_put(vma);
 }
 
@@ -860,7 +860,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin_from_display_plane(vma);
+	i915_vma_unpin(vma);
 out_pin_section:
 	atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
 
@@ -387,48 +387,6 @@ err:
 	return vma;
 }
 
-static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
-{
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	struct i915_vma *vma;
-
-	if (list_empty(&obj->vma.list))
-		return;
-
-	mutex_lock(&i915->ggtt.vm.mutex);
-	spin_lock(&obj->vma.lock);
-	for_each_ggtt_vma(vma, obj) {
-		if (!drm_mm_node_allocated(&vma->node))
-			continue;
-
-		GEM_BUG_ON(vma->vm != &i915->ggtt.vm);
-		list_move_tail(&vma->vm_link, &vma->vm->bound_list);
-	}
-	spin_unlock(&obj->vma.lock);
-	mutex_unlock(&i915->ggtt.vm.mutex);
-
-	if (i915_gem_object_is_shrinkable(obj)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
-		if (obj->mm.madv == I915_MADV_WILLNEED &&
-		    !atomic_read(&obj->mm.shrink_pin))
-			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-	}
-}
-
-void
-i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
-{
-	/* Bump the LRU to try and avoid premature eviction whilst flipping */
-	i915_gem_object_bump_inactive_ggtt(vma->obj);
-
-	i915_vma_unpin(vma);
-}
-
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  * @obj: object to act on
@@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	else
 		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
 
-	/* And bump the LRU for this access */
-	i915_gem_object_bump_inactive_ggtt(obj);
-
 	i915_gem_object_unlock(obj);
 
 	if (write_domain)
@@ -486,7 +486,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
 				     const struct i915_ggtt_view *view,
 				     unsigned int flags);
-void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
 
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);