drm/i915: Move gpu_write_list to per-ring
... to prevent flush processing of an idle (or even absent) ring.
This fixes a regression during suspend from 87acb0a5.
Reported-and-tested-by: Alexey Fisher <bug-track@fisher-privat.net>
Tested-by: Peter Clifton <pcjc2@cam.ac.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
commit 641934069d
parent b6651458d3
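
For readers skimming the diff: the functional core of the fix is the new early-out in i915_ring_idle() below. Once each ring owns its own gpu_write_list, a ring with nothing pending a write flush (including a ring that was never initialised on the current hardware) is skipped before any flush is issued. The following is a minimal, compilable sketch of that control flow, not the kernel code: the list type and helpers are simplified stand-ins, and only the field this patch adds is shown.

#include <stdbool.h>

/* Illustrative stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head->prev = head;
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* Only the per-ring field introduced by this patch. */
struct intel_ring_buffer {
	struct list_head gpu_write_list;
};

/* Sketch of i915_ring_idle() after the patch: an idle (or absent)
 * ring has an empty gpu_write_list, so we return before touching
 * the hardware. With the old single mm.gpu_write_list there was no
 * cheap way to ask whether *this* ring had anything to flush. */
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->gpu_write_list))
		return 0;

	/* ... otherwise flush the ring and wait for the resulting
	 * request, as the real i915_gem_flush_ring() /
	 * i915_wait_request() path does. */
	return 0;
}

The rest of the diff is plumbing: i915_gem_process_flushing_list() walks the ring-local list instead of filtering the global list by obj_priv->ring, execbuffer queues objects on the ring they execute on, and the new init_ring_lists() helper initialises the per-ring lists.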
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -570,15 +570,6 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head flushing_list;
 
-		/**
-		 * List of objects currently pending a GPU write flush.
-		 *
-		 * All elements on this list will belong to either the
-		 * active_list or flushing_list, last_rendering_seqno can
-		 * be used to differentiate between the two elements.
-		 */
-		struct list_head gpu_write_list;
-
 		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1657,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.gpu_write_list,
+				 &ring->gpu_write_list,
 				 gpu_write_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
-		if (obj->write_domain & flush_domains &&
-		    obj_priv->ring == ring) {
+		if (obj->write_domain & flush_domains) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
@@ -2173,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	if (list_empty(&ring->gpu_write_list))
+		return 0;
+
 	i915_gem_flush_ring(dev, NULL, ring,
 			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	return i915_wait_request(dev,
@@ -3786,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
-
 		obj->write_domain = obj->pending_write_domain;
-		if (obj->write_domain)
-			list_move_tail(&obj_priv->gpu_write_list,
-				       &dev_priv->mm.gpu_write_list);
-
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3858,9 +3854,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		obj_priv = to_intel_bo(obj);
 
 		i915_gem_object_move_to_active(obj, ring);
+		if (obj->write_domain)
+			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+				       &ring->gpu_write_list);
 	}
 
 	i915_add_request(dev, file, request, ring);
@@ -4618,6 +4616,14 @@ i915_gem_lastclose(struct drm_device *dev)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
@@ -4626,17 +4632,13 @@ i915_gem_load(struct drm_device *dev)
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.request_list);
+	init_ring_lists(&dev_priv->render_ring);
+	init_ring_lists(&dev_priv->bsd_ring);
+	init_ring_lists(&dev_priv->blt_ring);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -580,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(dev, ring);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -82,6 +82,15 @@ struct intel_ring_buffer {
 	 */
 	struct list_head request_list;
 
+	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */