drm/i915: Prevent concurrent tiling/framebuffer modifications

Reintroduce a lock around tiling vs framebuffer creation to prevent
modification of the obj->tiling_and_stride whilst the framebuffer is
being created. Rather than use struct_mutex once again, use the
per-object lock - this will also be required in future to prevent
changing the tiling whilst submitting rendering.

Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Fixes: 24dbf51a55 ("drm/i915: struct_mutex is not required for allocating the framebuffer")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170301154128.2841-2-chris@chris-wilson.co.uk
commit dd689287b9 (parent 9aceb5c15d)
Chris Wilson <chris@chris-wilson.co.uk>, 2017-03-01 15:41:28 +00:00
4 changed files with 42 additions and 12 deletions
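
Why the re-check under the object lock matters, shown as an illustrative
interleaving (assembled from the helpers introduced in the diff below, not
text from the patch): the tiling ioctl's unlocked peek at
framebuffer_references can race framebuffer creation, so the ioctl must test
the count again once it holds the lock.

/* Thread A: set_tiling ioctl          Thread B: framebuffer creation
 *
 * i915_gem_object_is_framebuffer(obj)
 *         READ_ONCE() sees 0, proceed
 *                                      i915_gem_object_lock(obj);
 *                                      obj->framebuffer_references++;
 *                                      tiling = i915_gem_object_get_tiling(obj);
 *                                      i915_gem_object_unlock(obj);
 * i915_gem_object_lock(obj);
 * if (i915_gem_object_is_framebuffer(obj)) {
 *         i915_gem_object_unlock(obj);
 *         return -EBUSY;               (now sees 1: the fb pins the tiling)
 * }
 */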

--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -165,7 +165,7 @@ struct drm_i915_gem_object {
 	struct reservation_object *resv;
 
 	/** References from framebuffers, locks out tiling changes. */
-	atomic_t framebuffer_references;
+	unsigned int framebuffer_references;
 
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
@@ -260,6 +260,16 @@ extern void drm_gem_object_unreference(struct drm_gem_object *);
 __deprecated
 extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
 
+static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+{
+	reservation_object_lock(obj->resv, NULL);
+}
+
+static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
+{
+	reservation_object_unlock(obj->resv);
+}
+
 static inline bool
 i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
 {
@@ -306,6 +316,12 @@ i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
 
 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
 
+static inline bool
+i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+{
+	return READ_ONCE(obj->framebuffer_references);
+}
+
 static inline unsigned int
 i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
 {

--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -207,7 +207,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			if (!(flags & I915_SHRINK_ACTIVE) &&
 			    (i915_gem_object_is_active(obj) ||
-			     atomic_read(&obj->framebuffer_references)))
+			     i915_gem_object_is_framebuffer(obj)))
 				continue;
 
 			if (!can_release_pages(obj))

--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -238,7 +238,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	if ((tiling | stride) == obj->tiling_and_stride)
 		return 0;
 
-	if (atomic_read(&obj->framebuffer_references))
+	if (i915_gem_object_is_framebuffer(obj))
 		return -EBUSY;
 
 	/* We need to rebind the object if its current allocation
@@ -258,6 +258,12 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 		if (err)
 			return err;
 
+	i915_gem_object_lock(obj);
+	if (i915_gem_object_is_framebuffer(obj)) {
+		i915_gem_object_unlock(obj);
+		return -EBUSY;
+	}
+
 	/* If the memory has unknown (i.e. varying) swizzling, we pin the
 	 * pages to prevent them being swapped out and causing corruption
 	 * due to the change in swizzling.
@@ -294,6 +300,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	}
 
 	obj->tiling_and_stride = tiling | stride;
+	i915_gem_object_unlock(obj);
 
 	/* Force the fence to be reacquired for GTT access */
 	i915_gem_release_mmap(obj);

--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14185,7 +14185,10 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
 	drm_framebuffer_cleanup(fb);
-	WARN_ON(atomic_dec_return(&intel_fb->obj->framebuffer_references) < 0);
+
+	i915_gem_object_lock(intel_fb->obj);
+	WARN_ON(!intel_fb->obj->framebuffer_references--);
+	i915_gem_object_unlock(intel_fb->obj);
 	i915_gem_object_put(intel_fb->obj);
 
 	kfree(intel_fb);
@@ -14262,12 +14265,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 				  struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	unsigned int tiling = i915_gem_object_get_tiling(obj);
-	u32 pitch_limit, stride_alignment;
 	struct drm_format_name_buf format_name;
+	u32 pitch_limit, stride_alignment;
+	unsigned int tiling, stride;
 	int ret = -EINVAL;
 
-	atomic_inc(&obj->framebuffer_references);
+	i915_gem_object_lock(obj);
+	obj->framebuffer_references++;
+	tiling = i915_gem_object_get_tiling(obj);
+	stride = i915_gem_object_get_stride(obj);
+	i915_gem_object_unlock(obj);
 
 	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
 		/*
@@ -14339,11 +14346,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 		 * If there's a fence, enforce that
 		 * the fb pitch and fence stride match.
 		 */
-		if (tiling != I915_TILING_NONE &&
-		    mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
+		if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
 			DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
-				  mode_cmd->pitches[0],
-				  i915_gem_object_get_stride(obj));
+				  mode_cmd->pitches[0], stride);
 			goto err;
 		}
@@ -14424,7 +14429,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
 	return 0;
 
 err:
-	atomic_dec(&obj->framebuffer_references);
+	i915_gem_object_lock(obj);
+	obj->framebuffer_references--;
+	i915_gem_object_unlock(obj);
 	return ret;
 }
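
As a recap of the scheme, a minimal self-contained userspace model
(illustrative only: the struct, the function names, and the pthread mutex
standing in for the object's reservation lock are all assumptions, not
driver code). Writers of the reference count and of the tiling state take
the per-object lock, so a framebuffer that holds a reference pins the
tiling for its lifetime:

#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t lock;		/* stands in for obj->resv */
	unsigned int framebuffer_refs;	/* written only under lock */
	unsigned int tiling_and_stride;
};

/* models i915_gem_object_set_tiling(): refuse while a framebuffer
 * holds a reference, deciding under the lock */
static int set_tiling(struct object *obj, unsigned int value)
{
	pthread_mutex_lock(&obj->lock);
	if (obj->framebuffer_refs) {
		pthread_mutex_unlock(&obj->lock);
		return -1;		/* -EBUSY in the kernel */
	}
	obj->tiling_and_stride = value;
	pthread_mutex_unlock(&obj->lock);
	return 0;
}

/* models intel_framebuffer_init(): take the reference and sample the
 * tiling in one critical section, so set_tiling() cannot interleave */
static unsigned int framebuffer_init(struct object *obj)
{
	unsigned int tiling;

	pthread_mutex_lock(&obj->lock);
	obj->framebuffer_refs++;
	tiling = obj->tiling_and_stride;
	pthread_mutex_unlock(&obj->lock);
	return tiling;
}

int main(void)
{
	struct object obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("fb latched tiling %u\n", framebuffer_init(&obj));
	printf("set_tiling while fb exists -> %d (busy)\n",
	       set_tiling(&obj, 1));
	return 0;
}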