drm/i915: Remove queue_flip pointer.
With the removal of cs support this is no longer reachable. Can be revived if needed.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1463490484-19540-15-git-send-email-maarten.lankhorst@linux.intel.com
Reviewed-by: Patrik Jakobsson <patrik.jakobsson@linux.intel.com>
parent b8d2afae55
commit 2ee004f7c5
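The pointer being deleted follows the driver's usual per-generation vtable pattern: a function pointer in struct drm_i915_display_funcs that is assigned at init time and only dispatched through when non-NULL. Below is a minimal, standalone sketch of that pattern; the member name queue_flip mirrors the diff, but display_funcs, do_page_flip, gen2_queue_flip and the int pipe argument are simplified stand-ins for illustration, not the driver's real signatures. With every assignment to .queue_flip gone (last hunk), the guarded call always takes the -ENODEV branch, which is why the pointer itself can go.

#include <stdio.h>
#include <errno.h>

/* Illustrative hook table; the real struct lives in the i915 driver. */
struct display_funcs {
        int (*queue_flip)(int pipe);    /* NULL: CS flip path not available */
};

/* Stand-in for a per-generation implementation. */
static int gen2_queue_flip(int pipe)
{
        printf("queueing CS flip on pipe %d\n", pipe);
        return 0;
}

/* Guarded dispatch: callers only use the hook when it was filled in. */
static int do_page_flip(const struct display_funcs *funcs, int pipe)
{
        if (!funcs->queue_flip)         /* after this patch: always taken */
                return -ENODEV;
        return funcs->queue_flip(pipe);
}

int main(void)
{
        struct display_funcs legacy = { .queue_flip = gen2_queue_flip };
        struct display_funcs mmio_only = { 0 };     /* hook never assigned */

        do_page_flip(&legacy, 0);                   /* dispatches through hook */
        printf("mmio-only: %d\n", do_page_flip(&mmio_only, 0));  /* -ENODEV */
        return 0;
}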
@@ -618,11 +618,6 @@ struct drm_i915_display_funcs {
 	void (*audio_codec_disable)(struct intel_encoder *encoder);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
-	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_i915_gem_object *obj,
-			  struct drm_i915_gem_request *req,
-			  uint64_t gtt_offset);
 	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
 	/* clock updates for mode set */
 	/* cursor updates */
@@ -10964,237 +10964,6 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-static int intel_gen2_queue_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct drm_i915_gem_request *req,
-				 uint64_t gtt_offset)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	/* Can't queue multiple flips, so wait for the previous
-	 * one to finish before executing the next.
-	 */
-	if (intel_crtc->plane)
-		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-	else
-		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, gtt_offset);
-	intel_ring_emit(engine, 0); /* aux display base address, unused */
-
-	return 0;
-}
-
-static int intel_gen3_queue_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct drm_i915_gem_request *req,
-				 uint64_t gtt_offset)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	if (intel_crtc->plane)
-		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-	else
-		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, gtt_offset);
-	intel_ring_emit(engine, MI_NOOP);
-
-	return 0;
-}
-
-static int intel_gen4_queue_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct drm_i915_gem_request *req,
-				 uint64_t gtt_offset)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
-
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
-
-	/* i965+ uses the linear or tiled offsets from the
-	 * Display Registers (which do not change across a page-flip)
-	 * so we need only reprogram the base address.
-	 */
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, gtt_offset | obj->tiling_mode);
-
-	/* XXX Enabling the panel-fitter across page-flip is so far
-	 * untested on non-native modes, so ignore it for now.
-	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
-	 */
-	pf = 0;
-	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
-
-	return 0;
-}
-
-static int intel_gen6_queue_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct drm_i915_gem_request *req,
-				 uint64_t gtt_offset)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
-
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(engine, gtt_offset);
-
-	/* Contrary to the suggestions in the documentation,
-	 * "Enable Panel Fitter" does not seem to be required when page
-	 * flipping with a non-native mode, and worse causes a normal
-	 * modeset to fail.
-	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
-	 */
-	pf = 0;
-	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
-
-	return 0;
-}
-
-static int intel_gen7_queue_flip(struct drm_device *dev,
-				 struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_i915_gem_object *obj,
-				 struct drm_i915_gem_request *req,
-				 uint64_t gtt_offset)
-{
-	struct intel_engine_cs *engine = req->engine;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
-	int len, ret;
-
-	switch (intel_crtc->plane) {
-	case PLANE_A:
-		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
-		break;
-	case PLANE_B:
-		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
-		break;
-	case PLANE_C:
-		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
-		break;
-	default:
-		WARN_ONCE(1, "unknown plane in flip command\n");
-		return -ENODEV;
-	}
-
-	len = 4;
-	if (engine->id == RCS) {
-		len += 6;
-		/*
-		 * On Gen 8, SRM is now taking an extra dword to accommodate
-		 * 48bits addresses, and we need a NOOP for the batch size to
-		 * stay even.
-		 */
-		if (IS_GEN8(dev))
-			len += 2;
-	}
-
-	/*
-	 * BSpec MI_DISPLAY_FLIP for IVB:
-	 * "The full packet must be contained within the same cache line."
-	 *
-	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
-	 * cacheline, if we ever start emitting more commands before
-	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
-	 * then do the cacheline alignment, and finally emit the
-	 * MI_DISPLAY_FLIP.
-	 */
-	ret = intel_ring_cacheline_align(req);
-	if (ret)
-		return ret;
-
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
-
-	/* Unmask the flip-done completion message. Note that the bspec says that
-	 * we should do this for both the BCS and RCS, and that we must not unmask
-	 * more than one flip event at any time (or ensure that one flip message
-	 * can be sent by waiting for flip-done prior to queueing new flips).
-	 * Experimentation says that BCS works despite DERRMR masking all
-	 * flip-done completion events and that unmasking all planes at once
-	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
-	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
-	 */
-	if (engine->id == RCS) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
-		if (IS_GEN8(dev))
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
-					MI_SRM_LRM_GLOBAL_GTT);
-		else
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
-		if (IS_GEN8(dev)) {
-			intel_ring_emit(engine, 0);
-			intel_ring_emit(engine, MI_NOOP);
-		}
-	}
-
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(engine, gtt_offset);
-	intel_ring_emit(engine, (MI_NOOP));
-
-	return 0;
-}
-
 static void intel_mmio_flip_work_func(struct work_struct *w)
 {
 	struct intel_flip_work *work =
@@ -14752,34 +14521,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
 		dev_priv->display.modeset_calc_cdclk =
 			broxton_modeset_calc_cdclk;
 	}
-
-	switch (INTEL_INFO(dev_priv)->gen) {
-	case 2:
-		dev_priv->display.queue_flip = intel_gen2_queue_flip;
-		break;
-
-	case 3:
-		dev_priv->display.queue_flip = intel_gen3_queue_flip;
-		break;
-
-	case 4:
-	case 5:
-		dev_priv->display.queue_flip = intel_gen4_queue_flip;
-		break;
-
-	case 6:
-		dev_priv->display.queue_flip = intel_gen6_queue_flip;
-		break;
-	case 7:
-	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
-		dev_priv->display.queue_flip = intel_gen7_queue_flip;
-		break;
-	case 9:
-		/* Drop through - unsupported since execlist only. */
-	default:
-		/* Default just returns -ENODEV to indicate unsupported */
-		break;
-	}
 }
 
 /*