From b3c95d0bdb0855b1f28370629e9eebec6bceac17 Mon Sep 17 00:00:00 2001
From: Anshuman Gupta
Date: Mon, 11 Jan 2021 13:41:02 +0530
Subject: [PATCH 1/6] drm/i915/hdcp: Update CP property in update_pipe

When the crtc state's need_modeset is true it is not necessarily a real
modeset; it can turn out to be a fastset instead of a modeset. This sets
the content protection property to DESIRED, and hdcp update_pipe leaves
the property in the DESIRED state even though the actual hdcp->value is
ENABLED.

This issue is caught with a DP MST setup, where we have multiple
connectors in the same DP MST topology. Disabling HDCP on one DP MST
connector sets the crtc state need_modeset to true for all the other
crtcs driving the other DP MST topology connectors. This flips the other
DP MST connectors' CP property to DESIRED even though their actual
hdcp->value is ENABLED.

The above scenario fails the DP MST HDCP IGT test: disabling HDCP on one
MST stream should not cause HDCP to be disabled on another MST stream in
the same DP MST topology.

v2:
- Fixed connector->base.registration_state == DRM_CONNECTOR_REGISTERED
  WARN_ON.
v3:
- Commit log improvement. [Uma]
- Added a comment before scheduling prop_work. [Uma]

Fixes: 33f9a623bfc6 ("drm/i915/hdcp: Update CP as per the kernel internal state")
Cc: Ramalingam C
Reviewed-by: Uma Shankar
Reviewed-by: Ramalingam C
Tested-by: Karthik B S
Signed-off-by: Anshuman Gupta
Link: https://patchwork.freedesktop.org/patch/msgid/20210111081120.28417-2-anshuman.gupta@intel.com
(cherry picked from commit d276e16702e2d634094f75f69df3b493f359fe31)
Signed-off-by: Jani Nikula
---
 drivers/gpu/drm/i915/display/intel_hdcp.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index b2a4bbcfdcd2..eee8263405b9 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -2221,6 +2221,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 			desired_and_not_enabled =
 				hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
 		mutex_unlock(&hdcp->mutex);
+		/*
+		 * If HDCP already ENABLED and CP property is DESIRED, schedule
+		 * prop_work to update correct CP property to user space.
+		 */
+		if (!desired_and_not_enabled && !content_protection_type_changed) {
+			drm_connector_get(&connector->base);
+			schedule_work(&hdcp->prop_work);
+		}
 	}

 	if (desired_and_not_enabled || content_protection_type_changed)

From 8662e1119a7d1baa1b2001689b2923e9050754bd Mon Sep 17 00:00:00 2001
From: Anshuman Gupta
Date: Mon, 11 Jan 2021 13:41:03 +0530
Subject: [PATCH 2/6] drm/i915/hdcp: Get conn while content_type changed

Grab a DRM connector reference while scheduling a prop_work to avoid any
possible destruction of the DRM connector while it is in the
DRM_CONNECTOR_REGISTERED state.
Fixes: a6597faa2d59 ("drm/i915: Protect workers against disappearing connectors")
Cc: Sean Paul
Cc: Ramalingam C
Reviewed-by: Uma Shankar
Reviewed-by: Ramalingam C
Tested-by: Karthik B S
Signed-off-by: Anshuman Gupta
Link: https://patchwork.freedesktop.org/patch/msgid/20210111081120.28417-3-anshuman.gupta@intel.com
(cherry picked from commit b3c6661aad979ec3d4f5675cf3e6a35828607d6a)
Signed-off-by: Jani Nikula
---
 drivers/gpu/drm/i915/display/intel_hdcp.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index eee8263405b9..b9d8825e2bb1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
 	if (content_protection_type_changed) {
 		mutex_lock(&hdcp->mutex);
 		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		drm_connector_get(&connector->base);
 		schedule_work(&hdcp->prop_work);
 		mutex_unlock(&hdcp->mutex);
 	}

From 488751a0ef9b5ce572c47301ce62d54fc6b5a74d Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Mon, 18 Jan 2021 09:53:32 +0000
Subject: [PATCH 3/6] drm/i915/gt: Prevent use of engine->wa_ctx after error

On error we unpin and free the wa_ctx.vma, but do not clear any of the
derived flags. During lrc_init, we look at the flags and attempt to
dereference the wa_ctx.vma if they are set. To protect the error path
where we try to limp along without the wa_ctx, make sure we clear those
flags!

Reported-by: Matt Roper
Fixes: 604a8f6f1e33 ("drm/i915/lrc: Only enable per-context and per-bb buffers if set")
Signed-off-by: Chris Wilson
Cc: Matt Roper
Cc: Tvrtko Ursulin
Cc: Mika Kuoppala
Cc: stable@vger.kernel.org # v4.15+
Reviewed-by: Matt Roper
Link: https://patchwork.freedesktop.org/patch/msgid/20210108204026.20682-1-chris@chris-wilson.co.uk
(cherry-picked from 5b4dc95cf7f573e927fbbd406ebe54225d41b9b2)
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20210118095332.458813-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 7614a3d24fca..26c7d0a50585 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3988,6 +3988,9 @@ err:
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
 	i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+	/* Called on error unwind, clear all flags to prevent further use */
+	memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
 }

 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);

From 171a8e99828144050015672016dd63494c6d200a Mon Sep 17 00:00:00 2001
From: Tvrtko Ursulin
Date: Mon, 18 Jan 2021 10:07:24 +0000
Subject: [PATCH 4/6] drm/i915/pmu: Don't grab wakeref when enabling events

Chris found a CI report which points out that calling
intel_runtime_pm_get from inside the i915_pmu_enable hook is not
allowed, since it can be invoked from hard irq context. This is
something we knew but forgot, so let's fix it once again.

We do this by syncing the internal bookkeeping with the hardware rc6
counter on driver load.

v2:
 * Always sync on parking and fully sync on init.
Signed-off-by: Tvrtko Ursulin
Fixes: f4e9894b6952 ("drm/i915/pmu: Correct the rc6 offset upon enabling")
Cc: Chris Wilson
Reviewed-by: Chris Wilson
Link: https://patchwork.freedesktop.org/patch/msgid/20201214094349.3563876-1-tvrtko.ursulin@linux.intel.com
(cherry picked from commit dbe13ae1d6abaab417edf3c37601c6a56594a4cd)
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20210118100724.465555-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_pmu.c | 30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d76685ce0399..9856479b56d8 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
 	return val;
 }

+static void init_rc6(struct i915_pmu *pmu)
+{
+	struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+	intel_wakeref_t wakeref;
+
+	with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
+		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+					pmu->sample[__I915_SAMPLE_RC6].cur;
+		pmu->sleep_last = ktime_get();
+	}
+}
+
 static void park_rc6(struct drm_i915_private *i915)
 {
 	struct i915_pmu *pmu = &i915->pmu;

-	if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-
+	pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
 	pmu->sleep_last = ktime_get();
 }
@@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
 	return __get_rc6(gt);
 }

+static void init_rc6(struct i915_pmu *pmu) { }
 static void park_rc6(struct drm_i915_private *i915) {}

 #endif
@@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
 		container_of(event->pmu, typeof(*i915), pmu.base);
 	unsigned int bit = event_enabled_bit(event);
 	struct i915_pmu *pmu = &i915->pmu;
-	intel_wakeref_t wakeref;
 	unsigned long flags;

-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	spin_lock_irqsave(&pmu->lock, flags);

 	/*
@@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
 	GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
 	GEM_BUG_ON(pmu->enable_count[bit] == ~0);

-	if (pmu->enable_count[bit] == 0 &&
-	    config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
-		pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
-		pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-		pmu->sleep_last = ktime_get();
-	}
-
 	pmu->enable |= BIT_ULL(bit);
 	pmu->enable_count[bit]++;

@@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
 	 * an existing non-zero value.
 	 */
 	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }

 static void i915_pmu_disable(struct perf_event *event)
@@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	pmu->timer.function = i915_sample;
 	pmu->cpuhp.cpu = -1;
+	init_rc6(pmu);

 	if (!is_igp(i915)) {
 		pmu->name = kasprintf(GFP_KERNEL,

From 45db630e5f7ec83817c57c8ae387fe219bd42adf Mon Sep 17 00:00:00 2001
From: Chris Wilson
Date: Mon, 18 Jan 2021 10:17:55 +0000
Subject: [PATCH 5/6] drm/i915: Check for rq->hwsp validity after acquiring RCU lock

Since we allow removing the timeline map at runtime, there is a risk
that rq->hwsp points into a stale page.
To control that risk, we hold the RCU read lock while reading *rq->hwsp,
but we missed a couple of important barriers. First, the unpinning /
removal of the timeline map must be after all RCU readers into that map
are complete, i.e. after an rcu barrier (in this case courtesy of
call_rcu()). Secondly, we must make sure that the rq->hwsp we are about
to dereference under the RCU lock is valid. In this case, we make the
rq->hwsp pointer safe during i915_request_retire() and so we know that
rq->hwsp may become invalid only after the request has been signaled.
Therefore, if the request is not yet signaled when we acquire rq->hwsp
under the RCU lock, we know that rq->hwsp will remain valid for the
duration of the RCU read lock.

This is a very small window that may lead to either considering the
request not completed (causing a delay until the request is checked
again; any wait for the request is not affected) or dereferencing an
invalid pointer.

Fixes: 3adac4689f58 ("drm/i915: Introduce concept of per-timeline (context) HWSP")
Signed-off-by: Chris Wilson
Cc: Tvrtko Ursulin
Cc: stable@vger.kernel.org # v5.1+
Reviewed-by: Tvrtko Ursulin
Link: https://patchwork.freedesktop.org/patch/msgid/20201218122421.18344-1-chris@chris-wilson.co.uk
(cherry picked from commit 9bb36cf66091ddf2d8840e5aa705ad3c93a6279b)
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20210118101755.476744-1-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/gt/intel_breadcrumbs.c |  9 ++---
 drivers/gpu/drm/i915/gt/intel_timeline.c    | 10 +++---
 drivers/gpu/drm/i915/i915_request.h         | 37 ++++++++++++++++++---
 3 files changed, 38 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index a24cc1ff08a0..0625cbb3b431 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
 	return true;
 }

-static inline bool __request_completed(const struct i915_request *rq)
-{
-	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
 __maybe_unused static bool
 check_signal_order(struct intel_context *ce, struct i915_request *rq)
 {
@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
 		list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
 			bool release;

-			if (!__request_completed(rq))
+			if (!__i915_request_is_complete(rq))
 				break;

 			if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
 	 * straight onto a signaled list, and queue the irq worker for
 	 * its signal completion.
 	 */
-	if (__request_completed(rq)) {
+	if (__i915_request_is_complete(rq)) {
 		if (__signal_request(rq) &&
 		    llist_add(&rq->signal_node, &b->signaled_requests))
 			irq_work_queue(&b->irq_work);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7ea94d201fe6..8015964043eb 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 	struct intel_timeline_cacheline *cl =
 		container_of(rcu, typeof(*cl), rcu);

+	/* Must wait until after all *rq->hwsp are complete before removing */
+	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
 	i915_active_fini(&cl->active);
 	kfree(cl);
 }
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
 	GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
-	i915_gem_object_unpin_map(cl->hwsp->vma->obj);
-	i915_vma_put(cl->hwsp->vma);
-	__idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
 	call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 		return ERR_CAST(vaddr);
 	}

-	i915_vma_get(hwsp->vma);
 	cl->hwsp = hwsp;
 	cl->vaddr = page_pack_bits(vaddr, cacheline);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 620b6fab2c5c..92adfee30c7c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)

 static inline bool __i915_request_has_started(const struct i915_request *rq)
 {
-	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
 }

 /**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
  */
 static inline bool i915_request_started(const struct i915_request *rq)
 {
+	bool result;
+
 	if (i915_request_signaled(rq))
 		return true;

-	/* Remember: started but may have since been preempted! */
-	return __i915_request_has_started(rq);
+	result = true;
+	rcu_read_lock(); /* the HWSP may be freed at runtime */
+	if (likely(!i915_request_signaled(rq)))
+		/* Remember: started but may have since been preempted! */
+		result = __i915_request_has_started(rq);
+	rcu_read_unlock();
+
+	return result;
 }

 /**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
  */
 static inline bool i915_request_is_running(const struct i915_request *rq)
 {
+	bool result;
+
 	if (!i915_request_is_active(rq))
 		return false;

-	return __i915_request_has_started(rq);
+	rcu_read_lock();
+	result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+	rcu_read_unlock();
+
+	return result;
 }

 /**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
 	return !list_empty(&rq->sched.link);
 }

+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+	return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
 static inline bool i915_request_completed(const struct i915_request *rq)
 {
+	bool result;
+
 	if (i915_request_signaled(rq))
 		return true;

-	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+	result = true;
+	rcu_read_lock(); /* the HWSP may be freed at runtime */
+	if (likely(!i915_request_signaled(rq)))
+		result = __i915_request_is_complete(rq);
+	rcu_read_unlock();
+
+	return result;
 }

 static inline void i915_request_mark_complete(struct i915_request *rq)

From 1c4995b0a576d24bb7ead991fb037c8b47ab6e32 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?=
Date: Mon, 18 Jan 2021 17:43:55 +0200
Subject: [PATCH 6/6] drm/i915: Only enable DFP 4:4:4->4:2:0 conversion when outputting YCbCr 4:4:4
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Let's not enable the 4:4:4->4:2:0 conversion bit in the DFP unless
we're actually outputting YCbCr 4:4:4. It would appear some protocol
converters blindly consult this bit even when the source is outputting
RGB, resulting in a visual mess.
Cc: stable@vger.kernel.org
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2914
Signed-off-by: Ville Syrjälä
Link: https://patchwork.freedesktop.org/patch/msgid/20210111164111.13302-1-ville.syrjala@linux.intel.com
Fixes: 181567aa9f0d ("drm/i915: Do YCbCr 444->420 conversion via DP protocol converters")
Reviewed-by: Jani Nikula
(cherry picked from commit 3170a21f7059c4660c469f59bf529f372a57da5f)
Signed-off-by: Jani Nikula
Link: https://patchwork.freedesktop.org/patch/msgid/20210118154355.24453-1-ville.syrjala@linux.intel.com
---
 drivers/gpu/drm/i915/display/intel_ddi.c | 2 +-
 drivers/gpu/drm/i915/display/intel_dp.c  | 9 +++++----
 drivers/gpu/drm/i915/display/intel_dp.h  | 3 ++-
 3 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 92940a0c5ef8..d5ace48b1ace 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -3725,7 +3725,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
 	intel_ddi_init_dp_buf_reg(encoder, crtc_state);
 	if (!is_mst)
 		intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-	intel_dp_configure_protocol_converter(intel_dp);
+	intel_dp_configure_protocol_converter(intel_dp, crtc_state);
 	intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
 					      true);
 	intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 37f1a10fd021..09123e8625c4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4014,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
 	intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }

-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state)
 {
 	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 	u8 tmp;
@@ -4033,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
 		drm_dbg_kms(&i915->drm,
 			    "Failed to set protocol converter HDMI mode to %s\n",
 			    enableddisabled(intel_dp->has_hdmi_sink));
-	tmp = intel_dp->dfp.ycbcr_444_to_420 ?
-		DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+	tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+		intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

 	if (drm_dp_dpcd_writeb(&intel_dp->aux,
 			       DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
@@ -4088,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
 	}

 	intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-	intel_dp_configure_protocol_converter(intel_dp);
+	intel_dp_configure_protocol_converter(intel_dp, pipe_config);
 	intel_dp_start_link_train(intel_dp, pipe_config);
 	intel_dp_stop_link_train(intel_dp, pipe_config);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index b871a09b6901..05f7ddf7a795 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 int intel_dp_retrain_link(struct intel_encoder *encoder,
 			  struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+					   const struct intel_crtc_state *crtc_state);
 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
 					   const struct intel_crtc_state *crtc_state,
 					   bool enable);