Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-11-30 07:34:12 +08:00

commit 5f9986a6cd

Merge tag 'drm-intel-fixes-2021-01-21' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

drm/i915 fixes for v5.11-rc5:
- HDCP fixes
- PMU wakeref fix
- Fix HWSP validity race
- Fix DP protocol converter accidental 4:4:4->4:2:0 conversion for RGB

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87a6t2kzgb.fsf@intel.com
@@ -3725,7 +3725,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
         intel_ddi_init_dp_buf_reg(encoder, crtc_state);
         if (!is_mst)
                 intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-        intel_dp_configure_protocol_converter(intel_dp);
+        intel_dp_configure_protocol_converter(intel_dp, crtc_state);
         intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
                                               true);
         intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
@@ -4014,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
         intel_de_posting_read(dev_priv, intel_dp->output_reg);
 }
 
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                           const struct intel_crtc_state *crtc_state)
 {
         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
         u8 tmp;
@@ -4033,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
                 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
                             enableddisabled(intel_dp->has_hdmi_sink));
 
-        tmp = intel_dp->dfp.ycbcr_444_to_420 ?
-                DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
+        tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
+                intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
 
         if (drm_dp_dpcd_writeb(&intel_dp->aux,
                                DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
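Annotation: the hunk above is the core of the RGB fix. DP_CONVERSION_TO_YCBCR420_ENABLE used to be requested whenever the downstream facing port (DFP) could convert 4:4:4 to 4:2:0, so an RGB scanout was silently converted as well; the bit is now gated on the pipe actually producing YCbCr 4:4:4. A minimal self-contained sketch of that decision logic follows — the enum and helper names are illustrative stand-ins, not the i915 definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real definitions live in the i915/DRM headers. */
enum output_format { OUTPUT_FORMAT_RGB, OUTPUT_FORMAT_YCBCR420, OUTPUT_FORMAT_YCBCR444 };
#define DP_CONVERSION_TO_YCBCR420_ENABLE (1 << 0)

/* Old logic: convert whenever the DFP can, even for RGB (the bug). */
static unsigned char convert_bit_old(bool dfp_444_to_420, enum output_format fmt)
{
        (void)fmt; /* output format ignored -- RGB got converted too */
        return dfp_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
}

/* Fixed logic: convert only when the pipe really outputs YCbCr 4:4:4. */
static unsigned char convert_bit_new(bool dfp_444_to_420, enum output_format fmt)
{
        return (fmt == OUTPUT_FORMAT_YCBCR444 && dfp_444_to_420) ?
                DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
}

int main(void)
{
        /* RGB scanout on a 4:2:0-capable converter: old=converted, new=passthrough. */
        printf("old: %#x, new: %#x\n",
               convert_bit_old(true, OUTPUT_FORMAT_RGB),
               convert_bit_new(true, OUTPUT_FORMAT_RGB));
        return 0;
}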
@@ -4088,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
         }
 
         intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
-        intel_dp_configure_protocol_converter(intel_dp);
+        intel_dp_configure_protocol_converter(intel_dp, pipe_config);
         intel_dp_start_link_train(intel_dp, pipe_config);
         intel_dp_stop_link_train(intel_dp, pipe_config);
@@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
 int intel_dp_retrain_link(struct intel_encoder *encoder,
                           struct drm_modeset_acquire_ctx *ctx);
 void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
-void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
+                                           const struct intel_crtc_state *crtc_state);
 void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
                                            const struct intel_crtc_state *crtc_state,
                                            bool enable);
@@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
         if (content_protection_type_changed) {
                 mutex_lock(&hdcp->mutex);
                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+                drm_connector_get(&connector->base);
                 schedule_work(&hdcp->prop_work);
                 mutex_unlock(&hdcp->mutex);
         }
@@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                 desired_and_not_enabled =
                         hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
                 mutex_unlock(&hdcp->mutex);
+                /*
+                 * If HDCP already ENABLED and CP property is DESIRED, schedule
+                 * prop_work to update correct CP property to user space.
+                 */
+                if (!desired_and_not_enabled && !content_protection_type_changed) {
+                        drm_connector_get(&connector->base);
+                        schedule_work(&hdcp->prop_work);
+                }
         }
 
         if (desired_and_not_enabled || content_protection_type_changed)
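Annotation: both HDCP hunks enforce the same rule: take a reference on the connector with drm_connector_get() before scheduling prop_work, because the worker runs asynchronously and drops the reference once it has pushed the content-protection property to user space. A toy sketch of this take-a-reference-before-async-handoff pattern, using plain C stand-ins rather than the real DRM API:

#include <stdio.h>

/* Minimal stand-in for a refcounted connector; the real code uses
 * drm_connector_get()/drm_connector_put() and a workqueue. */
struct connector {
        int refcount;
};

static void connector_get(struct connector *c) { c->refcount++; }
static void connector_put(struct connector *c)
{
        if (--c->refcount == 0)
                printf("connector freed\n");
}

/* The deferred worker: consumes the reference taken by the scheduler. */
static void prop_work(struct connector *c)
{
        printf("updating content-protection property\n");
        connector_put(c); /* pairs with the get taken before scheduling */
}

int main(void)
{
        struct connector conn = { .refcount = 1 };

        /* Schedule: pin the connector first so it cannot be freed before
         * the worker runs. Omitting this get is the bug the fix closes. */
        connector_get(&conn);
        prop_work(&conn); /* stands in for schedule_work() running later */

        connector_put(&conn); /* final unreference by the owner */
        return 0;
}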
@@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
         return true;
 }
 
-static inline bool __request_completed(const struct i915_request *rq)
-{
-        return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
-}
-
 __maybe_unused static bool
 check_signal_order(struct intel_context *ce, struct i915_request *rq)
 {
@@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
                 list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
                         bool release;
 
-                        if (!__request_completed(rq))
+                        if (!__i915_request_is_complete(rq))
                                 break;
 
                         if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,
@@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
          * straight onto a signaled list, and queue the irq worker for
          * its signal completion.
          */
-        if (__request_completed(rq)) {
+        if (__i915_request_is_complete(rq)) {
                 if (__signal_request(rq) &&
                     llist_add(&rq->signal_node, &b->signaled_requests))
                         irq_work_queue(&b->irq_work);
@@ -3988,6 +3988,9 @@ err:
 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
 {
         i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+
+        /* Called on error unwind, clear all flags to prevent further use */
+        memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx));
 }
 
 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
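Annotation: the hunk above is defensive teardown: since lrc_destroy_wa_ctx() is also reached on error unwind, the whole wa_ctx is zeroed after the vma is released so a later user sees NULL/0 instead of dangling state. A tiny sketch of the idea, with a hypothetical struct:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the per-engine workaround-batch context. */
struct wa_ctx {
        void *vma;          /* backing allocation */
        unsigned int flags; /* offsets/flags derived from vma */
};

static void destroy_wa_ctx(struct wa_ctx *wa)
{
        free(wa->vma);

        /* Called on error unwind too: clear everything so any later user
         * sees a NULL vma and zero flags instead of dangling state. */
        memset(wa, 0, sizeof(*wa));
}

int main(void)
{
        struct wa_ctx wa = { .vma = malloc(64), .flags = 0x3 };

        destroy_wa_ctx(&wa);
        return wa.vma != NULL; /* 0 = cleared as expected */
}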
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
         struct intel_timeline_cacheline *cl =
                 container_of(rcu, typeof(*cl), rcu);
 
+        /* Must wait until after all *rq->hwsp are complete before removing */
+        i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+        __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
         i915_active_fini(&cl->active);
         kfree(cl);
 }
@@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
         GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
-        i915_gem_object_unpin_map(cl->hwsp->vma->obj);
-        i915_vma_put(cl->hwsp->vma);
-        __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
         call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
 
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
                 return ERR_CAST(vaddr);
         }
 
-        i915_vma_get(hwsp->vma);
         cl->hwsp = hwsp;
         cl->vaddr = page_pack_bits(vaddr, cacheline);
 
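Annotation: the three timeline hunks are the HWSP validity-race fix. Unpinning the HWSP mapping and returning the cacheline used to happen in __idle_cacheline_free(), before the RCU grace period, while readers could still dereference rq->hwsp under rcu_read_lock(); the teardown now runs in the call_rcu() callback, which also makes the vma get/put pair in the alloc/free paths redundant. A kernel-style sketch of the deferred-free shape — call_rcu(), rcu_read_lock() and container_of() are real kernel primitives, everything else is a toy stand-in:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Toy stand-in for the timeline cacheline: a pinned mapping that RCU
 * readers may still be dereferencing when the owner drops it. */
struct toy_cacheline {
        void *vaddr;          /* pinned HWSP mapping, read under rcu_read_lock() */
        struct rcu_head rcu;
};

/* Stand-in for i915_gem_object_unpin_map()/__idle_hwsp_free(). */
static void toy_unpin(void *vaddr)
{
        kfree(vaddr);
}

/* Runs only after every pre-existing RCU read-side critical section has
 * finished, so no reader can still be using the mapping we unpin here. */
static void toy_cacheline_free_rcu(struct rcu_head *rcu)
{
        struct toy_cacheline *cl = container_of(rcu, struct toy_cacheline, rcu);

        toy_unpin(cl->vaddr);
        kfree(cl);
}

static void toy_cacheline_retire(struct toy_cacheline *cl)
{
        /* The bug was unpinning right here, before the grace period;
         * the fix defers the unpin into the RCU callback above. */
        call_rcu(&cl->rcu, toy_cacheline_free_rcu);
}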
@@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt)
         return val;
 }
 
+static void init_rc6(struct i915_pmu *pmu)
+{
+        struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+        intel_wakeref_t wakeref;
+
+        with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) {
+                pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+                pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur =
+                                        pmu->sample[__I915_SAMPLE_RC6].cur;
+                pmu->sleep_last = ktime_get();
+        }
+}
+
 static void park_rc6(struct drm_i915_private *i915)
 {
         struct i915_pmu *pmu = &i915->pmu;
 
-        if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
-                pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-
+        pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
         pmu->sleep_last = ktime_get();
 }
 
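Annotation: the PMU hunks drop the runtime-PM wakeref from i915_pmu_enable() (it was taken around pmu->lock on every event enable) and instead sample the RC6 baseline once, at registration, under a scoped wakeref; the new init_rc6() above does that one-shot sampling, and a later hunk wires it into i915_pmu_register(). A self-contained sketch of the scoped-acquire pattern in the style of with_intel_runtime_pm() — every name below is illustrative:

#include <stdio.h>

/* Stand-ins for runtime-PM: acquire a wakeref, do work, release it. */
typedef int wakeref_t;

static wakeref_t runtime_pm_get(void) { puts("device awake"); return 1; }
static void runtime_pm_put(wakeref_t w) { (void)w; puts("device may sleep"); }

/* A scoped-acquire helper in the style of with_intel_runtime_pm(): the
 * body runs with the wakeref held and the release cannot be forgotten. */
#define with_runtime_pm(wf) \
        for ((wf) = runtime_pm_get(); (wf); runtime_pm_put(wf), (wf) = 0)

static unsigned long long read_rc6_counter(void) { return 424242; }

struct pmu_state {
        unsigned long long rc6_sample;
        unsigned long long rc6_last_reported;
};

/* Sample RC6 once at registration, not on every event enable. */
static void init_rc6(struct pmu_state *pmu)
{
        wakeref_t wf;

        with_runtime_pm(wf) {
                pmu->rc6_sample = read_rc6_counter();
                pmu->rc6_last_reported = pmu->rc6_sample;
        }
}

int main(void)
{
        struct pmu_state pmu = {0};

        init_rc6(&pmu);
        printf("rc6 baseline: %llu\n", pmu.rc6_sample);
        return 0;
}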
@@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt)
         return __get_rc6(gt);
 }
 
+static void init_rc6(struct i915_pmu *pmu) { }
 static void park_rc6(struct drm_i915_private *i915) {}
 
 #endif
@@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event)
                 container_of(event->pmu, typeof(*i915), pmu.base);
         unsigned int bit = event_enabled_bit(event);
         struct i915_pmu *pmu = &i915->pmu;
-        intel_wakeref_t wakeref;
         unsigned long flags;
 
-        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
         spin_lock_irqsave(&pmu->lock, flags);
 
         /*
@@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event)
         GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
         GEM_BUG_ON(pmu->enable_count[bit] == ~0);
 
-        if (pmu->enable_count[bit] == 0 &&
-            config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
-                pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
-                pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
-                pmu->sleep_last = ktime_get();
-        }
-
         pmu->enable |= BIT_ULL(bit);
         pmu->enable_count[bit]++;
 
@@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event)
          * an existing non-zero value.
          */
         local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
-
-        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
 
 static void i915_pmu_disable(struct perf_event *event)
@@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
         hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         pmu->timer.function = i915_sample;
         pmu->cpuhp.cpu = -1;
+        init_rc6(pmu);
 
         if (!is_igp(i915)) {
                 pmu->name = kasprintf(GFP_KERNEL,
@@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq)
 
 static inline bool __i915_request_has_started(const struct i915_request *rq)
 {
-        return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
+        return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1);
 }
 
 /**
@@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
  */
 static inline bool i915_request_started(const struct i915_request *rq)
 {
+        bool result;
+
         if (i915_request_signaled(rq))
                 return true;
 
-        /* Remember: started but may have since been preempted! */
-        return __i915_request_has_started(rq);
+        result = true;
+        rcu_read_lock(); /* the HWSP may be freed at runtime */
+        if (likely(!i915_request_signaled(rq)))
+                /* Remember: started but may have since been preempted! */
+                result = __i915_request_has_started(rq);
+        rcu_read_unlock();
+
+        return result;
 }
 
 /**
@@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq)
  */
 static inline bool i915_request_is_running(const struct i915_request *rq)
 {
+        bool result;
+
         if (!i915_request_is_active(rq))
                 return false;
 
-        return __i915_request_has_started(rq);
+        rcu_read_lock();
+        result = __i915_request_has_started(rq) && i915_request_is_active(rq);
+        rcu_read_unlock();
+
+        return result;
 }
 
 /**
@@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq)
         return !list_empty(&rq->sched.link);
 }
 
+static inline bool __i915_request_is_complete(const struct i915_request *rq)
+{
+        return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
+}
+
 static inline bool i915_request_completed(const struct i915_request *rq)
 {
+        bool result;
+
         if (i915_request_signaled(rq))
                 return true;
 
-        return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
+        result = true;
+        rcu_read_lock(); /* the HWSP may be freed at runtime */
+        if (likely(!i915_request_signaled(rq)))
+                result = __i915_request_is_complete(rq);
+        rcu_read_unlock();
+
+        return result;
 }
 
 static inline void i915_request_mark_complete(struct i915_request *rq)
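Annotation: the last three hunks give i915_request_started(), i915_request_is_running() and i915_request_completed() one shape: a lock-free fast path on the signaled state, then rcu_read_lock(), a recheck of that state, and only then the dereference of the RCU-protected HWSP seqno (the earlier breadcrumbs hunks switch to the shared __i915_request_is_complete() helper for the same reason). The recheck matters because the request can be retired, and its HWSP freed, between the first test and taking the read lock. A kernel-style sketch with toy types — READ_ONCE(), likely(), rcu_read_lock() and rcu_dereference() are the real primitives:

#include <linux/compiler.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Toy request: the HWSP seqno slot is RCU-protected and may be freed
 * once the request has signaled. */
struct toy_request {
        bool signaled;            /* terminal state, set before the hwsp goes away */
        unsigned int __rcu *hwsp; /* live seqno slot, freed via call_rcu() */
        unsigned int seqno;
};

static bool seqno_passed(unsigned int hwsp, unsigned int seqno)
{
        return (int)(hwsp - seqno) >= 0;
}

static bool toy_request_completed(const struct toy_request *rq)
{
        bool result;

        /* Fast path: a signaled request is complete by definition. */
        if (READ_ONCE(rq->signaled))
                return true;

        result = true;
        rcu_read_lock(); /* the HWSP may be freed at runtime */
        /*
         * Recheck under the lock: if the request signaled in the meantime,
         * its hwsp may already be queued for freeing and must not be read.
         */
        if (likely(!READ_ONCE(rq->signaled)))
                result = seqno_passed(*rcu_dereference(rq->hwsp), rq->seqno);
        rcu_read_unlock();

        return result;
}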