i915 fixes, and single amd and nouveau fix
Merge tag 'drm-fixes-for-v4.16-rc2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "One nouveau regression fix, one AMD quirk and a full set of i915
  fixes. The i915 fixes are mostly for things caught by their CI
  system, main ones being DSI panel fixes and GEM fixes"

* tag 'drm-fixes-for-v4.16-rc2' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau: Make clock gate support conditional
  drm/i915: Fix DSI panels with v1 MIPI sequences without a DEASSERT sequence v3
  drm/i915: Free memdup-ed DSI VBT data structures on driver_unload
  drm/i915: Add intel_bios_cleanup() function
  drm/i915/vlv: Add cdclk workaround for DSI
  drm/i915/gvt: fix one typo of render_mmio trace
  drm/i915/gvt: Support BAR0 8-byte reads/writes
  drm/i915/gvt: add 0xe4f0 into gen9 render list
  drm/i915/pmu: Fix building without CONFIG_PM
  drm/i915/pmu: Fix sleep under atomic in RC6 readout
  drm/i915/pmu: Fix PMU enable vs execlists tasklet race
  drm/i915: Lock out execlist tasklet while peeking inside for busy-stats
  drm/i915/breadcrumbs: Ignore unsubmitted signalers
  drm/i915: Don't wake the device up to check if the engine is asleep
  drm/i915: Avoid truncation before clamping userspace's priority value
  drm/i915/perf: Fix compiler warning for string truncation
  drm/i915/perf: Fix compiler warning for string truncation
  drm/amdgpu: add new device to use atpx quirk
This commit is contained in: commit bad575394b
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	/* HG _PR3 doesn't seem to work on this A+A weston board */
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
 
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 	return ret == 0 ? count : ret;
 }
 
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+	struct intel_gvt *gvt = vgpu->gvt;
+	int offset;
+
+	/* Only allow MMIO GGTT entry access */
+	if (index != PCI_BASE_ADDRESS_0)
+		return false;
+
+	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+	return (offset >= gvt->device_info.gtt_start_offset &&
+		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
+		true : false;
+}
+
 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
 			size_t count, loff_t *ppos)
 {
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
 	while (count) {
 		size_t filled;
 
-		if (count >= 4 && !(*ppos % 4)) {
+		/* Only support GGTT entry 8 bytes read */
+		if (count >= 8 && !(*ppos % 8) &&
+		    gtt_entry(mdev, ppos)) {
+			u64 val;
+
+			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+					ppos, false);
+			if (ret <= 0)
+				goto read_err;
+
+			if (copy_to_user(buf, &val, sizeof(val)))
+				goto read_err;
+
+			filled = 8;
+		} else if (count >= 4 && !(*ppos % 4)) {
 			u32 val;
 
 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
 	while (count) {
 		size_t filled;
 
-		if (count >= 4 && !(*ppos % 4)) {
+		/* Only support GGTT entry 8 bytes write */
+		if (count >= 8 && !(*ppos % 8) &&
+		    gtt_entry(mdev, ppos)) {
+			u64 val;
+
+			if (copy_from_user(&val, buf, sizeof(val)))
+				goto write_err;
+
+			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+					ppos, true);
+			if (ret <= 0)
+				goto write_err;
+
+			filled = 8;
+		} else if (count >= 4 && !(*ppos % 4)) {
 			u32 val;
 
 			if (copy_from_user(&val, buf, sizeof(val)))
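The gating condition the two hunks above add is easy to test in isolation. What follows is a minimal userspace sketch of the same predicate, with a made-up GGTT window and none of the real vGPU types; is_ggtt_qword_access() is a hypothetical stand-in for gtt_entry() combined with the size/alignment checks.

/* Standalone sketch of the 8-byte GGTT access gate: an access is served
 * as a single 64-bit read/write only when it is a full, 8-byte-aligned
 * qword that falls inside the GGTT window. All names are illustrative. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool is_ggtt_qword_access(uint64_t pos, size_t count,
                                 uint64_t gtt_start, uint64_t gtt_size)
{
    if (count < 8 || pos % 8)   /* need a full, aligned qword */
        return false;
    return pos >= gtt_start && pos < gtt_start + gtt_size;
}

int main(void)
{
    /* hypothetical window: GGTT entries at [0x800000, 0x800000 + 8 MiB) */
    printf("%d\n", is_ggtt_qword_access(0x800008, 8, 0x800000, 8 << 20)); /* 1 */
    printf("%d\n", is_ggtt_qword_access(0x800004, 8, 0x800000, 8 << 20)); /* 0: misaligned */
    printf("%d\n", is_ggtt_qword_access(0x700000, 8, 0x800000, 8 << 20)); /* 0: outside window */
    return 0;
}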
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
 	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
 	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
 	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
 	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
 	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
 	TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
 		 unsigned int old_val, unsigned int new_val),
 
-	TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+	TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
 
 	TP_STRUCT__entry(
 		__field(int, old_id)
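The typo being fixed is pure argument order: TP_ARGS passed (..., new_val, old_val) into a TP_PROTO declared (..., old_val, new_val), so every render_mmio trace logged the two register values swapped. A plain-C sketch of the same class of mistake; trace_render_mmio() here is an illustrative function, not the real tracepoint plumbing.

#include <stdio.h>

static void trace_render_mmio(unsigned int old_val, unsigned int new_val)
{
    printf("old=0x%x new=0x%x\n", old_val, new_val);
}

int main(void)
{
    unsigned int old_val = 0x1111, new_val = 0x2222;

    trace_render_mmio(new_val, old_val); /* buggy call: fields come out swapped */
    trace_render_mmio(old_val, new_val); /* fixed call: fields match their names */
    return 0;
}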
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
 
 	intel_modeset_cleanup(dev);
 
-	/*
-	 * free the memory space allocated for the child device
-	 * config parsed from VBT
-	 */
-	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-		kfree(dev_priv->vbt.child_dev);
-		dev_priv->vbt.child_dev = NULL;
-		dev_priv->vbt.child_dev_num = 0;
-	}
-	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+	intel_bios_cleanup(dev_priv);
 
 	vga_switcheroo_unregister_client(pdev);
 	vga_client_register(pdev, NULL, NULL, NULL);
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
 		u32 size;
 		u8 *data;
 		const u8 *sequence[MIPI_SEQ_MAX];
+		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
 	} dsi;
 
 	int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
 
 /* intel_bios.c */
 void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 
 	case I915_CONTEXT_PARAM_PRIORITY:
 		{
-			int priority = args->value;
+			s64 priority = args->value;
 
 			if (args->size)
 				ret = -EINVAL;
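args->value in the context-setparam uAPI is a 64-bit field, so the old `int priority = args->value;` truncated the value before any range check could run: an out-of-range request like 2^32 silently became 0 and passed the later clamp. A small sketch of the difference, assuming nothing i915-specific and typical two's-complement truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t value = INT64_C(0x100000000);  /* out-of-range userspace request */
    int truncated = (int)value;            /* old code: truncates to 0 before validation */
    int64_t kept = value;                  /* fixed code: full width survives to the clamp */

    printf("truncated=%d kept=%lld\n", truncated, (long long)kept);
    return 0;
}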
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
 {
-	strncpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(dev_priv->perf.oa.test_config.uuid,
 		"577e8e2c-3fa0-4875-8743-3538d585e3b0",
-		UUID_STRING_LEN);
+		sizeof(dev_priv->perf.oa.test_config.uuid));
 	dev_priv->perf.oa.test_config.id = 1;
 
 	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
 {
-	strncpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(dev_priv->perf.oa.test_config.uuid,
 		"db41edd4-d8e7-4730-ad11-b9a2d6833503",
-		UUID_STRING_LEN);
+		sizeof(dev_priv->perf.oa.test_config.uuid));
 	dev_priv->perf.oa.test_config.id = 1;
 
 	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
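Both perf hunks swap strncpy() for strlcpy() because strncpy(dst, src, UUID_STRING_LEN) copies exactly 36 bytes of a 36-character UUID and never writes the terminating NUL, which is also what the compiler's string-truncation warning complains about. A userspace sketch of the failure mode and the fix; strlcpy is not in glibc, so a minimal stand-in is included rather than assuming the library call:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy stand-in: always NUL-terminates within size. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);
    if (size) {
        size_t n = len < size - 1 ? len : size - 1;
        memcpy(dst, src, n);
        dst[n] = '\0';          /* unconditional termination */
    }
    return len;
}

int main(void)
{
    char uuid[37];              /* UUID_STRING_LEN + 1 */
    memset(uuid, 'X', sizeof(uuid));

    /* strncpy(uuid, src, 36) would leave uuid[36] == 'X': no terminator. */
    my_strlcpy(uuid, "577e8e2c-3fa0-4875-8743-3538d585e3b0", sizeof(uuid));
    printf("%s\n", uuid);       /* safely NUL-terminated */
    return 0;
}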
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
 	return sum;
 }
 
-static void i915_pmu_event_destroy(struct perf_event *event)
-{
-	WARN_ON(event->parent);
-}
-
-static int engine_event_init(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct intel_engine_cs *engine;
+
+	engine = intel_engine_lookup_user(i915,
+					  engine_event_class(event),
+					  engine_event_instance(event));
+	if (WARN_ON_ONCE(!engine))
+		return;
+
+	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+	    intel_engine_supports_stats(engine))
+		intel_disable_engine_stats(engine);
+}
 
-	if (!intel_engine_lookup_user(i915, engine_event_class(event),
-				      engine_event_instance(event)))
-		return -ENODEV;
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+	WARN_ON(event->parent);
 
-	switch (engine_event_sample(event)) {
+	if (is_engine_event(event))
+		engine_event_destroy(event);
+}
+
+static int
+engine_event_status(struct intel_engine_cs *engine,
+		    enum drm_i915_pmu_engine_sample sample)
+{
+	switch (sample) {
 	case I915_SAMPLE_BUSY:
 	case I915_SAMPLE_WAIT:
 		break;
 	case I915_SAMPLE_SEMA:
-		if (INTEL_GEN(i915) < 6)
+		if (INTEL_GEN(engine->i915) < 6)
 			return -ENODEV;
 		break;
 	default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
 	return 0;
 }
 
+static int engine_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct intel_engine_cs *engine;
+	u8 sample;
+	int ret;
+
+	engine = intel_engine_lookup_user(i915, engine_event_class(event),
+					  engine_event_instance(event));
+	if (!engine)
+		return -ENODEV;
+
+	sample = engine_event_sample(event);
+	ret = engine_event_status(engine, sample);
+	if (ret)
+		return ret;
+
+	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+		ret = intel_enable_engine_stats(engine);
+
+	return ret;
+}
+
 static int i915_pmu_event_init(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
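The refactor above ties busy-stats enablement to event lifetime instead of delayed work: the first busy event to pass engine_event_init() enables stats, and the matching engine_event_destroy() disables them, with a plain reference count keeping nested users honest. A toy sketch of that pattern, where the printfs stand in for intel_enable_engine_stats()/intel_disable_engine_stats():

#include <stdio.h>

static unsigned int enable_count;

static void stats_get(void)
{
    if (enable_count++ == 0)
        printf("enable engine stats\n");   /* first user enables */
}

static void stats_put(void)
{
    if (--enable_count == 0)
        printf("disable engine stats\n");  /* last user disables */
}

int main(void)
{
    stats_get();  /* enables */
    stats_get();  /* nested: no-op */
    stats_put();  /* still one user: no-op */
    stats_put();  /* disables */
    return 0;
}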
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
 	return 0;
 }
 
-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+	u64 val;
+
+	val = intel_rc6_residency_ns(i915,
+				     IS_VALLEYVIEW(i915) ?
+				     VLV_GT_RENDER_RC6 :
+				     GEN6_GT_GFX_RC6);
+
+	if (HAS_RC6p(i915))
+		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+	if (HAS_RC6pp(i915))
+		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+	return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+	unsigned long flags;
+	u64 val;
+
+	if (intel_runtime_pm_get_if_in_use(i915)) {
+		val = __get_rc6(i915);
+		intel_runtime_pm_put(i915);
+
+		/*
+		 * If we are coming back from being runtime suspended we must
+		 * be careful not to report a larger value than returned
+		 * previously.
+		 */
+
+		if (!locked)
+			spin_lock_irqsave(&i915->pmu.lock, flags);
+
+		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+		} else {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+		}
+
+		if (!locked)
+			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	} else {
+		struct pci_dev *pdev = i915->drm.pdev;
+		struct device *kdev = &pdev->dev;
+		unsigned long flags2;
+
+		/*
+		 * We are runtime suspended.
+		 *
+		 * Report the delta from when the device was suspended to now,
+		 * on top of the last known real value, as the approximated RC6
+		 * counter value.
+		 */
+		if (!locked)
+			spin_lock_irqsave(&i915->pmu.lock, flags);
+
+		spin_lock_irqsave(&kdev->power.lock, flags2);
+
+		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+			i915->pmu.suspended_jiffies_last =
+						kdev->power.suspended_jiffies;
+
+		val = kdev->power.suspended_jiffies -
+		      i915->pmu.suspended_jiffies_last;
+		val += jiffies - kdev->power.accounting_timestamp;
+
+		spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+		val = jiffies_to_nsecs(val);
+		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+		if (!locked)
+			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	}
+
+	return val;
+#else
+	return __get_rc6(i915);
+#endif
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
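get_rc6() above has two jobs: extrapolate RC6 residency while the device is runtime suspended (without waking it, which is what previously slept in atomic context), and never report less than an estimate it already handed out once the real counter is readable again. A simplified userspace sketch of that clamp, with the jiffies bookkeeping reduced to a plain nanosecond delta and all names illustrative:

#include <stdint.h>
#include <stdio.h>

static uint64_t last_real, last_estimate;

static uint64_t report_rc6(int awake, uint64_t hw_value, uint64_t suspended_ns)
{
    if (awake) {
        if (hw_value >= last_estimate) {  /* real value caught up */
            last_estimate = 0;
            last_real = hw_value;
            return hw_value;
        }
        return last_estimate;             /* clamp: never go backwards */
    }
    last_estimate = last_real + suspended_ns;  /* extrapolate while asleep */
    return last_estimate;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)report_rc6(1, 1000, 0)); /* 1000: real   */
    printf("%llu\n", (unsigned long long)report_rc6(0, 0, 500));  /* 1500: estimate */
    printf("%llu\n", (unsigned long long)report_rc6(1, 1200, 0)); /* 1500: clamped  */
    printf("%llu\n", (unsigned long long)report_rc6(1, 1600, 0)); /* 1600: real again */
    return 0;
}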
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		if (WARN_ON_ONCE(!engine)) {
 			/* Do nothing */
 		} else if (sample == I915_SAMPLE_BUSY &&
-			   engine->pmu.busy_stats) {
+			   intel_engine_supports_stats(engine)) {
 			val = ktime_to_ns(intel_engine_get_busy_time(engine));
 		} else {
 			val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 			val = count_interrupts(i915);
 			break;
 		case I915_PMU_RC6_RESIDENCY:
-			intel_runtime_pm_get(i915);
-			val = intel_rc6_residency_ns(i915,
-						     IS_VALLEYVIEW(i915) ?
-						     VLV_GT_RENDER_RC6 :
-						     GEN6_GT_GFX_RC6);
-			if (HAS_RC6p(i915))
-				val += intel_rc6_residency_ns(i915,
-							      GEN6_GT_GFX_RC6p);
-			if (HAS_RC6pp(i915))
-				val += intel_rc6_residency_ns(i915,
-							      GEN6_GT_GFX_RC6pp);
-			intel_runtime_pm_put(i915);
+			val = get_rc6(i915, locked);
 			break;
 		}
 	}
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
 
 again:
 	prev = local64_read(&hwc->prev_count);
-	new = __i915_pmu_event_read(event);
+	new = __i915_pmu_event_read(event, false);
 
 	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
 		goto again;
@@ -442,12 +557,6 @@ again:
 	local64_add(new - prev, &event->count);
 }
 
-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
-	return intel_engine_supports_stats(engine) &&
-	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
 static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)
 
 		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
-		if (engine->pmu.enable_count[sample]++ == 0) {
-			/*
-			 * Enable engine busy stats tracking if needed or
-			 * alternatively cancel the scheduled disable.
-			 *
-			 * If the delayed disable was pending, cancel it and
-			 * in this case do not enable since it already is.
-			 */
-			if (engine_needs_busy_stats(engine) &&
-			    !engine->pmu.busy_stats) {
-				engine->pmu.busy_stats = true;
-				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
-					intel_enable_engine_stats(engine);
-			}
-		}
+		engine->pmu.enable_count[sample]++;
 	}
 
 	/*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
 	 * for all listeners. Even when the event was already enabled and has
 	 * an existing non-zero value.
 	 */
-	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
 
 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }
 
-static void __disable_busy_stats(struct work_struct *work)
-{
-	struct intel_engine_cs *engine =
-	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
-	intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_disable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
 		 * Decrement the reference count and clear the enabled
 		 * bitmask when the last listener on an event goes away.
 		 */
-		if (--engine->pmu.enable_count[sample] == 0) {
+		if (--engine->pmu.enable_count[sample] == 0)
 			engine->pmu.enable &= ~BIT(sample);
-			if (!engine_needs_busy_stats(engine) &&
-			    engine->pmu.busy_stats) {
-				engine->pmu.busy_stats = false;
-				/*
-				 * We request a delayed disable to handle the
-				 * rapid on/off cycles on events, which can
-				 * happen when tools like perf stat start, in a
-				 * nicer way.
-				 *
-				 * In addition, this also helps with busy stats
-				 * accuracy with background CPU offline/online
-				 * migration events.
-				 */
-				queue_delayed_work(system_wq,
-						   &engine->pmu.disable_busy_stats,
-						   round_jiffies_up_relative(HZ));
-			}
-		}
 	}
 
 	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
 
 void i915_pmu_register(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	int ret;
 
 	if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	i915->pmu.timer.function = i915_sample;
 
-	for_each_engine(engine, i915, id)
-		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
-				  __disable_busy_stats);
-
 	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
 	if (ret)
 		goto err;
@@ -843,9 +906,6 @@ err:
 
 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
 	if (!i915->pmu.base.event_init)
 		return;
 
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
 
 	hrtimer_cancel(&i915->pmu.timer);
 
-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(engine->pmu.busy_stats);
-		flush_delayed_work(&engine->pmu.disable_busy_stats);
-	}
-
 	i915_pmu_unregister_cpuhp_state(i915);
 
 	perf_pmu_unregister(&i915->pmu.base);
@@ -27,6 +27,8 @@
 enum {
 	__I915_SAMPLE_FREQ_ACT = 0,
 	__I915_SAMPLE_FREQ_REQ,
+	__I915_SAMPLE_RC6,
+	__I915_SAMPLE_RC6_ESTIMATED,
 	__I915_NUM_PMU_SAMPLERS
 };
 
@@ -94,6 +96,10 @@ struct i915_pmu {
 	 * struct intel_engine_cs.
 	 */
 	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+	/**
+	 * @suspended_jiffies_last: Cached suspend time from PM core.
+	 */
+	unsigned long suspended_jiffies_last;
 };
 
 #ifdef CONFIG_PERF_EVENTS
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
 	return 0;
 }
 
+/*
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+	const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	int index, len;
+
+	if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+		return 0;
+
+	/* index = 1 to skip sequence byte */
+	for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+		switch (data[index]) {
+		case MIPI_SEQ_ELEM_SEND_PKT:
+			return index == 1 ? 0 : index;
+		case MIPI_SEQ_ELEM_DELAY:
+			len = 5; /* 1 byte for operand + uint32 */
+			break;
+		case MIPI_SEQ_ELEM_GPIO:
+			len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+			break;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+	u8 *init_otp;
+	int len;
+
+	/* Limit this to VLV for now. */
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	/* Limit this to v1 vid-mode sequences */
+	if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+	    dev_priv->vbt.dsi.seq_version != 1)
+		return;
+
+	/* Only do this if there are otp and assert seqs and no deassert seq */
+	if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+	    !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+	    dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+		return;
+
+	/* The deassert-sequence ends at the first DSI packet */
+	len = get_init_otp_deassert_fragment_len(dev_priv);
+	if (!len)
+		return;
+
+	DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+	/* Copy the fragment, update seq byte and terminate it */
+	init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.deassert_seq)
+		return;
+	dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+	dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+	/* Use the copy for deassert */
+	dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+		dev_priv->vbt.dsi.deassert_seq;
+	/* Replace the last byte of the fragment with init OTP seq byte */
+	init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+	/* And make MIPI_SEQ_INIT_OTP point to it */
+	dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
 static void
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
 		    const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
 	dev_priv->vbt.dsi.size = seq_size;
 	dev_priv->vbt.dsi.seq_version = sequence->version;
 
+	fixup_mipi_sequences(dev_priv);
+
 	DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
 	return;
 
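get_init_otp_deassert_fragment_len() above walks the v1 sequence one element at a time, skipping DELAY (5-byte) and GPIO (3-byte) elements and reporting how much of the sequence precedes the first SEND_PKT. A standalone sketch of the same walk; the element byte codes here are demo values, not the real VBT encoding:

#include <stdio.h>

enum { ELEM_END = 0, ELEM_SEND_PKT = 1, ELEM_DELAY = 2, ELEM_GPIO = 3 }; /* demo values */

/* Returns the length of the prefix before the first SEND_PKT, or 0. */
static int deassert_fragment_len(const unsigned char *data)
{
    int index, len;

    for (index = 1; data[index] != ELEM_END; index += len) { /* skip seq byte */
        switch (data[index]) {
        case ELEM_SEND_PKT:
            return index == 1 ? 0 : index;
        case ELEM_DELAY:
            len = 5;    /* 1 op byte + u32 operand */
            break;
        case ELEM_GPIO:
            len = 3;    /* 1 op byte + gpio_nr + value */
            break;
        default:
            return 0;   /* unknown element: give up */
        }
    }
    return 0;
}

int main(void)
{
    /* seq byte, GPIO(2 operands), DELAY(4 operands), SEND_PKT, END */
    const unsigned char seq[] = { 9, ELEM_GPIO, 1, 1,
                                  ELEM_DELAY, 0, 0, 0, 10,
                                  ELEM_SEND_PKT, ELEM_END };
    printf("fragment len = %d\n", deassert_fragment_len(seq)); /* 9 */
    return 0;
}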
@@ -1588,6 +1670,29 @@ out:
 	pci_unmap_rom(pdev, bios);
 }
 
+/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+	kfree(dev_priv->vbt.child_dev);
+	dev_priv->vbt.child_dev = NULL;
+	dev_priv->vbt.child_dev_num = 0;
+	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+	kfree(dev_priv->vbt.dsi.data);
+	dev_priv->vbt.dsi.data = NULL;
+	kfree(dev_priv->vbt.dsi.pps);
+	dev_priv->vbt.dsi.pps = NULL;
+	kfree(dev_priv->vbt.dsi.config);
+	dev_priv->vbt.dsi.config = NULL;
+	kfree(dev_priv->vbt.dsi.deassert_seq);
+	dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
 /**
  * intel_bios_is_tv_present - is integrated TV present in VBT
  * @dev_priv: i915 device instance
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
 	spin_unlock_irq(&b->rb_lock);
 }
 
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
-	return intel_wait_check_request(&request->signaling.wait, request);
-}
-
 static bool signal_complete(const struct drm_i915_gem_request *request)
 {
 	if (!request)
 		return false;
 
-	/* If another process served as the bottom-half it may have already
-	 * signalled that this wait is already completed.
-	 */
-	if (intel_wait_complete(&request->signaling.wait))
-		return signal_valid(request);
-
-	/* Carefully check if the request is complete, giving time for the
+	/*
+	 * Carefully check if the request is complete, giving time for the
 	 * seqno to be visible or if the GPU hung.
 	 */
-	if (__i915_request_irq_complete(request))
-		return true;
-
-	return false;
+	return __i915_request_irq_complete(request);
 }
 
 static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
 			request = i915_gem_request_get_rcu(request);
 			rcu_read_unlock();
 			if (signal_complete(request)) {
-				local_bh_disable();
-				dma_fence_signal(&request->fence);
-				local_bh_enable(); /* kick start the tasklets */
+				if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+					      &request->fence.flags)) {
+					local_bh_disable();
+					dma_fence_signal(&request->fence);
+					GEM_BUG_ON(!i915_gem_request_completed(request));
+					local_bh_enable(); /* kick start the tasklets */
+				}
 
 				spin_lock_irq(&b->rb_lock);
 
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
 	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
 		min_cdclk = max(2 * 96000, min_cdclk);
 
+	/*
+	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+	 * than 320000KHz.
+	 */
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+	    IS_VALLEYVIEW(dev_priv))
+		min_cdclk = max(320000, min_cdclk);
+
 	if (min_cdclk > dev_priv->max_cdclk_freq) {
 		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
 			      min_cdclk, dev_priv->max_cdclk_freq);
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 	struct drm_i915_private *dev_priv = engine->i915;
 	bool idle = true;
 
-	intel_runtime_pm_get(dev_priv);
+	/* If the whole device is asleep, the engine must be idle */
+	if (!intel_runtime_pm_get_if_in_use(dev_priv))
+		return true;
 
 	/* First check that no commands are left in the ring */
 	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
  */
 int intel_enable_engine_stats(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists *execlists = &engine->execlists;
 	unsigned long flags;
+	int err = 0;
 
 	if (!intel_engine_supports_stats(engine))
 		return -ENODEV;
 
+	tasklet_disable(&execlists->tasklet);
 	spin_lock_irqsave(&engine->stats.lock, flags);
-	if (engine->stats.enabled == ~0)
-		goto busy;
+
+	if (unlikely(engine->stats.enabled == ~0)) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
 	if (engine->stats.enabled++ == 0) {
-		struct intel_engine_execlists *execlists = &engine->execlists;
 		const struct execlist_port *port = execlists->port;
 		unsigned int num_ports = execlists_num_ports(execlists);
 
@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 		if (engine->stats.active)
 			engine->stats.start = engine->stats.enabled_at;
 	}
 
+unlock:
 	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	tasklet_enable(&execlists->tasklet);
 
-	return 0;
-
-busy:
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
-
-	return -EBUSY;
+	return err;
 }
 
 static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
@@ -366,20 +366,6 @@ struct intel_engine_cs {
 		 */
 #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
 		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
-		/**
-		 * @busy_stats: Has enablement of engine stats tracking been
-		 * requested.
-		 */
-		bool busy_stats;
-		/**
-		 * @disable_busy_stats: Work item for busy stats disabling.
-		 *
-		 * Same as with @enable_busy_stats action, with the difference
-		 * that we delay it in case there are rapid enable-disable
-		 * actions, which can happen during tool startup (like perf
-		 * stat).
-		 */
-		struct delayed_work disable_busy_stats;
 	} pmu;
 
 	/*
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
 void
 nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
 {
-	if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+	if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
 		return;
 
 	nvkm_debug(&therm->subdev,
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
 void
 nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
 {
-	if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+	if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
 		return;
 
 	nvkm_debug(&therm->subdev,
@@ -395,7 +395,7 @@ void
 nvkm_therm_clkgate_init(struct nvkm_therm *therm,
 			const struct nvkm_therm_clkgate_pack *p)
 {
-	if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+	if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
 		return;
 
 	therm->func->clkgate_init(therm, p);