Mirror of https://github.com/edk2-porting/linux-next.git
Merge tag 'drm-intel-next-2015-10-10' of git://anongit.freedesktop.org/drm-intel into drm-next
- dmc fixes from Animesh (not yet all) for deeper sleep states
- piles of prep patches from Ville to make mmio functions type-safe
- more fbc work from Paulo all over
- w/a shuffling from Arun Siluvery
- first part of atomic watermark updates from Matt and Ville (later parts had to be dropped again unfortunately)
- lots of patches to prepare bxt dsi support (Shashank Sharma)
- userptr fixes from Chris
- audio rate interface between i915/snd_hda plus kerneldoc (Libin Yang)
- shrinker improvements and fixes (Chris Wilson)
- lots and lots of small patches all over

* tag 'drm-intel-next-2015-10-10' of git://anongit.freedesktop.org/drm-intel: (134 commits)
  drm/i915: Update DRIVER_DATE to 20151010
  drm/i915: Partial revert of atomic watermark series
  drm/i915: Early exit from semaphore_waits_for for execlist mode.
  drm/i915: Remove wrong warning from i915_gem_context_clean
  drm/i915: Determine the stolen memory base address on gen2
  drm/i915: fix FBC buffer size checks
  drm/i915: fix CFB size calculation
  drm/i915: remove pre-atomic check from SKL update_primary_plane
  drm/i915: don't allocate fbcon from stolen memory if it's too big
  Revert "drm/i915: Call encoder hotplug for init and resume cases"
  Revert "drm/i915: Add hot_plug hook for hdmi encoder"
  drm/i915: use error path
  drm/i915/irq: Fix misspelled word register in kernel-doc
  drm/i915/irq: Fix kernel-doc warnings
  drm/i915: Hook up ring workaround writes at context creation time on Gen6-7.
  drm/i915: Don't warn if the workaround list is empty.
  drm/i915: Resurrect golden context on gen6/7
  drm/i915/chv: remove pre-production hardware workarounds
  drm/i915/snb: remove pre-production hardware workaround
  drm/i915/bxt: Set time interval unit to 0.833us
  ...
This commit is contained in: commit 2dd3a88ac8
@@ -3989,6 +3989,7 @@ int num_ioctls;</synopsis>
<title>High Definition Audio</title>
!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
!Idrivers/gpu/drm/i915/intel_audio.c
!Iinclude/drm/i915_component.h
</sect2>
<sect2>
<title>Panel Self Refresh PSR (PSR/SRD)</title>
@@ -94,8 +94,8 @@ struct intel_dvo_dev_ops {
* after this function is called.
*/
void (*mode_set)(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);

/*
* Probe for a connected output, and return detect_status.
@@ -255,8 +255,8 @@ static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,
}

static void ch7017_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
uint8_t lvds_pll_feedback_div, lvds_pll_vco_control;
uint8_t outputs_enable, lvds_control_2, lvds_power_down;
@@ -275,8 +275,8 @@ static enum drm_mode_status ch7xxx_mode_valid(struct intel_dvo_device *dvo,
}

static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
uint8_t tvco, tpcp, tpd, tlpf, idf;

@@ -394,8 +394,8 @@ static bool ivch_get_hw_state(struct intel_dvo_device *dvo)
}

static void ivch_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
struct ivch_priv *priv = dvo->dev_priv;
uint16_t vr40 = 0;
@@ -414,16 +414,16 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE |
VR40_HORIZONTAL_INTERP_ENABLE);

if (mode->hdisplay != adjusted_mode->hdisplay ||
mode->vdisplay != adjusted_mode->vdisplay) {
if (mode->hdisplay != adjusted_mode->crtc_hdisplay ||
mode->vdisplay != adjusted_mode->crtc_vdisplay) {
uint16_t x_ratio, y_ratio;

vr01 |= VR01_PANEL_FIT_ENABLE;
vr40 |= VR40_CLOCK_GATING_ENABLE;
x_ratio = (((mode->hdisplay - 1) << 16) /
(adjusted_mode->hdisplay - 1)) >> 2;
(adjusted_mode->crtc_hdisplay - 1)) >> 2;
y_ratio = (((mode->vdisplay - 1) << 16) /
(adjusted_mode->vdisplay - 1)) >> 2;
(adjusted_mode->crtc_vdisplay - 1)) >> 2;
ivch_write(dvo, VR42, x_ratio);
ivch_write(dvo, VR41, y_ratio);
} else {
@@ -546,8 +546,8 @@ static enum drm_mode_status ns2501_mode_valid(struct intel_dvo_device *dvo,
}

static void ns2501_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
const struct ns2501_configuration *conf;
struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
@@ -190,8 +190,8 @@ static enum drm_mode_status sil164_mode_valid(struct intel_dvo_device *dvo,
}

static void sil164_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock
* dependencies in the mode setup, we can just leave the
@@ -222,8 +222,8 @@ static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo,
}

static void tfp410_mode_set(struct intel_dvo_device *dvo,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
/* As long as the basics are set up, since we don't have clock dependencies
* in the mode setup, we can just leave the registers alone and everything
@@ -448,6 +448,9 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
REG32(GEN7_3DPRIM_INSTANCE_COUNT),
REG32(GEN7_3DPRIM_START_INSTANCE),
REG32(GEN7_3DPRIM_BASE_VERTEX),
REG32(GEN7_GPGPU_DISPATCHDIMX),
REG32(GEN7_GPGPU_DISPATCHDIMY),
REG32(GEN7_GPGPU_DISPATCHDIMZ),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)),
REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)),
@@ -1214,6 +1217,7 @@ int i915_cmd_parser_get_version(void)
* MI_PREDICATE_SRC1 registers.
* 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
* 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
* 5. GPGPU dispatch compute indirect registers.
*/
return 4;
return 5;
}
@@ -253,7 +253,11 @@ static int obj_rank_by_stolen(void *priv,
struct drm_i915_gem_object *b =
container_of(B, struct drm_i915_gem_object, obj_exec_link);

return a->stolen->start - b->stolen->start;
if (a->stolen->start < b->stolen->start)
return -1;
if (a->stolen->start > b->stolen->start)
return 1;
return 0;
}

static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
@@ -1308,6 +1312,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_puts(m, "no P-state info available\n");
}

seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

out:
intel_runtime_pm_put(dev_priv);
return ret;
@@ -2230,10 +2238,9 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
for_each_ring(ring, dev_priv, unused) {
seq_printf(m, "%s\n", ring->name);
for (i = 0; i < 4; i++) {
u32 offset = 0x270 + i * 8;
u64 pdp = I915_READ(ring->mmio_base + offset + 4);
u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
pdp <<= 32;
pdp |= I915_READ(ring->mmio_base + offset);
pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i));
seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
}
}
@@ -2290,18 +2297,21 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
struct task_struct *task;

task = get_pid_task(file->pid, PIDTYPE_PID);
if (!task)
return -ESRCH;
if (!task) {
ret = -ESRCH;
goto out_put;
}
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m);
}

out_put:
intel_runtime_pm_put(dev_priv);
mutex_unlock(&dev->struct_mutex);

return 0;
return ret;
}

static int count_irq_waiters(struct drm_i915_private *i915)
@@ -2909,7 +2919,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
u32 state;

if (IS_845G(dev) || IS_I865G(dev))
state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

@@ -3147,7 +3157,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
skl_ddb_entry_size(entry));
}

entry = &ddb->cursor[pipe];
entry = &ddb->plane[pipe][PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
@@ -5040,13 +5050,38 @@ static void gen9_sseu_device_status(struct drm_device *dev,
}
}

static void broadwell_sseu_device_status(struct drm_device *dev,
struct sseu_dev_status *stat)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int s;
u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);

stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK);

if (stat->slice_total) {
stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice;
stat->subslice_total = stat->slice_total *
stat->subslice_per_slice;
stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice;
stat->eu_total = stat->eu_per_subslice * stat->subslice_total;

/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < stat->slice_total; s++) {
u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s];

stat->eu_total -= hweight8(subslice_7eu);
}
}
}

static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct sseu_dev_status stat;

if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev))
if (INTEL_INFO(dev)->gen < 8)
return -ENODEV;

seq_puts(m, "SSEU Device Info\n");
@@ -5071,6 +5106,8 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
memset(&stat, 0, sizeof(stat));
if (IS_CHERRYVIEW(dev)) {
cherryview_sseu_device_status(dev, &stat);
} else if (IS_BROADWELL(dev)) {
broadwell_sseu_device_status(dev, &stat);
} else if (INTEL_INFO(dev)->gen >= 9) {
gen9_sseu_device_status(dev, &stat);
}
@ -673,6 +673,82 @@ static void gen9_sseu_info_init(struct drm_device *dev)
|
||||
info->has_eu_pg = (info->eu_per_subslice > 2);
|
||||
}
|
||||
|
||||
static void broadwell_sseu_info_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_device_info *info;
|
||||
const int s_max = 3, ss_max = 3, eu_max = 8;
|
||||
int s, ss;
|
||||
u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
|
||||
|
||||
fuse2 = I915_READ(GEN8_FUSE2);
|
||||
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
|
||||
ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
|
||||
|
||||
eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
|
||||
eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
|
||||
((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
|
||||
(32 - GEN8_EU_DIS0_S1_SHIFT));
|
||||
eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
|
||||
((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
|
||||
(32 - GEN8_EU_DIS1_S2_SHIFT));
|
||||
|
||||
|
||||
info = (struct intel_device_info *)&dev_priv->info;
|
||||
info->slice_total = hweight32(s_enable);
|
||||
|
||||
/*
|
||||
* The subslice disable field is global, i.e. it applies
|
||||
* to each of the enabled slices.
|
||||
*/
|
||||
info->subslice_per_slice = ss_max - hweight32(ss_disable);
|
||||
info->subslice_total = info->slice_total * info->subslice_per_slice;
|
||||
|
||||
/*
|
||||
* Iterate through enabled slices and subslices to
|
||||
* count the total enabled EU.
|
||||
*/
|
||||
for (s = 0; s < s_max; s++) {
|
||||
if (!(s_enable & (0x1 << s)))
|
||||
/* skip disabled slice */
|
||||
continue;
|
||||
|
||||
for (ss = 0; ss < ss_max; ss++) {
|
||||
u32 n_disabled;
|
||||
|
||||
if (ss_disable & (0x1 << ss))
|
||||
/* skip disabled subslice */
|
||||
continue;
|
||||
|
||||
n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
|
||||
|
||||
/*
|
||||
* Record which subslices have 7 EUs.
|
||||
*/
|
||||
if (eu_max - n_disabled == 7)
|
||||
info->subslice_7eu[s] |= 1 << ss;
|
||||
|
||||
info->eu_total += eu_max - n_disabled;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* BDW is expected to always have a uniform distribution of EU across
|
||||
* subslices with the exception that any one EU in any one subslice may
|
||||
* be fused off for die recovery.
|
||||
*/
|
||||
info->eu_per_subslice = info->subslice_total ?
|
||||
DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
|
||||
|
||||
/*
|
||||
* BDW supports slice power gating on devices with more than
|
||||
* one slice.
|
||||
*/
|
||||
info->has_slice_pg = (info->slice_total > 1);
|
||||
info->has_subslice_pg = 0;
|
||||
info->has_eu_pg = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Determine various intel_device_info fields at runtime.
|
||||
*
|
||||
@ -743,6 +819,8 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
|
||||
/* Initialize slice/subslice/EU info */
|
||||
if (IS_CHERRYVIEW(dev))
|
||||
cherryview_sseu_info_init(dev);
|
||||
else if (IS_BROADWELL(dev))
|
||||
broadwell_sseu_info_init(dev);
|
||||
else if (INTEL_INFO(dev)->gen >= 9)
|
||||
gen9_sseu_info_init(dev);
|
||||
|
||||
@ -818,6 +896,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
|
||||
mutex_init(&dev_priv->sb_lock);
|
||||
mutex_init(&dev_priv->modeset_restore_lock);
|
||||
mutex_init(&dev_priv->csr_lock);
|
||||
mutex_init(&dev_priv->av_mutex);
|
||||
|
||||
intel_pm_setup(dev);
|
||||
|
||||
@ -1045,12 +1124,9 @@ out_freecsr:
|
||||
put_bridge:
|
||||
pci_dev_put(dev_priv->bridge_dev);
|
||||
free_priv:
|
||||
if (dev_priv->requests)
|
||||
kmem_cache_destroy(dev_priv->requests);
|
||||
if (dev_priv->vmas)
|
||||
kmem_cache_destroy(dev_priv->vmas);
|
||||
if (dev_priv->objects)
|
||||
kmem_cache_destroy(dev_priv->objects);
|
||||
kmem_cache_destroy(dev_priv->requests);
|
||||
kmem_cache_destroy(dev_priv->vmas);
|
||||
kmem_cache_destroy(dev_priv->objects);
|
||||
kfree(dev_priv);
|
||||
return ret;
|
||||
}
|
||||
@ -1141,13 +1217,9 @@ int i915_driver_unload(struct drm_device *dev)
|
||||
if (dev_priv->regs != NULL)
|
||||
pci_iounmap(dev->pdev, dev_priv->regs);
|
||||
|
||||
if (dev_priv->requests)
|
||||
kmem_cache_destroy(dev_priv->requests);
|
||||
if (dev_priv->vmas)
|
||||
kmem_cache_destroy(dev_priv->vmas);
|
||||
if (dev_priv->objects)
|
||||
kmem_cache_destroy(dev_priv->objects);
|
||||
|
||||
kmem_cache_destroy(dev_priv->requests);
|
||||
kmem_cache_destroy(dev_priv->vmas);
|
||||
kmem_cache_destroy(dev_priv->objects);
|
||||
pci_dev_put(dev_priv->bridge_dev);
|
||||
kfree(dev_priv);
|
||||
|
||||
|
@ -443,6 +443,34 @@ static const struct pci_device_id pciidlist[] = { /* aka */
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, pciidlist);
|
||||
|
||||
static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
|
||||
{
|
||||
enum intel_pch ret = PCH_NOP;
|
||||
|
||||
/*
|
||||
* In a virtualized passthrough environment we can be in a
|
||||
* setup where the ISA bridge is not able to be passed through.
|
||||
* In this case, a south bridge can be emulated and we have to
|
||||
* make an educated guess as to which PCH is really there.
|
||||
*/
|
||||
|
||||
if (IS_GEN5(dev)) {
|
||||
ret = PCH_IBX;
|
||||
DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n");
|
||||
} else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
|
||||
ret = PCH_CPT;
|
||||
DRM_DEBUG_KMS("Assuming CouarPoint PCH\n");
|
||||
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
|
||||
ret = PCH_LPT;
|
||||
DRM_DEBUG_KMS("Assuming LynxPoint PCH\n");
|
||||
} else if (IS_SKYLAKE(dev)) {
|
||||
ret = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void intel_detect_pch(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -503,6 +531,8 @@ void intel_detect_pch(struct drm_device *dev)
|
||||
dev_priv->pch_type = PCH_SPT;
|
||||
DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
|
||||
WARN_ON(!IS_SKYLAKE(dev));
|
||||
} else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) {
|
||||
dev_priv->pch_type = intel_virt_detect_pch(dev);
|
||||
} else
|
||||
continue;
|
||||
|
||||
@ -608,6 +638,8 @@ static int i915_drm_suspend(struct drm_device *dev)
|
||||
return error;
|
||||
}
|
||||
|
||||
intel_guc_suspend(dev);
|
||||
|
||||
intel_suspend_gt_powersave(dev);
|
||||
|
||||
/*
|
||||
@ -737,6 +769,8 @@ static int i915_drm_resume(struct drm_device *dev)
|
||||
}
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_guc_resume(dev);
|
||||
|
||||
intel_modeset_init_hw(dev);
|
||||
|
||||
spin_lock_irq(&dev_priv->irq_lock);
|
||||
@ -1021,12 +1055,6 @@ static int skl_suspend_complete(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
/* Enabling DC6 is not a hard requirement to enter runtime D3 */
|
||||
|
||||
/*
|
||||
* This is to ensure that CSR isn't identified as loaded before
|
||||
* CSR-loading program is called during runtime-resume.
|
||||
*/
|
||||
intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
|
||||
|
||||
skl_uninit_cdclk(dev_priv);
|
||||
|
||||
return 0;
|
||||
@ -1476,6 +1504,8 @@ static int intel_runtime_suspend(struct device *device)
|
||||
i915_gem_release_all_mmaps(dev_priv);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
intel_guc_suspend(dev);
|
||||
|
||||
intel_suspend_gt_powersave(dev);
|
||||
intel_runtime_pm_disable_interrupts(dev_priv);
|
||||
|
||||
@ -1535,6 +1565,8 @@ static int intel_runtime_resume(struct device *device)
|
||||
intel_opregion_notify_adapter(dev, PCI_D0);
|
||||
dev_priv->pm.suspended = false;
|
||||
|
||||
intel_guc_resume(dev);
|
||||
|
||||
if (IS_GEN6(dev_priv))
|
||||
intel_init_pch_refclk(dev);
|
||||
|
||||
|
@ -57,7 +57,7 @@
|
||||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20150928"
|
||||
#define DRIVER_DATE "20151010"
|
||||
|
||||
#undef WARN_ON
|
||||
/* Many gcc seem to no see through this and fall over :( */
|
||||
@ -131,17 +131,17 @@ enum transcoder {
|
||||
#define transcoder_name(t) ((t) + 'A')
|
||||
|
||||
/*
|
||||
* This is the maximum (across all platforms) number of planes (primary +
|
||||
* sprites) that can be active at the same time on one pipe.
|
||||
*
|
||||
* This value doesn't count the cursor plane.
|
||||
* I915_MAX_PLANES in the enum below is the maximum (across all platforms)
|
||||
* number of planes per CRTC. Not all platforms really have this many planes,
|
||||
* which means some arrays of size I915_MAX_PLANES may have unused entries
|
||||
* between the topmost sprite plane and the cursor plane.
|
||||
*/
|
||||
#define I915_MAX_PLANES 4
|
||||
|
||||
enum plane {
|
||||
PLANE_A = 0,
|
||||
PLANE_B,
|
||||
PLANE_C,
|
||||
PLANE_CURSOR,
|
||||
I915_MAX_PLANES,
|
||||
};
|
||||
#define plane_name(p) ((p) + 'A')
|
||||
|
||||
@ -628,10 +628,6 @@ struct drm_i915_display_funcs {
|
||||
struct dpll *match_clock,
|
||||
struct dpll *best_clock);
|
||||
void (*update_wm)(struct drm_crtc *crtc);
|
||||
void (*update_sprite_wm)(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc,
|
||||
uint32_t sprite_width, uint32_t sprite_height,
|
||||
int pixel_size, bool enable, bool scaled);
|
||||
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
|
||||
void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
|
||||
/* Returns the active state of the crtc, and if the crtc is active,
|
||||
@ -646,7 +642,7 @@ struct drm_i915_display_funcs {
|
||||
void (*crtc_disable)(struct drm_crtc *crtc);
|
||||
void (*audio_codec_enable)(struct drm_connector *connector,
|
||||
struct intel_encoder *encoder,
|
||||
struct drm_display_mode *mode);
|
||||
const struct drm_display_mode *adjusted_mode);
|
||||
void (*audio_codec_disable)(struct intel_encoder *encoder);
|
||||
void (*fdi_link_train)(struct drm_crtc *crtc);
|
||||
void (*init_clock_gating)(struct drm_device *dev);
|
||||
@ -664,15 +660,6 @@ struct drm_i915_display_funcs {
|
||||
/* render clock increase/decrease */
|
||||
/* display clock increase/decrease */
|
||||
/* pll clock increase/decrease */
|
||||
|
||||
int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
|
||||
uint32_t (*get_backlight)(struct intel_connector *connector);
|
||||
void (*set_backlight)(struct intel_connector *connector,
|
||||
uint32_t level);
|
||||
void (*disable_backlight)(struct intel_connector *connector);
|
||||
void (*enable_backlight)(struct intel_connector *connector);
|
||||
uint32_t (*backlight_hz_to_pwm)(struct intel_connector *connector,
|
||||
uint32_t hz);
|
||||
};
|
||||
|
||||
enum forcewake_domain_id {
|
||||
@ -1146,7 +1133,6 @@ struct intel_gen6_power_mgmt {
|
||||
u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */
|
||||
u8 rp1_freq; /* "less than" RP0 power/freqency */
|
||||
u8 rp0_freq; /* Non-overclocked max frequency. */
|
||||
u32 cz_freq;
|
||||
|
||||
u8 up_threshold; /* Current %busy required to uplock */
|
||||
u8 down_threshold; /* Current %busy required to downclock */
|
||||
@ -1588,8 +1574,7 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
|
||||
struct skl_ddb_allocation {
|
||||
struct skl_ddb_entry pipe[I915_MAX_PIPES];
|
||||
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
|
||||
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
|
||||
struct skl_ddb_entry cursor[I915_MAX_PIPES];
|
||||
struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES];
|
||||
};
|
||||
|
||||
struct skl_wm_values {
|
||||
@ -1597,18 +1582,13 @@ struct skl_wm_values {
|
||||
struct skl_ddb_allocation ddb;
|
||||
uint32_t wm_linetime[I915_MAX_PIPES];
|
||||
uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
|
||||
uint32_t cursor[I915_MAX_PIPES][8];
|
||||
uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
|
||||
uint32_t cursor_trans[I915_MAX_PIPES];
|
||||
};
|
||||
|
||||
struct skl_wm_level {
|
||||
bool plane_en[I915_MAX_PLANES];
|
||||
bool cursor_en;
|
||||
uint16_t plane_res_b[I915_MAX_PLANES];
|
||||
uint8_t plane_res_l[I915_MAX_PLANES];
|
||||
uint16_t cursor_res_b;
|
||||
uint8_t cursor_res_l;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -1809,6 +1789,7 @@ struct drm_i915_private {
|
||||
unsigned int cdclk_freq, max_cdclk_freq;
|
||||
unsigned int max_dotclk_freq;
|
||||
unsigned int hpll_freq;
|
||||
unsigned int czclk_freq;
|
||||
|
||||
/**
|
||||
* wq - Driver workqueue for GEM.
|
||||
@ -1897,6 +1878,11 @@ struct drm_i915_private {
|
||||
/* hda/i915 audio component */
|
||||
struct i915_audio_component *audio_component;
|
||||
bool audio_component_registered;
|
||||
/**
|
||||
* av_mutex - mutex for audio/video sync
|
||||
*
|
||||
*/
|
||||
struct mutex av_mutex;
|
||||
|
||||
uint32_t hw_context_size;
|
||||
struct list_head context_list;
|
||||
@ -1959,6 +1945,9 @@ struct drm_i915_private {
|
||||
|
||||
bool edp_low_vswing;
|
||||
|
||||
/* perform PHY state sanity checks? */
|
||||
bool chv_phy_assert[2];
|
||||
|
||||
/*
|
||||
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
|
||||
* will be rejected. Instead look for a better place.
|
||||
@ -2607,6 +2596,7 @@ struct drm_i915_cmd_table {
|
||||
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
|
||||
#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
|
||||
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
|
||||
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
|
||||
|
||||
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
|
||||
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
|
||||
@ -2824,6 +2814,8 @@ void i915_gem_vma_destroy(struct i915_vma *vma);
|
||||
#define PIN_OFFSET_BIAS (1<<3)
|
||||
#define PIN_USER (1<<4)
|
||||
#define PIN_UPDATE (1<<5)
|
||||
#define PIN_ZONE_4G (1<<6)
|
||||
#define PIN_HIGH (1<<7)
|
||||
#define PIN_OFFSET_MASK (~4095)
|
||||
int __must_check
|
||||
i915_gem_object_pin(struct drm_i915_gem_object *obj,
|
||||
@ -2839,6 +2831,11 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
|
||||
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
|
||||
u32 flags);
|
||||
int __must_check i915_vma_unbind(struct i915_vma *vma);
|
||||
/*
|
||||
* BEWARE: Do not use the function below unless you can _absolutely_
|
||||
* _guarantee_ VMA in question is _not in use_ anywhere.
|
||||
*/
|
||||
int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
|
||||
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
|
||||
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
|
||||
@ -3167,7 +3164,6 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
|
||||
unsigned long end,
|
||||
unsigned flags);
|
||||
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
|
||||
int i915_gem_evict_everything(struct drm_device *dev);
|
||||
|
||||
/* belongs in i915_gem_gtt.h */
|
||||
static inline void i915_gem_chipset_flush(struct drm_device *dev)
|
||||
@ -3198,11 +3194,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
|
||||
|
||||
/* i915_gem_shrinker.c */
|
||||
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
long target,
|
||||
unsigned long target,
|
||||
unsigned flags);
|
||||
#define I915_SHRINK_PURGEABLE 0x1
|
||||
#define I915_SHRINK_UNBOUND 0x2
|
||||
#define I915_SHRINK_BOUND 0x4
|
||||
#define I915_SHRINK_ACTIVE 0x8
|
||||
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
|
||||
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
|
||||
|
||||
|
@ -3208,7 +3208,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
|
||||
old_write_domain);
|
||||
}
|
||||
|
||||
int i915_vma_unbind(struct i915_vma *vma)
|
||||
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
|
||||
@ -3227,9 +3227,11 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
|
||||
BUG_ON(obj->pages == NULL);
|
||||
|
||||
ret = i915_gem_object_wait_rendering(obj, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (wait) {
|
||||
ret = i915_gem_object_wait_rendering(obj, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (i915_is_ggtt(vma->vm) &&
|
||||
vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
|
||||
@ -3274,6 +3276,16 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int i915_vma_unbind(struct i915_vma *vma)
|
||||
{
|
||||
return __i915_vma_unbind(vma, true);
|
||||
}
|
||||
|
||||
int __i915_vma_unbind_no_wait(struct i915_vma *vma)
|
||||
{
|
||||
return __i915_vma_unbind(vma, false);
|
||||
}
|
||||
|
||||
int i915_gpu_idle(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -3354,11 +3366,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 fence_alignment, unfenced_alignment;
|
||||
u32 search_flag, alloc_flag;
|
||||
u64 start, end;
|
||||
u64 size, fence_size;
|
||||
u64 start =
|
||||
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
|
||||
u64 end =
|
||||
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
|
||||
struct i915_vma *vma;
|
||||
int ret;
|
||||
|
||||
@ -3398,6 +3408,13 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
||||
size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
|
||||
}
|
||||
|
||||
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
|
||||
end = vm->total;
|
||||
if (flags & PIN_MAPPABLE)
|
||||
end = min_t(u64, end, dev_priv->gtt.mappable_end);
|
||||
if (flags & PIN_ZONE_4G)
|
||||
end = min_t(u64, end, (1ULL << 32));
|
||||
|
||||
if (alignment == 0)
|
||||
alignment = flags & PIN_MAPPABLE ? fence_alignment :
|
||||
unfenced_alignment;
|
||||
@ -3433,13 +3450,21 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
|
||||
if (IS_ERR(vma))
|
||||
goto err_unpin;
|
||||
|
||||
if (flags & PIN_HIGH) {
|
||||
search_flag = DRM_MM_SEARCH_BELOW;
|
||||
alloc_flag = DRM_MM_CREATE_TOP;
|
||||
} else {
|
||||
search_flag = DRM_MM_SEARCH_DEFAULT;
|
||||
alloc_flag = DRM_MM_CREATE_DEFAULT;
|
||||
}
|
||||
|
||||
search_free:
|
||||
ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
|
||||
size, alignment,
|
||||
obj->cache_level,
|
||||
start, end,
|
||||
DRM_MM_SEARCH_DEFAULT,
|
||||
DRM_MM_CREATE_DEFAULT);
|
||||
search_flag,
|
||||
alloc_flag);
|
||||
if (ret) {
|
||||
ret = i915_gem_evict_something(dev, vm, size, alignment,
|
||||
obj->cache_level,
|
||||
@ -4533,22 +4558,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
|
||||
BUG();
|
||||
}
|
||||
|
||||
static bool
|
||||
intel_enable_blt(struct drm_device *dev)
|
||||
{
|
||||
if (!HAS_BLT(dev))
|
||||
return false;
|
||||
|
||||
/* The blitter was dysfunctional on early prototypes */
|
||||
if (IS_GEN6(dev) && dev->pdev->revision < 8) {
|
||||
DRM_INFO("BLT not supported on this pre-production hardware;"
|
||||
" graphics performance will be degraded.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void init_unused_ring(struct drm_device *dev, u32 base)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -4591,7 +4600,7 @@ int i915_gem_init_rings(struct drm_device *dev)
|
||||
goto cleanup_render_ring;
|
||||
}
|
||||
|
||||
if (intel_enable_blt(dev)) {
|
||||
if (HAS_BLT(dev)) {
|
||||
ret = intel_init_blt_ring_buffer(dev);
|
||||
if (ret)
|
||||
goto cleanup_bsd_ring;
|
||||
|
@ -133,6 +133,23 @@ static int get_context_size(struct drm_device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void i915_gem_context_clean(struct intel_context *ctx)
|
||||
{
|
||||
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
|
||||
struct i915_vma *vma, *next;
|
||||
|
||||
if (!ppgtt)
|
||||
return;
|
||||
|
||||
WARN_ON(!list_empty(&ppgtt->base.active_list));
|
||||
|
||||
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
|
||||
mm_list) {
|
||||
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void i915_gem_context_free(struct kref *ctx_ref)
|
||||
{
|
||||
struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
|
||||
@ -142,6 +159,13 @@ void i915_gem_context_free(struct kref *ctx_ref)
|
||||
if (i915.enable_execlists)
|
||||
intel_lr_context_free(ctx);
|
||||
|
||||
/*
|
||||
* This context is going away and we need to remove all VMAs still
|
||||
* around. This is to handle imported shared objects for which
|
||||
* destructor did not run when their handles were closed.
|
||||
*/
|
||||
i915_gem_context_clean(ctx);
|
||||
|
||||
i915_ppgtt_put(ctx->ppgtt);
|
||||
|
||||
if (ctx->legacy_hw_ctx.rcs_state)
|
||||
|
@ -237,48 +237,3 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_evict_everything - Try to evict all objects
|
||||
* @dev: Device to evict objects for
|
||||
*
|
||||
* This functions tries to evict all gem objects from all address spaces. Used
|
||||
* by the shrinker as a last-ditch effort and for suspend, before releasing the
|
||||
* backing storage of all unbound objects.
|
||||
*/
|
||||
int
|
||||
i915_gem_evict_everything(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_address_space *vm, *v;
|
||||
bool lists_empty = true;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
|
||||
lists_empty = (list_empty(&vm->inactive_list) &&
|
||||
list_empty(&vm->active_list));
|
||||
if (!lists_empty)
|
||||
lists_empty = false;
|
||||
}
|
||||
|
||||
if (lists_empty)
|
||||
return -ENOSPC;
|
||||
|
||||
trace_i915_gem_evict_everything(dev);
|
||||
|
||||
/* The gpu_idle will flush everything in the write domain to the
|
||||
* active list. Then we must move everything off the active list
|
||||
* with retire requests.
|
||||
*/
|
||||
ret = i915_gpu_idle(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i915_gem_retire_requests(dev);
|
||||
|
||||
/* Having flushed everything, unbind() should never raise an error */
|
||||
list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
|
||||
WARN_ON(i915_gem_evict_vm(vm, false));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -590,10 +590,17 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
|
||||
flags |= PIN_GLOBAL;
|
||||
|
||||
if (!drm_mm_node_allocated(&vma->node)) {
|
||||
/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
|
||||
* limit address to the first 4GBs for unflagged objects.
|
||||
*/
|
||||
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
|
||||
flags |= PIN_ZONE_4G;
|
||||
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
|
||||
flags |= PIN_GLOBAL | PIN_MAPPABLE;
|
||||
if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
|
||||
flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
|
||||
if ((flags & PIN_MAPPABLE) == 0)
|
||||
flags |= PIN_HIGH;
|
||||
}
|
||||
|
||||
ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
|
||||
@ -671,6 +678,10 @@ eb_vma_misplaced(struct i915_vma *vma)
|
||||
if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
|
||||
return !only_mappable_for_reloc(entry->flags);
|
||||
|
||||
if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
|
||||
(vma->node.start + vma->node.size - 1) >> 32)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -934,7 +945,21 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
|
||||
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
|
||||
return false;
|
||||
|
||||
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
|
||||
/* Kernel clipping was a DRI1 misfeature */
|
||||
if (exec->num_cliprects || exec->cliprects_ptr)
|
||||
return false;
|
||||
|
||||
if (exec->DR4 == 0xffffffff) {
|
||||
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
|
||||
exec->DR4 = 0;
|
||||
}
|
||||
if (exec->DR1 || exec->DR4)
|
||||
return false;
|
||||
|
||||
if ((exec->batch_start_offset | exec->batch_len) & 0x7)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1098,47 +1123,6 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_emit_box(struct drm_i915_gem_request *req,
|
||||
struct drm_clip_rect *box,
|
||||
int DR1, int DR4)
|
||||
{
|
||||
struct intel_engine_cs *ring = req->ring;
|
||||
int ret;
|
||||
|
||||
if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
|
||||
box->y2 <= 0 || box->x2 <= 0) {
|
||||
DRM_ERROR("Bad box %d,%d..%d,%d\n",
|
||||
box->x1, box->y1, box->x2, box->y2);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(ring->dev)->gen >= 4) {
|
||||
ret = intel_ring_begin(req, 4);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
|
||||
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
|
||||
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
|
||||
intel_ring_emit(ring, DR4);
|
||||
} else {
|
||||
ret = intel_ring_begin(req, 6);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
|
||||
intel_ring_emit(ring, DR1);
|
||||
intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
|
||||
intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
|
||||
intel_ring_emit(ring, DR4);
|
||||
intel_ring_emit(ring, 0);
|
||||
}
|
||||
intel_ring_advance(ring);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct drm_i915_gem_object*
|
||||
i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
|
||||
struct drm_i915_gem_exec_object2 *shadow_exec_entry,
|
||||
@ -1197,65 +1181,21 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
||||
struct drm_i915_gem_execbuffer2 *args,
|
||||
struct list_head *vmas)
|
||||
{
|
||||
struct drm_clip_rect *cliprects = NULL;
|
||||
struct drm_device *dev = params->dev;
|
||||
struct intel_engine_cs *ring = params->ring;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u64 exec_start, exec_len;
|
||||
int instp_mode;
|
||||
u32 instp_mask;
|
||||
int i, ret = 0;
|
||||
|
||||
if (args->num_cliprects != 0) {
|
||||
if (ring != &dev_priv->ring[RCS]) {
|
||||
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 5) {
|
||||
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
|
||||
DRM_DEBUG("execbuf with %u cliprects\n",
|
||||
args->num_cliprects);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cliprects = kcalloc(args->num_cliprects,
|
||||
sizeof(*cliprects),
|
||||
GFP_KERNEL);
|
||||
if (cliprects == NULL) {
|
||||
ret = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (copy_from_user(cliprects,
|
||||
to_user_ptr(args->cliprects_ptr),
|
||||
sizeof(*cliprects)*args->num_cliprects)) {
|
||||
ret = -EFAULT;
|
||||
goto error;
|
||||
}
|
||||
} else {
|
||||
if (args->DR4 == 0xffffffff) {
|
||||
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
|
||||
args->DR4 = 0;
|
||||
}
|
||||
|
||||
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
|
||||
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
int ret;
|
||||
|
||||
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
|
||||
if (ret)
|
||||
goto error;
|
||||
return ret;
|
||||
|
||||
ret = i915_switch_context(params->request);
|
||||
if (ret)
|
||||
goto error;
|
||||
return ret;
|
||||
|
||||
WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
|
||||
"%s didn't clear reload\n", ring->name);
|
||||
@ -1268,22 +1208,19 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
||||
case I915_EXEC_CONSTANTS_REL_SURFACE:
|
||||
if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
|
||||
DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
|
||||
ret = -EINVAL;
|
||||
goto error;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (instp_mode != dev_priv->relative_constants_mode) {
|
||||
if (INTEL_INFO(dev)->gen < 4) {
|
||||
DRM_DEBUG("no rel constants on pre-gen4\n");
|
||||
ret = -EINVAL;
|
||||
goto error;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev)->gen > 5 &&
|
||||
instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
|
||||
DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
|
||||
ret = -EINVAL;
|
||||
goto error;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* The HW changed the meaning on this bit on gen6 */
|
||||
@ -1293,15 +1230,14 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
||||
break;
|
||||
default:
|
||||
DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
|
||||
ret = -EINVAL;
|
||||
goto error;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (ring == &dev_priv->ring[RCS] &&
|
||||
instp_mode != dev_priv->relative_constants_mode) {
|
||||
instp_mode != dev_priv->relative_constants_mode) {
|
||||
ret = intel_ring_begin(params->request, 4);
|
||||
if (ret)
|
||||
goto error;
|
||||
return ret;
|
||||
|
||||
intel_ring_emit(ring, MI_NOOP);
|
||||
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
|
||||
@ -1315,42 +1251,25 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
|
||||
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
|
||||
ret = i915_reset_gen7_sol_offsets(dev, params->request);
|
||||
if (ret)
|
||||
goto error;
|
||||
return ret;
|
||||
}
|
||||
|
||||
exec_len = args->batch_len;
|
||||
exec_start = params->batch_obj_vm_offset +
|
||||
params->args_batch_start_offset;
|
||||
|
||||
if (cliprects) {
|
||||
for (i = 0; i < args->num_cliprects; i++) {
|
||||
ret = i915_emit_box(params->request, &cliprects[i],
|
||||
args->DR1, args->DR4);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = ring->dispatch_execbuffer(params->request,
|
||||
exec_start, exec_len,
|
||||
params->dispatch_flags);
|
||||
if (ret)
|
||||
goto error;
|
||||
}
|
||||
} else {
|
||||
ret = ring->dispatch_execbuffer(params->request,
|
||||
exec_start, exec_len,
|
||||
params->dispatch_flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
ret = ring->dispatch_execbuffer(params->request,
|
||||
exec_start, exec_len,
|
||||
params->dispatch_flags);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
|
||||
|
||||
i915_gem_execbuffer_move_to_active(vmas, params->request);
|
||||
i915_gem_execbuffer_retire_commands(params);
|
||||
|
||||
error:
|
||||
kfree(cliprects);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -59,19 +59,19 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
||||
struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
int fence_reg;
|
||||
int fence_reg_lo, fence_reg_hi;
|
||||
int fence_pitch_shift;
|
||||
|
||||
if (INTEL_INFO(dev)->gen >= 6) {
|
||||
fence_reg = FENCE_REG_SANDYBRIDGE_0;
|
||||
fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
|
||||
fence_reg_lo = FENCE_REG_GEN6_LO(reg);
|
||||
fence_reg_hi = FENCE_REG_GEN6_HI(reg);
|
||||
fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
|
||||
} else {
|
||||
fence_reg = FENCE_REG_965_0;
|
||||
fence_reg_lo = FENCE_REG_965_LO(reg);
|
||||
fence_reg_hi = FENCE_REG_965_HI(reg);
|
||||
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
|
||||
}
|
||||
|
||||
fence_reg += reg * 8;
|
||||
|
||||
/* To w/a incoherency with non-atomic 64-bit register updates,
|
||||
* we split the 64-bit update into two 32-bit writes. In order
|
||||
* for a partial fence not to be evaluated between writes, we
|
||||
@ -81,8 +81,8 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
||||
* For extra levels of paranoia, we make sure each step lands
|
||||
* before applying the next step.
|
||||
*/
|
||||
I915_WRITE(fence_reg, 0);
|
||||
POSTING_READ(fence_reg);
|
||||
I915_WRITE(fence_reg_lo, 0);
|
||||
POSTING_READ(fence_reg_lo);
|
||||
|
||||
if (obj) {
|
||||
u32 size = i915_gem_obj_ggtt_size(obj);
|
||||
@ -103,14 +103,14 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
|
||||
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
|
||||
val |= I965_FENCE_REG_VALID;
|
||||
|
||||
I915_WRITE(fence_reg + 4, val >> 32);
|
||||
POSTING_READ(fence_reg + 4);
|
||||
I915_WRITE(fence_reg_hi, val >> 32);
|
||||
POSTING_READ(fence_reg_hi);
|
||||
|
||||
I915_WRITE(fence_reg + 0, val);
|
||||
POSTING_READ(fence_reg);
|
||||
I915_WRITE(fence_reg_lo, val);
|
||||
POSTING_READ(fence_reg_lo);
|
||||
} else {
|
||||
I915_WRITE(fence_reg + 4, 0);
|
||||
POSTING_READ(fence_reg + 4);
|
||||
I915_WRITE(fence_reg_hi, 0);
|
||||
POSTING_READ(fence_reg_hi);
|
||||
}
|
||||
}
|
||||
|
||||
@ -149,13 +149,8 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
|
||||
} else
|
||||
val = 0;
|
||||
|
||||
if (reg < 8)
|
||||
reg = FENCE_REG_830_0 + reg * 4;
|
||||
else
|
||||
reg = FENCE_REG_945_8 + (reg - 8) * 4;
|
||||
|
||||
I915_WRITE(reg, val);
|
||||
POSTING_READ(reg);
|
||||
I915_WRITE(FENCE_REG(reg), val);
|
||||
POSTING_READ(FENCE_REG(reg));
|
||||
}
|
||||
|
||||
static void i830_write_fence_reg(struct drm_device *dev, int reg,
|
||||
@ -186,8 +181,8 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
|
||||
} else
|
||||
val = 0;
|
||||
|
||||
I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
|
||||
POSTING_READ(FENCE_REG_830_0 + reg * 4);
|
||||
I915_WRITE(FENCE_REG(reg), val);
|
||||
POSTING_READ(FENCE_REG(reg));
|
||||
}
|
||||
|
||||
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
|
||||
|
@ -2889,8 +2889,8 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
|
||||
|
||||
/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
|
||||
* write would work. */
|
||||
I915_WRITE(GEN8_PRIVATE_PAT, pat);
|
||||
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
|
||||
I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
|
||||
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
|
||||
}
|
||||
|
||||
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
|
||||
@ -2924,8 +2924,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
|
||||
GEN8_PPAT(6, CHV_PPAT_SNOOP) |
|
||||
GEN8_PPAT(7, CHV_PPAT_SNOOP);
|
||||
|
||||
I915_WRITE(GEN8_PRIVATE_PAT, pat);
|
||||
I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
|
||||
I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
|
||||
I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
|
||||
}
|
||||
|
||||
static int gen8_gmch_probe(struct drm_device *dev,
|
||||
|
@ -394,7 +394,8 @@ struct i915_hw_ppgtt {
|
||||
*/
|
||||
#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
|
||||
for (iter = gen6_pde_index(start); \
|
||||
pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
|
||||
length > 0 && iter < I915_PDES ? \
|
||||
(pt = (pd)->page_table[iter]), 1 : 0; \
|
||||
iter++, \
|
||||
temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
|
||||
temp = min_t(unsigned, temp, length), \
|
||||
@ -459,7 +460,8 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
|
||||
*/
|
||||
#define gen8_for_each_pde(pt, pd, start, length, temp, iter) \
|
||||
for (iter = gen8_pde_index(start); \
|
||||
pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \
|
||||
length > 0 && iter < I915_PDES ? \
|
||||
(pt = (pd)->page_table[iter]), 1 : 0; \
|
||||
iter++, \
|
||||
temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \
|
||||
temp = min(temp, length), \
|
||||
@ -467,8 +469,8 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
|
||||
|
||||
#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \
|
||||
for (iter = gen8_pdpe_index(start); \
|
||||
pd = (pdp)->page_directory[iter], \
|
||||
length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \
|
||||
length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \
|
||||
(pd = (pdp)->page_directory[iter]), 1 : 0; \
|
||||
iter++, \
|
||||
temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \
|
||||
temp = min(temp, length), \
|
||||
@ -476,8 +478,8 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
|
||||
|
||||
#define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \
|
||||
for (iter = gen8_pml4e_index(start); \
|
||||
pdp = (pml4)->pdps[iter], \
|
||||
length > 0 && iter < GEN8_PML4ES_PER_PML4; \
|
||||
length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \
|
||||
(pdp = (pml4)->pdps[iter]), 1 : 0; \
|
||||
iter++, \
|
||||
temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \
|
||||
temp = min(temp, length), \
|
||||
|
@ -73,7 +73,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
|
||||
*/
|
||||
unsigned long
|
||||
i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
long target, unsigned flags)
|
||||
unsigned long target, unsigned flags)
|
||||
{
|
||||
const struct {
|
||||
struct list_head *list;
|
||||
@ -85,6 +85,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
}, *phase;
|
||||
unsigned long count = 0;
|
||||
|
||||
trace_i915_gem_shrink(dev_priv, target, flags);
|
||||
i915_gem_retire_requests(dev_priv->dev);
|
||||
|
||||
/*
|
||||
* As we may completely rewrite the (un)bound list whilst unbinding
|
||||
* (due to retiring requests) we have to strictly process only
|
||||
@ -123,6 +126,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
obj->madv != I915_MADV_DONTNEED)
|
||||
continue;
|
||||
|
||||
if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
|
||||
continue;
|
||||
|
||||
drm_gem_object_reference(&obj->base);
|
||||
|
||||
/* For the unbound phase, this should be a no-op! */
|
||||
@ -139,11 +145,13 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
list_splice(&still_in_list, phase->list);
|
||||
}
|
||||
|
||||
i915_gem_retire_requests(dev_priv->dev);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_shrink - Shrink buffer object caches completely
|
||||
* i915_gem_shrink_all - Shrink buffer object caches completely
|
||||
* @dev_priv: i915 device
|
||||
*
|
||||
* This is a simple wraper around i915_gem_shrink() to aggressively shrink all
|
||||
@ -158,9 +166,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
|
||||
*/
|
||||
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
i915_gem_evict_everything(dev_priv->dev);
|
||||
return i915_gem_shrink(dev_priv, LONG_MAX,
|
||||
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
|
||||
return i915_gem_shrink(dev_priv, -1UL,
|
||||
I915_SHRINK_BOUND |
|
||||
I915_SHRINK_UNBOUND |
|
||||
I915_SHRINK_ACTIVE);
|
||||
}
|
||||
|
||||
static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
|
||||
@ -213,7 +222,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
|
||||
count += obj->base.size >> PAGE_SHIFT;
|
||||
|
||||
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
|
||||
if (obj->pages_pin_count == num_vma_bound(obj))
|
||||
if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
|
||||
count += obj->base.size >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
|
@ -30,6 +30,9 @@
|
||||
#include <drm/i915_drm.h>
|
||||
#include "i915_drv.h"
|
||||
|
||||
#define KB(x) ((x) * 1024)
|
||||
#define MB(x) (KB(x) * 1024)
|
||||
|
||||
/*
|
||||
* The BIOS typically reserves some of the system's memory for the exclusive
|
||||
* use of the integrated graphics. This memory is no longer available for
|
||||
@ -51,6 +54,11 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
|
||||
if (!drm_mm_initialized(&dev_priv->mm.stolen))
|
||||
return -ENODEV;
|
||||
|
||||
/* See the comment at the drm_mm_init() call for more about this check.
|
||||
* WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
|
||||
if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
|
||||
start = 4096;
|
||||
|
||||
mutex_lock(&dev_priv->mm.stolen_lock);
|
||||
ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
|
||||
alignment, start, end,
|
||||
@ -86,24 +94,91 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
|
||||
/* Almost universally we can find the Graphics Base of Stolen Memory
|
||||
* at offset 0x5c in the igfx configuration space. On a few (desktop)
|
||||
* machines this is also mirrored in the bridge device at different
|
||||
* locations, or in the MCHBAR. On gen2, the layout is again slightly
|
||||
* different with the Graphics Segment immediately following Top of
|
||||
* Memory (or Top of Usable DRAM). Note it appears that TOUD is only
|
||||
* reported by 865g, so we just use the top of memory as determined
|
||||
* by the e820 probe.
|
||||
* locations, or in the MCHBAR.
|
||||
*
|
||||
* On 865 we just check the TOUD register.
|
||||
*
|
||||
* On 830/845/85x the stolen memory base isn't available in any
|
||||
* register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
|
||||
*
|
||||
* XXX However gen2 requires an unavailable symbol.
|
||||
*/
|
||||
base = 0;
|
||||
if (INTEL_INFO(dev)->gen >= 3) {
|
||||
/* Read Graphics Base of Stolen Memory directly */
|
||||
pci_read_config_dword(dev->pdev, 0x5c, &base);
|
||||
base &= ~((1<<20) - 1);
|
||||
} else { /* GEN2 */
|
||||
#if 0
|
||||
/* Stolen is immediately above Top of Memory */
|
||||
base = max_low_pfn_mapped << PAGE_SHIFT;
|
||||
#endif
|
||||
} else if (IS_I865G(dev)) {
|
||||
u16 toud = 0;
|
||||
|
||||
/*
|
||||
* FIXME is the graphics stolen memory region
|
||||
* always at TOUD? Ie. is it always the last
|
||||
* one to be allocated by the BIOS?
|
||||
*/
|
||||
pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0),
|
||||
I865_TOUD, &toud);
|
||||
|
||||
base = toud << 16;
|
||||
} else if (IS_I85X(dev)) {
|
||||
u32 tseg_size = 0;
|
||||
u32 tom;
|
||||
u8 tmp;
|
||||
|
||||
pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
|
||||
I85X_ESMRAMC, &tmp);
|
||||
|
||||
if (tmp & TSEG_ENABLE)
|
||||
tseg_size = MB(1);
|
||||
|
||||
pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1),
|
||||
I85X_DRB3, &tmp);
|
||||
tom = tmp * MB(32);
|
||||
|
||||
base = tom - tseg_size - dev_priv->gtt.stolen_size;
|
||||
} else if (IS_845G(dev)) {
|
||||
u32 tseg_size = 0;
|
||||
u32 tom;
|
||||
u8 tmp;
|
||||
|
||||
pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
|
||||
I845_ESMRAMC, &tmp);
|
||||
|
||||
if (tmp & TSEG_ENABLE) {
|
||||
switch (tmp & I845_TSEG_SIZE_MASK) {
|
||||
case I845_TSEG_SIZE_512K:
|
||||
tseg_size = KB(512);
|
||||
break;
|
||||
case I845_TSEG_SIZE_1M:
|
||||
tseg_size = MB(1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
|
||||
I830_DRB3, &tmp);
|
||||
tom = tmp * MB(32);
|
||||
|
||||
base = tom - tseg_size - dev_priv->gtt.stolen_size;
|
||||
} else if (IS_I830(dev)) {
|
||||
u32 tseg_size = 0;
|
||||
u32 tom;
|
||||
u8 tmp;
|
||||
|
||||
pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
|
||||
I830_ESMRAMC, &tmp);
|
||||
|
||||
if (tmp & TSEG_ENABLE) {
|
||||
if (tmp & I830_TSEG_SIZE_1M)
|
||||
tseg_size = MB(1);
|
||||
else
|
||||
tseg_size = KB(512);
|
||||
}
|
||||
|
||||
pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0),
|
||||
I830_DRB3, &tmp);
|
||||
tom = tmp * MB(32);
|
||||
|
||||
base = tom - tseg_size - dev_priv->gtt.stolen_size;
|
||||
}
|
||||
|
||||
if (base == 0)
|
||||
@ -393,7 +468,17 @@ int i915_gem_init_stolen(struct drm_device *dev)
|
||||
dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
|
||||
reserved_total;
|
||||
|
||||
/* Basic memrange allocator for stolen space */
|
||||
/*
|
||||
* Basic memrange allocator for stolen space.
|
||||
*
|
||||
* TODO: Notice that some platforms require us to not use the first page
|
||||
* of the stolen memory but their BIOSes may still put the framebuffer
|
||||
* on the first page. So we don't reserve this page for now because of
|
||||
* that. Our current solution is to just prevent new nodes from being
|
||||
* inserted on the first page - see the check we have at
|
||||
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
|
||||
* problem later.
|
||||
*/
|
||||
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
|
||||
|
||||
return 0;
|
||||
|
@ -50,7 +50,6 @@ struct i915_mmu_notifier {
|
||||
struct mmu_notifier mn;
|
||||
struct rb_root objects;
|
||||
struct list_head linear;
|
||||
unsigned long serial;
|
||||
bool has_linear;
|
||||
};
|
||||
|
||||
@ -59,13 +58,16 @@ struct i915_mmu_object {
|
||||
struct interval_tree_node it;
|
||||
struct list_head link;
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct work_struct work;
|
||||
bool active;
|
||||
bool is_linear;
|
||||
};
|
||||
|
||||
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
|
||||
static void __cancel_userptr__worker(struct work_struct *work)
|
||||
{
|
||||
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
|
||||
struct drm_i915_gem_object *obj = mo->obj;
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
unsigned long end;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
/* Cancel any active worker and force us to re-evaluate gup */
|
||||
@ -88,45 +90,28 @@ static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
|
||||
dev_priv->mm.interruptible = was_interruptible;
|
||||
}
|
||||
|
||||
end = obj->userptr.ptr + obj->base.size;
|
||||
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
return end;
|
||||
}
|
||||
|
||||
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
static unsigned long cancel_userptr(struct i915_mmu_object *mo)
|
||||
{
|
||||
struct i915_mmu_object *mo;
|
||||
unsigned long serial;
|
||||
unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
|
||||
|
||||
restart:
|
||||
serial = mn->serial;
|
||||
list_for_each_entry(mo, &mn->linear, link) {
|
||||
struct drm_i915_gem_object *obj;
|
||||
|
||||
if (mo->it.last < start || mo->it.start > end)
|
||||
continue;
|
||||
|
||||
obj = mo->obj;
|
||||
|
||||
if (!kref_get_unless_zero(&obj->base.refcount))
|
||||
continue;
|
||||
|
||||
spin_unlock(&mn->lock);
|
||||
|
||||
cancel_userptr(obj);
|
||||
|
||||
spin_lock(&mn->lock);
|
||||
if (serial != mn->serial)
|
||||
goto restart;
|
||||
/* The mmu_object is released late when destroying the
|
||||
* GEM object so it is entirely possible to gain a
|
||||
* reference on an object in the process of being freed
|
||||
* since our serialisation is via the spinlock and not
|
||||
* the struct_mutex - and consequently use it after it
|
||||
* is freed and then double free it.
|
||||
*/
|
||||
if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
|
||||
schedule_work(&mo->work);
|
||||
/* only schedule one work packet to avoid the refleak */
|
||||
mo->active = false;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
return end;
|
||||
}
|
||||
|
||||
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
|
||||
@ -134,46 +119,32 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
|
||||
struct interval_tree_node *it = NULL;
|
||||
unsigned long next = start;
|
||||
unsigned long serial = 0;
|
||||
struct i915_mmu_notifier *mn =
|
||||
container_of(_mn, struct i915_mmu_notifier, mn);
|
||||
struct i915_mmu_object *mo;
|
||||
|
||||
end--; /* interval ranges are inclusive, but invalidate range is exclusive */
|
||||
while (next < end) {
|
||||
struct drm_i915_gem_object *obj = NULL;
|
||||
/* interval ranges are inclusive, but invalidate range is exclusive */
|
||||
end--;
|
||||
|
||||
spin_lock(&mn->lock);
|
||||
if (mn->has_linear)
|
||||
it = invalidate_range__linear(mn, mm, start, end);
|
||||
else if (serial == mn->serial)
|
||||
it = interval_tree_iter_next(it, next, end);
|
||||
else
|
||||
it = interval_tree_iter_first(&mn->objects, start, end);
|
||||
if (it != NULL) {
|
||||
obj = container_of(it, struct i915_mmu_object, it)->obj;
|
||||
|
||||
/* The mmu_object is released late when destroying the
|
||||
* GEM object so it is entirely possible to gain a
|
||||
* reference on an object in the process of being freed
|
||||
* since our serialisation is via the spinlock and not
|
||||
* the struct_mutex - and consequently use it after it
|
||||
* is freed and then double free it.
|
||||
*/
|
||||
if (!kref_get_unless_zero(&obj->base.refcount)) {
|
||||
spin_unlock(&mn->lock);
|
||||
serial = 0;
|
||||
spin_lock(&mn->lock);
|
||||
if (mn->has_linear) {
|
||||
list_for_each_entry(mo, &mn->linear, link) {
|
||||
if (mo->it.last < start || mo->it.start > end)
|
||||
continue;
|
||||
}
|
||||
|
||||
serial = mn->serial;
|
||||
cancel_userptr(mo);
|
||||
}
|
||||
spin_unlock(&mn->lock);
|
||||
if (obj == NULL)
|
||||
return;
|
||||
} else {
|
||||
struct interval_tree_node *it;
|
||||
|
||||
next = cancel_userptr(obj);
|
||||
it = interval_tree_iter_first(&mn->objects, start, end);
|
||||
while (it) {
|
||||
mo = container_of(it, struct i915_mmu_object, it);
|
||||
start = cancel_userptr(mo);
|
||||
it = interval_tree_iter_next(it, start, end);
|
||||
}
|
||||
}
|
||||
spin_unlock(&mn->lock);
|
||||
}
|
||||
|
||||
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
|
||||
@ -193,7 +164,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
|
||||
spin_lock_init(&mn->lock);
|
||||
mn->mn.ops = &i915_gem_userptr_notifier;
|
||||
mn->objects = RB_ROOT;
|
||||
mn->serial = 1;
|
||||
INIT_LIST_HEAD(&mn->linear);
|
||||
mn->has_linear = false;
|
||||
|
||||
@ -207,12 +177,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
|
||||
return mn;
|
||||
}
|
||||
|
||||
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
|
||||
{
|
||||
if (++mn->serial == 0)
|
||||
mn->serial = 1;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_mmu_notifier_add(struct drm_device *dev,
|
||||
struct i915_mmu_notifier *mn,
|
||||
@ -259,10 +223,9 @@ i915_mmu_notifier_add(struct drm_device *dev,
|
||||
} else
|
||||
interval_tree_insert(&mo->it, &mn->objects);
|
||||
|
||||
if (ret == 0) {
|
||||
if (ret == 0)
|
||||
list_add(&mo->link, &mn->linear);
|
||||
__i915_mmu_notifier_update_serial(mn);
|
||||
}
|
||||
|
||||
spin_unlock(&mn->lock);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
@ -290,7 +253,6 @@ i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
|
||||
mn->has_linear = i915_mmu_notifier_has_linear(mn);
|
||||
else
|
||||
interval_tree_remove(&mo->it, &mn->objects);
|
||||
__i915_mmu_notifier_update_serial(mn);
|
||||
spin_unlock(&mn->lock);
|
||||
}
|
||||
|
||||
@ -357,6 +319,7 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
|
||||
mo->it.start = obj->userptr.ptr;
|
||||
mo->it.last = mo->it.start + obj->base.size - 1;
|
||||
mo->obj = obj;
|
||||
INIT_WORK(&mo->work, __cancel_userptr__worker);
|
||||
|
||||
ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
|
||||
if (ret) {
|
||||
@ -565,31 +528,65 @@ __i915_gem_userptr_set_pages(struct drm_i915_gem_object *obj,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
			      bool value)
{
	int ret = 0;

	/* During mm_invalidate_range we need to cancel any userptr that
	 * overlaps the range being invalidated. Doing so requires the
	 * struct_mutex, and that risks recursion. In order to cause
	 * recursion, the user must alias the userptr address space with
	 * a GTT mmapping (possible with a MAP_FIXED) - then when we have
	 * to invalidate that mmaping, mm_invalidate_range is called with
	 * the userptr address *and* the struct_mutex held. To prevent that
	 * we set a flag under the i915_mmu_notifier spinlock to indicate
	 * whether this object is valid.
	 */
#if defined(CONFIG_MMU_NOTIFIER)
	if (obj->userptr.mmu_object == NULL)
		return 0;

	spin_lock(&obj->userptr.mmu_object->mn->lock);
	/* In order to serialise get_pages with an outstanding
	 * cancel_userptr, we must drop the struct_mutex and try again.
	 */
	if (!value || !work_pending(&obj->userptr.mmu_object->work))
		obj->userptr.mmu_object->active = value;
	else
		ret = -EAGAIN;
	spin_unlock(&obj->userptr.mmu_object->mn->lock);
#endif

	return ret;
}
|
||||
|
||||
static void
|
||||
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
||||
{
|
||||
struct get_pages_work *work = container_of(_work, typeof(*work), work);
|
||||
struct drm_i915_gem_object *obj = work->obj;
|
||||
struct drm_device *dev = obj->base.dev;
|
||||
const int num_pages = obj->base.size >> PAGE_SHIFT;
|
||||
const int npages = obj->base.size >> PAGE_SHIFT;
|
||||
struct page **pvec;
|
||||
int pinned, ret;
|
||||
|
||||
ret = -ENOMEM;
|
||||
pinned = 0;
|
||||
|
||||
pvec = kmalloc(num_pages*sizeof(struct page *),
|
||||
pvec = kmalloc(npages*sizeof(struct page *),
|
||||
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
|
||||
if (pvec == NULL)
|
||||
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
|
||||
pvec = drm_malloc_ab(npages, sizeof(struct page *));
|
||||
if (pvec != NULL) {
|
||||
struct mm_struct *mm = obj->userptr.mm->mm;
|
||||
|
||||
down_read(&mm->mmap_sem);
|
||||
while (pinned < num_pages) {
|
||||
while (pinned < npages) {
|
||||
ret = get_user_pages(work->task, mm,
|
||||
obj->userptr.ptr + pinned * PAGE_SIZE,
|
||||
num_pages - pinned,
|
||||
npages - pinned,
|
||||
!obj->userptr.read_only, 0,
|
||||
pvec + pinned, NULL);
|
||||
if (ret < 0)
|
||||
@ -601,20 +598,22 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
||||
}
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
if (obj->userptr.work != &work->work) {
|
||||
ret = 0;
|
||||
} else if (pinned == num_pages) {
|
||||
ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
|
||||
if (ret == 0) {
|
||||
list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
|
||||
obj->get_page.sg = obj->pages->sgl;
|
||||
obj->get_page.last = 0;
|
||||
|
||||
pinned = 0;
|
||||
if (obj->userptr.work == &work->work) {
|
||||
if (pinned == npages) {
|
||||
ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
|
||||
if (ret == 0) {
|
||||
list_add_tail(&obj->global_list,
|
||||
&to_i915(dev)->mm.unbound_list);
|
||||
obj->get_page.sg = obj->pages->sgl;
|
||||
obj->get_page.last = 0;
|
||||
pinned = 0;
|
||||
}
|
||||
}
|
||||
obj->userptr.work = ERR_PTR(ret);
|
||||
if (ret)
|
||||
__i915_gem_userptr_set_active(obj, false);
|
||||
}
|
||||
|
||||
obj->userptr.work = ERR_PTR(ret);
|
||||
obj->userptr.workers--;
|
||||
drm_gem_object_unreference(&obj->base);
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
@ -626,12 +625,61 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
|
||||
kfree(work);
|
||||
}
|
||||
|
||||
static int
__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
				      bool *active)
{
	struct get_pages_work *work;

	/* Spawn a worker so that we can acquire the
	 * user pages without holding our mutex. Access
	 * to the user pages requires mmap_sem, and we have
	 * a strict lock ordering of mmap_sem, struct_mutex -
	 * we already hold struct_mutex here and so cannot
	 * call gup without encountering a lock inversion.
	 *
	 * Userspace will keep on repeating the operation
	 * (thanks to EAGAIN) until either we hit the fast
	 * path or the worker completes. If the worker is
	 * cancelled or superseded, the task is still run
	 * but the results ignored. (This leads to
	 * complications that we may have a stray object
	 * refcount that we need to be wary of when
	 * checking for existing objects during creation.)
	 * If the worker encounters an error, it reports
	 * that error back to this function through
	 * obj->userptr.work = ERR_PTR.
	 */
	if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS)
		return -EAGAIN;

	work = kmalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	obj->userptr.work = &work->work;
	obj->userptr.workers++;

	work->obj = obj;
	drm_gem_object_reference(&obj->base);

	work->task = current;
	get_task_struct(work->task);

	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
	schedule_work(&work->work);

	*active = true;
	return -EAGAIN;
}
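As the comment above says, the caller that triggered get_pages just sees -EAGAIN and repeats the ioctl until the worker has finished; on the userspace side that retry is the usual drmIoctl()-style loop (a sketch only, with fd/request/arg standing in for the real call):

	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EINTR || errno == EAGAIN));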
|
||||
|
||||
static int
|
||||
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
const int num_pages = obj->base.size >> PAGE_SHIFT;
|
||||
struct page **pvec;
|
||||
int pinned, ret;
|
||||
bool active;
|
||||
|
||||
/* If userspace should engineer that these pages are replaced in
|
||||
* the vma between us binding this page into the GTT and completion
|
||||
@ -649,6 +697,20 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
|
||||
* to the vma (discard or cloning) which should prevent the more
|
||||
* egregious cases from causing harm.
|
||||
*/
|
||||
if (IS_ERR(obj->userptr.work)) {
|
||||
/* active flag will have been dropped already by the worker */
|
||||
ret = PTR_ERR(obj->userptr.work);
|
||||
obj->userptr.work = NULL;
|
||||
return ret;
|
||||
}
|
||||
if (obj->userptr.work)
|
||||
/* active flag should still be held for the pending work */
|
||||
return -EAGAIN;
|
||||
|
||||
/* Let the mmu-notifier know that we have begun and need cancellation */
|
||||
ret = __i915_gem_userptr_set_active(obj, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pvec = NULL;
|
||||
pinned = 0;
|
||||
@ -657,73 +719,27 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
|
||||
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
|
||||
if (pvec == NULL) {
|
||||
pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
|
||||
if (pvec == NULL)
|
||||
if (pvec == NULL) {
|
||||
__i915_gem_userptr_set_active(obj, false);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
|
||||
!obj->userptr.read_only, pvec);
|
||||
}
|
||||
if (pinned < num_pages) {
|
||||
if (pinned < 0) {
|
||||
ret = pinned;
|
||||
pinned = 0;
|
||||
} else {
|
||||
/* Spawn a worker so that we can acquire the
|
||||
* user pages without holding our mutex. Access
|
||||
* to the user pages requires mmap_sem, and we have
|
||||
* a strict lock ordering of mmap_sem, struct_mutex -
|
||||
* we already hold struct_mutex here and so cannot
|
||||
* call gup without encountering a lock inversion.
|
||||
*
|
||||
* Userspace will keep on repeating the operation
|
||||
* (thanks to EAGAIN) until either we hit the fast
|
||||
* path or the worker completes. If the worker is
|
||||
* cancelled or superseded, the task is still run
|
||||
* but the results ignored. (This leads to
|
||||
* complications that we may have a stray object
|
||||
* refcount that we need to be wary of when
|
||||
* checking for existing objects during creation.)
|
||||
* If the worker encounters an error, it reports
|
||||
* that error back to this function through
|
||||
* obj->userptr.work = ERR_PTR.
|
||||
*/
|
||||
ret = -EAGAIN;
|
||||
if (obj->userptr.work == NULL &&
|
||||
obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
|
||||
struct get_pages_work *work;
|
||||
|
||||
work = kmalloc(sizeof(*work), GFP_KERNEL);
|
||||
if (work != NULL) {
|
||||
obj->userptr.work = &work->work;
|
||||
obj->userptr.workers++;
|
||||
|
||||
work->obj = obj;
|
||||
drm_gem_object_reference(&obj->base);
|
||||
|
||||
work->task = current;
|
||||
get_task_struct(work->task);
|
||||
|
||||
INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
|
||||
schedule_work(&work->work);
|
||||
} else
|
||||
ret = -ENOMEM;
|
||||
} else {
|
||||
if (IS_ERR(obj->userptr.work)) {
|
||||
ret = PTR_ERR(obj->userptr.work);
|
||||
obj->userptr.work = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
active = false;
|
||||
if (pinned < 0)
|
||||
ret = pinned, pinned = 0;
|
||||
else if (pinned < num_pages)
|
||||
ret = __i915_gem_userptr_get_pages_schedule(obj, &active);
|
||||
else
|
||||
ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
|
||||
if (ret == 0) {
|
||||
obj->userptr.work = NULL;
|
||||
pinned = 0;
|
||||
}
|
||||
if (ret) {
|
||||
__i915_gem_userptr_set_active(obj, active);
|
||||
release_pages(pvec, pinned, 0);
|
||||
}
|
||||
|
||||
release_pages(pvec, pinned, 0);
|
||||
drm_free_large(pvec);
|
||||
return ret;
|
||||
}
|
||||
@ -734,6 +750,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
|
||||
struct sg_page_iter sg_iter;
|
||||
|
||||
BUG_ON(obj->userptr.work != NULL);
|
||||
__i915_gem_userptr_set_active(obj, false);
|
||||
|
||||
if (obj->madv != I915_MADV_WILLNEED)
|
||||
obj->dirty = 0;
|
||||
|
@ -792,20 +792,15 @@ static void i915_gem_record_fences(struct drm_device *dev,
|
||||
int i;
|
||||
|
||||
if (IS_GEN3(dev) || IS_GEN2(dev)) {
|
||||
for (i = 0; i < 8; i++)
|
||||
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
|
||||
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
|
||||
for (i = 0; i < 8; i++)
|
||||
error->fence[i+8] = I915_READ(FENCE_REG_945_8 +
|
||||
(i * 4));
|
||||
} else if (IS_GEN5(dev) || IS_GEN4(dev))
|
||||
for (i = 0; i < 16; i++)
|
||||
error->fence[i] = I915_READ64(FENCE_REG_965_0 +
|
||||
(i * 8));
|
||||
else if (INTEL_INFO(dev)->gen >= 6)
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++)
|
||||
error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 +
|
||||
(i * 8));
|
||||
error->fence[i] = I915_READ(FENCE_REG(i));
|
||||
} else if (IS_GEN5(dev) || IS_GEN4(dev)) {
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++)
|
||||
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
|
||||
} else if (INTEL_INFO(dev)->gen >= 6) {
|
||||
for (i = 0; i < dev_priv->num_fence_regs; i++)
|
||||
error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -891,7 +886,7 @@ static void i915_record_ring_state(struct drm_device *dev,
|
||||
ering->faddr = I915_READ(DMA_FADD_I8XX);
|
||||
ering->ipeir = I915_READ(IPEIR);
|
||||
ering->ipehr = I915_READ(IPEHR);
|
||||
ering->instdone = I915_READ(INSTDONE);
|
||||
ering->instdone = I915_READ(GEN2_INSTDONE);
|
||||
}
|
||||
|
||||
ering->waiting = waitqueue_active(&ring->irq_queue);
|
||||
@ -1393,12 +1388,12 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
|
||||
memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
|
||||
|
||||
if (IS_GEN2(dev) || IS_GEN3(dev))
|
||||
instdone[0] = I915_READ(INSTDONE);
|
||||
instdone[0] = I915_READ(GEN2_INSTDONE);
|
||||
else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
|
||||
instdone[0] = I915_READ(INSTDONE_I965);
|
||||
instdone[1] = I915_READ(INSTDONE1);
|
||||
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
|
||||
instdone[1] = I915_READ(GEN4_INSTDONE1);
|
||||
} else if (INTEL_INFO(dev)->gen >= 7) {
|
||||
instdone[0] = I915_READ(GEN7_INSTDONE_1);
|
||||
instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
|
||||
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
|
||||
instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
|
||||
instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
|
||||
|
@ -37,10 +37,11 @@
|
||||
#define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT)
|
||||
#define GS_MIA_SHIFT 16
|
||||
#define GS_MIA_MASK (0x07 << GS_MIA_SHIFT)
|
||||
#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT)
|
||||
|
||||
#define SOFT_SCRATCH(n) (0xc180 + ((n) * 4))
|
||||
|
||||
#define UOS_RSA_SCRATCH_0 0xc200
|
||||
#define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4)
|
||||
#define DMA_ADDR_0_LOW 0xc300
|
||||
#define DMA_ADDR_0_HIGH 0xc304
|
||||
#define DMA_ADDR_1_LOW 0xc308
|
||||
|
@ -155,12 +155,21 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
|
||||
struct i915_guc_client *client)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = guc_to_i915(guc);
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
u32 data[2];
|
||||
|
||||
data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
|
||||
data[1] = (intel_enable_rc6(dev_priv->dev)) ? 1 : 0;
|
||||
/* WaRsDisableCoarsePowerGating:skl,bxt */
|
||||
if (!intel_enable_rc6(dev_priv->dev) ||
|
||||
(IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
|
||||
(IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) ||
|
||||
(IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
|
||||
data[1] = 0;
|
||||
else
|
||||
/* bit 0 and 1 are for Render and Media domain separately */
|
||||
data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;
|
||||
|
||||
return host2guc_action(guc, data, 2);
|
||||
return host2guc_action(guc, data, ARRAY_SIZE(data));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -914,3 +923,53 @@ void i915_guc_submission_fini(struct drm_device *dev)
|
||||
gem_release_guc_obj(guc->ctx_pool_obj);
|
||||
guc->ctx_pool_obj = NULL;
|
||||
}
|
||||
|
||||
/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @dev: drm device
 */
int intel_guc_suspend(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_guc *guc = &dev_priv->guc;
|
||||
struct intel_context *ctx;
|
||||
u32 data[3];
|
||||
|
||||
if (!i915.enable_guc_submission)
|
||||
return 0;
|
||||
|
||||
ctx = dev_priv->ring[RCS].default_context;
|
||||
|
||||
data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
|
||||
/* any value greater than GUC_POWER_D0 */
|
||||
data[1] = GUC_POWER_D1;
|
||||
/* first page is shared data with GuC */
|
||||
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
|
||||
|
||||
return host2guc_action(guc, data, ARRAY_SIZE(data));
|
||||
}
|
||||
|
||||
|
||||
/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @dev: drm device
 */
int intel_guc_resume(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_guc *guc = &dev_priv->guc;
|
||||
struct intel_context *ctx;
|
||||
u32 data[3];
|
||||
|
||||
if (!i915.enable_guc_submission)
|
||||
return 0;
|
||||
|
||||
ctx = dev_priv->ring[RCS].default_context;
|
||||
|
||||
data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
|
||||
data[1] = GUC_POWER_D0;
|
||||
/* first page is shared data with GuC */
|
||||
data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state);
|
||||
|
||||
return host2guc_action(guc, data, ARRAY_SIZE(data));
|
||||
}
|
||||
|
@ -581,6 +581,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
|
||||
|
||||
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
|
||||
@ -997,12 +998,16 @@ static bool vlv_c0_above(struct drm_i915_private *dev_priv,
|
||||
int threshold)
|
||||
{
|
||||
u64 time, c0;
|
||||
unsigned int mul = 100;
|
||||
|
||||
if (old->cz_clock == 0)
|
||||
return false;
|
||||
|
||||
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
|
||||
mul <<= 8;
|
||||
|
||||
time = now->cz_clock - old->cz_clock;
|
||||
time *= threshold * dev_priv->mem_freq;
|
||||
time *= threshold * dev_priv->czclk_freq;
|
||||
|
||||
/* Workload can be split between render + media, e.g. SwapBuffers
|
||||
* being blitted in X after being rendered in mesa. To account for
|
||||
@ -1010,7 +1015,7 @@ static bool vlv_c0_above(struct drm_i915_private *dev_priv,
|
||||
*/
|
||||
c0 = now->render_c0 - old->render_c0;
|
||||
c0 += now->media_c0 - old->media_c0;
|
||||
c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;
|
||||
c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
|
||||
|
||||
return c0 >= time;
|
||||
}
|
||||
@ -2388,6 +2393,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
|
||||
|
||||
/**
 * i915_reset_and_wakeup - do process context error handling work
 * @dev: drm device
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
@ -2565,7 +2571,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of regsiter state at error time and
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
@ -2778,6 +2784,26 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
	u64 offset = 0;
	int i, backwards;

	/*
	 * This function does not support execlist mode - any attempt to
	 * proceed further into this function will result in a kernel panic
	 * when dereferencing ring->buffer, which is not set up in execlist
	 * mode.
	 *
	 * The correct way of doing it would be to derive the currently
	 * executing ring buffer from the current context, which is derived
	 * from the currently running request. Unfortunately, to get the
	 * current request we would have to grab the struct_mutex before doing
	 * anything else, which would be ill-advised since some other thread
	 * might have grabbed it already and managed to hang itself, causing
	 * the hang checker to deadlock.
	 *
	 * Therefore, this function does not support execlist mode in its
	 * current form. Just return NULL and move on.
	 */
	if (ring->buffer == NULL)
		return NULL;
|
||||
|
||||
ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
|
||||
if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
|
||||
return NULL;
|
||||
|
@ -105,7 +105,7 @@
|
||||
#define GRDOM_RESET_STATUS (1<<1)
|
||||
#define GRDOM_RESET_ENABLE (1<<0)
|
||||
|
||||
#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
|
||||
#define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4)
|
||||
#define ILK_GRDOM_FULL (0<<1)
|
||||
#define ILK_GRDOM_RENDER (1<<1)
|
||||
#define ILK_GRDOM_MEDIA (3<<1)
|
||||
@ -536,6 +536,10 @@
|
||||
#define GEN7_3DPRIM_START_INSTANCE 0x243C
|
||||
#define GEN7_3DPRIM_BASE_VERTEX 0x2440
|
||||
|
||||
#define GEN7_GPGPU_DISPATCHDIMX 0x2500
|
||||
#define GEN7_GPGPU_DISPATCHDIMY 0x2504
|
||||
#define GEN7_GPGPU_DISPATCHDIMZ 0x2508
|
||||
|
||||
#define OACONTROL 0x2360
|
||||
|
||||
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
|
||||
@ -728,12 +732,13 @@ enum skl_disp_power_wells {
|
||||
#define DSI_PLL_N1_DIV_MASK (3 << 16)
|
||||
#define DSI_PLL_M1_DIV_SHIFT 0
|
||||
#define DSI_PLL_M1_DIV_MASK (0x1ff << 0)
|
||||
#define CCK_CZ_CLOCK_CONTROL 0x62
|
||||
#define CCK_DISPLAY_CLOCK_CONTROL 0x6b
|
||||
#define DISPLAY_TRUNK_FORCE_ON (1 << 17)
|
||||
#define DISPLAY_TRUNK_FORCE_OFF (1 << 16)
|
||||
#define DISPLAY_FREQUENCY_STATUS (0x1f << 8)
|
||||
#define DISPLAY_FREQUENCY_STATUS_SHIFT 8
|
||||
#define DISPLAY_FREQUENCY_VALUES (0x1f << 0)
|
||||
#define CCK_TRUNK_FORCE_ON (1 << 17)
|
||||
#define CCK_TRUNK_FORCE_OFF (1 << 16)
|
||||
#define CCK_FREQUENCY_STATUS (0x1f << 8)
|
||||
#define CCK_FREQUENCY_STATUS_SHIFT 8
|
||||
#define CCK_FREQUENCY_VALUES (0x1f << 0)
|
||||
|
||||
/**
|
||||
* DOC: DPIO
|
||||
@ -1395,7 +1400,8 @@ enum skl_disp_power_wells {
|
||||
#define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \
|
||||
_PORT_TX_DW3_LN0_B, \
|
||||
_PORT_TX_DW3_LN0_C)
|
||||
#define UNIQE_TRANGE_EN_METHOD (1 << 27)
|
||||
#define SCALE_DCOMP_METHOD (1 << 26)
|
||||
#define UNIQUE_TRANGE_EN_METHOD (1 << 27)
|
||||
|
||||
#define _PORT_TX_DW4_LN0_A 0x162510
|
||||
#define _PORT_TX_DW4_LN0_B 0x6C510
|
||||
@ -1436,9 +1442,15 @@ enum skl_disp_power_wells {
|
||||
|
||||
/*
 * Fence registers
 * [0-7]  @ 0x2000 gen2,gen3
 * [8-15] @ 0x3000 945,g33,pnv
 *
 * [0-15] @ 0x3000 gen4,gen5
 *
 * [0-15] @ 0x100000 gen6,vlv,chv
 * [0-31] @ 0x100000 gen7+
 */
#define FENCE_REG_830_0		0x2000
#define FENCE_REG_945_8		0x3000
#define FENCE_REG(i)		(0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4)
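Worked examples of the new FENCE_REG(i) macro, matching the per-generation banks described in the comment above (values follow directly from the macro):

/* fence 3  (gen2/3 bank):  0x2000 + ((3 & 8)  << 9) + (3 & 7)  * 4 = 0x200c */
/* fence 10 (945/g33/pnv):  0x2000 + ((10 & 8) << 9) + (10 & 7) * 4
 *                        = 0x2000 + 0x1000 + 0x8                   = 0x3008 */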
|
||||
#define I830_FENCE_START_MASK 0x07f80000
|
||||
#define I830_FENCE_TILING_Y_SHIFT 12
|
||||
#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
|
||||
@ -1451,14 +1463,16 @@ enum skl_disp_power_wells {
|
||||
#define I915_FENCE_START_MASK 0x0ff00000
|
||||
#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
|
||||
|
||||
#define FENCE_REG_965_0 0x03000
|
||||
#define FENCE_REG_965_LO(i) (0x03000 + (i) * 8)
|
||||
#define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4)
|
||||
#define I965_FENCE_PITCH_SHIFT 2
|
||||
#define I965_FENCE_TILING_Y_SHIFT 1
|
||||
#define I965_FENCE_REG_VALID (1<<0)
|
||||
#define I965_FENCE_MAX_PITCH_VAL 0x0400
|
||||
|
||||
#define FENCE_REG_SANDYBRIDGE_0 0x100000
|
||||
#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
|
||||
#define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8)
|
||||
#define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4)
|
||||
#define GEN6_FENCE_PITCH_SHIFT 32
|
||||
#define GEN7_FENCE_MAX_PITCH_VAL 0x0800
|
||||
|
||||
|
||||
@ -1542,7 +1556,8 @@ enum skl_disp_power_wells {
|
||||
#define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3)
|
||||
#define RING_FAULT_VALID (1<<0)
|
||||
#define DONE_REG 0x40b0
|
||||
#define GEN8_PRIVATE_PAT 0x40e0
|
||||
#define GEN8_PRIVATE_PAT_LO 0x40e0
|
||||
#define GEN8_PRIVATE_PAT_HI (0x40e0 + 4)
|
||||
#define BSD_HWS_PGA_GEN7 (0x04180)
|
||||
#define BLT_HWS_PGA_GEN7 (0x04280)
|
||||
#define VEBOX_HWS_PGA_GEN7 (0x04380)
|
||||
@ -1582,14 +1597,17 @@ enum skl_disp_power_wells {
|
||||
#endif
|
||||
#define IPEIR_I965 0x02064
|
||||
#define IPEHR_I965 0x02068
|
||||
#define INSTDONE_I965 0x0206c
|
||||
#define GEN7_INSTDONE_1 0x0206c
|
||||
#define GEN7_SC_INSTDONE 0x07100
|
||||
#define GEN7_SAMPLER_INSTDONE 0x0e160
|
||||
#define GEN7_ROW_INSTDONE 0x0e164
|
||||
#define I915_NUM_INSTDONE_REG 4
|
||||
#define RING_IPEIR(base) ((base)+0x64)
|
||||
#define RING_IPEHR(base) ((base)+0x68)
|
||||
/*
 * On GEN4, only the render ring INSTDONE exists and has a different
 * layout than the GEN7+ version.
 * The GEN2 counterpart of this register is GEN2_INSTDONE.
 */
#define RING_INSTDONE(base)	((base)+0x6c)
#define RING_INSTPS(base)	((base)+0x70)
|
||||
#define RING_DMA_FADD(base) ((base)+0x78)
|
||||
@ -1597,7 +1615,7 @@ enum skl_disp_power_wells {
|
||||
#define RING_INSTPM(base) ((base)+0xc0)
|
||||
#define RING_MI_MODE(base) ((base)+0x9c)
|
||||
#define INSTPS 0x02070 /* 965+ only */
|
||||
#define INSTDONE1 0x0207c /* 965+ only */
|
||||
#define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */
|
||||
#define ACTHD_I965 0x02074
|
||||
#define HWS_PGA 0x02080
|
||||
#define HWS_ADDRESS_MASK 0xfffff000
|
||||
@ -1606,7 +1624,7 @@ enum skl_disp_power_wells {
|
||||
#define PWRCTX_EN (1<<0)
|
||||
#define IPEIR 0x02088
|
||||
#define IPEHR 0x0208c
|
||||
#define INSTDONE 0x02090
|
||||
#define GEN2_INSTDONE 0x02090
|
||||
#define NOPID 0x02094
|
||||
#define HWSTAM 0x02098
|
||||
#define DMA_FADD_I8XX 0x020d0
|
||||
@ -1876,12 +1894,27 @@ enum skl_disp_power_wells {
|
||||
#define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT)
|
||||
|
||||
#define GEN8_FUSE2 0x9120
|
||||
#define GEN8_F2_SS_DIS_SHIFT 21
|
||||
#define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT)
|
||||
#define GEN8_F2_S_ENA_SHIFT 25
|
||||
#define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT)
|
||||
|
||||
#define GEN9_F2_SS_DIS_SHIFT 20
|
||||
#define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT)
|
||||
|
||||
#define GEN8_EU_DISABLE0 0x9134
|
||||
#define GEN8_EU_DIS0_S0_MASK 0xffffff
|
||||
#define GEN8_EU_DIS0_S1_SHIFT 24
|
||||
#define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT)
|
||||
|
||||
#define GEN8_EU_DISABLE1 0x9138
|
||||
#define GEN8_EU_DIS1_S1_MASK 0xffff
|
||||
#define GEN8_EU_DIS1_S2_SHIFT 16
|
||||
#define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT)
|
||||
|
||||
#define GEN8_EU_DISABLE2 0x913c
|
||||
#define GEN8_EU_DIS2_S2_MASK 0xff
|
||||
|
||||
#define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4)
|
||||
|
||||
#define GEN6_BSD_SLEEP_PSMI_CONTROL 0x12050
|
||||
@ -2475,8 +2508,8 @@ enum skl_disp_power_wells {
|
||||
#define PALETTE_A_OFFSET 0xa000
|
||||
#define PALETTE_B_OFFSET 0xa800
|
||||
#define CHV_PALETTE_C_OFFSET 0xc000
|
||||
#define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \
|
||||
dev_priv->info.display_mmio_offset)
|
||||
#define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \
|
||||
dev_priv->info.display_mmio_offset + (i) * 4)
|
||||
|
||||
/* MCH MMIO space */
|
||||
|
||||
@ -2807,8 +2840,11 @@ enum skl_disp_power_wells {
|
||||
|
||||
#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
#define INTERVAL_1_33_US(us)	(((us) * 3)   >> 2)
#define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
				INTERVAL_1_33_US(us) : \
				(IS_BROXTON(dev_priv) ? \
				INTERVAL_0_833_US(us) : \
				INTERVAL_1_33_US(us)) : \
				INTERVAL_1_28_US(us))
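For a quick sanity check of the three interval units above, a 50 us value converts as follows (plain integer arithmetic, exactly as the macros are written):

/* 1.28 us units (pre-gen9): (50 * 100) >> 7 = 5000 / 128 = 39 */
/* 1.33 us units (gen9):     (50 * 3)   >> 2 = 150  / 4   = 37 */
/* 0.833 us units (bxt):     (50 * 6)   / 5  = 300  / 5   = 60 */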
|
||||
|
||||
/*
|
||||
@ -3264,7 +3300,9 @@ enum skl_disp_power_wells {
|
||||
#define GEN3_SDVOC 0x61160
|
||||
#define GEN4_HDMIB GEN3_SDVOB
|
||||
#define GEN4_HDMIC GEN3_SDVOC
|
||||
#define CHV_HDMID 0x6116C
|
||||
#define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB)
|
||||
#define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC)
|
||||
#define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C)
|
||||
#define PCH_SDVOB 0xe1140
|
||||
#define PCH_HDMIB PCH_SDVOB
|
||||
#define PCH_HDMIC 0xe1150
|
||||
@ -3596,17 +3634,29 @@ enum skl_disp_power_wells {
|
||||
#define UTIL_PIN_CTL 0x48400
|
||||
#define UTIL_PIN_ENABLE (1 << 31)
|
||||
|
||||
#define UTIL_PIN_PIPE(x) ((x) << 29)
|
||||
#define UTIL_PIN_PIPE_MASK (3 << 29)
|
||||
#define UTIL_PIN_MODE_PWM (1 << 24)
|
||||
#define UTIL_PIN_MODE_MASK (0xf << 24)
|
||||
#define UTIL_PIN_POLARITY (1 << 22)
|
||||
|
||||
/* BXT backlight register definition. */
|
||||
#define BXT_BLC_PWM_CTL1 0xC8250
|
||||
#define _BXT_BLC_PWM_CTL1 0xC8250
|
||||
#define BXT_BLC_PWM_ENABLE (1 << 31)
|
||||
#define BXT_BLC_PWM_POLARITY (1 << 29)
|
||||
#define BXT_BLC_PWM_FREQ1 0xC8254
|
||||
#define BXT_BLC_PWM_DUTY1 0xC8258
|
||||
#define _BXT_BLC_PWM_FREQ1 0xC8254
|
||||
#define _BXT_BLC_PWM_DUTY1 0xC8258
|
||||
|
||||
#define BXT_BLC_PWM_CTL2 0xC8350
|
||||
#define BXT_BLC_PWM_FREQ2 0xC8354
|
||||
#define BXT_BLC_PWM_DUTY2 0xC8358
|
||||
#define _BXT_BLC_PWM_CTL2 0xC8350
|
||||
#define _BXT_BLC_PWM_FREQ2 0xC8354
|
||||
#define _BXT_BLC_PWM_DUTY2 0xC8358
|
||||
|
||||
#define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \
|
||||
_BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2)
|
||||
#define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \
|
||||
_BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2)
|
||||
#define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \
|
||||
_BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2)
|
||||
|
||||
#define PCH_GTC_CTL 0xe7000
|
||||
#define PCH_GTC_ENABLE (1 << 31)
|
||||
@ -4093,6 +4143,10 @@ enum skl_disp_power_wells {
|
||||
#define DP_C 0x64200
|
||||
#define DP_D 0x64300
|
||||
|
||||
#define VLV_DP_B (VLV_DISPLAY_BASE + DP_B)
|
||||
#define VLV_DP_C (VLV_DISPLAY_BASE + DP_C)
|
||||
#define CHV_DP_D (VLV_DISPLAY_BASE + DP_D)
|
||||
|
||||
#define DP_PORT_EN (1 << 31)
|
||||
#define DP_PIPEB_SELECT (1 << 30)
|
||||
#define DP_PIPE_MASK (1 << 30)
|
||||
@ -5631,7 +5685,7 @@ enum skl_disp_power_wells {
|
||||
/* legacy palette */
|
||||
#define _LGC_PALETTE_A 0x4a000
|
||||
#define _LGC_PALETTE_B 0x4a800
|
||||
#define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B)
|
||||
#define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
|
||||
|
||||
#define _GAMMA_MODE_A 0x4a480
|
||||
#define _GAMMA_MODE_B 0x4ac80
|
||||
@ -6868,6 +6922,9 @@ enum skl_disp_power_wells {
|
||||
#define GEN6_RC6 3
|
||||
#define GEN6_RC7 4
|
||||
|
||||
#define GEN8_GT_SLICE_INFO 0x138064
|
||||
#define GEN8_LSLICESTAT_MASK 0x7
|
||||
|
||||
#define CHV_POWER_SS0_SIG1 0xa720
|
||||
#define CHV_POWER_SS1_SIG1 0xa728
|
||||
#define CHV_SS_PG_ENABLE (1<<1)
|
||||
@ -7403,8 +7460,8 @@ enum skl_disp_power_wells {
|
||||
#define DPLL_CFGCR2_PDIV_7 (4<<2)
|
||||
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
|
||||
|
||||
#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
|
||||
#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
|
||||
#define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8)
|
||||
#define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8)
|
||||
|
||||
/* BXT display engine PLL */
|
||||
#define BXT_DE_PLL_CTL 0x6d000
|
||||
@ -7509,6 +7566,68 @@ enum skl_disp_power_wells {
|
||||
|
||||
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
|
||||
|
||||
/* BXT MIPI clock controls */
|
||||
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
|
||||
|
||||
#define BXT_MIPI_CLOCK_CTL 0x46090
|
||||
#define BXT_MIPI1_DIV_SHIFT 26
|
||||
#define BXT_MIPI2_DIV_SHIFT 10
|
||||
#define BXT_MIPI_DIV_SHIFT(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \
|
||||
BXT_MIPI2_DIV_SHIFT)
|
||||
/* Var clock divider to generate TX source. Result must be < 39.5 M */
|
||||
#define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26)
|
||||
#define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10)
|
||||
#define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \
|
||||
BXT_MIPI2_ESCLK_VAR_DIV_MASK)
|
||||
|
||||
#define BXT_MIPI_ESCLK_VAR_DIV(port, val) \
|
||||
(val << BXT_MIPI_DIV_SHIFT(port))
|
||||
/* TX control divider to select actual TX clock output from (8x/var) */
|
||||
#define BXT_MIPI1_TX_ESCLK_SHIFT 21
|
||||
#define BXT_MIPI2_TX_ESCLK_SHIFT 5
|
||||
#define BXT_MIPI_TX_ESCLK_SHIFT(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \
|
||||
BXT_MIPI2_TX_ESCLK_SHIFT)
|
||||
#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21)
|
||||
#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5)
|
||||
#define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \
|
||||
BXT_MIPI2_TX_ESCLK_FIXDIV_MASK)
|
||||
#define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \
|
||||
(0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port))
|
||||
#define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \
|
||||
(0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port))
|
||||
#define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \
|
||||
(0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port))
|
||||
/* RX control divider to select actual RX clock output from 8x*/
|
||||
#define BXT_MIPI1_RX_ESCLK_SHIFT 19
|
||||
#define BXT_MIPI2_RX_ESCLK_SHIFT 3
|
||||
#define BXT_MIPI_RX_ESCLK_SHIFT(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \
|
||||
BXT_MIPI2_RX_ESCLK_SHIFT)
|
||||
#define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19)
|
||||
#define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3)
|
||||
#define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \
|
||||
(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
|
||||
#define BXT_MIPI_RX_ESCLK_8X_BY2(port) \
|
||||
(1 << BXT_MIPI_RX_ESCLK_SHIFT(port))
|
||||
#define BXT_MIPI_RX_ESCLK_8X_BY3(port) \
|
||||
(2 << BXT_MIPI_RX_ESCLK_SHIFT(port))
|
||||
#define BXT_MIPI_RX_ESCLK_8X_BY4(port) \
|
||||
(3 << BXT_MIPI_RX_ESCLK_SHIFT(port))
|
||||
/* BXT-A WA: Always prog DPHY dividers to 00 */
|
||||
#define BXT_MIPI1_DPHY_DIV_SHIFT 16
|
||||
#define BXT_MIPI2_DPHY_DIV_SHIFT 0
|
||||
#define BXT_MIPI_DPHY_DIV_SHIFT(port) \
|
||||
_MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \
|
||||
BXT_MIPI2_DPHY_DIV_SHIFT)
|
||||
#define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16)
|
||||
#define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0)
|
||||
#define BXT_MIPI_DPHY_DIVIDER_MASK(port) \
|
||||
(3 << BXT_MIPI_DPHY_DIV_SHIFT(port))
|
||||
|
||||
/* BXT MIPI mode configure */
|
||||
#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
|
||||
#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
|
||||
@ -7550,6 +7669,13 @@ enum skl_disp_power_wells {
|
||||
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
|
||||
#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
|
||||
#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
|
||||
|
||||
/* BXT port control */
|
||||
#define _BXT_MIPIA_PORT_CTRL 0x6B0C0
|
||||
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
|
||||
#define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \
|
||||
_BXT_MIPIC_PORT_CTRL)
|
||||
|
||||
#define DPI_ENABLE (1 << 31) /* A + C */
|
||||
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
|
||||
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
|
||||
|
@ -39,7 +39,7 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u64 raw_time; /* 32b value may overflow during fixed point math */
|
||||
u64 units = 128ULL, div = 100000ULL, bias = 100ULL;
|
||||
u64 units = 128ULL, div = 100000ULL;
|
||||
u32 ret;
|
||||
|
||||
if (!intel_enable_rc6(dev))
|
||||
@ -49,41 +49,19 @@ static u32 calc_residency(struct drm_device *dev, const u32 reg)
|
||||
|
||||
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
|
||||
if (IS_VALLEYVIEW(dev)) {
|
||||
u32 clk_reg, czcount_30ns;
|
||||
|
||||
if (IS_CHERRYVIEW(dev))
|
||||
clk_reg = CHV_CLK_CTL1;
|
||||
else
|
||||
clk_reg = VLV_CLK_CTL2;
|
||||
|
||||
czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT;
|
||||
|
||||
if (!czcount_30ns) {
|
||||
WARN(!czcount_30ns, "bogus CZ count value");
|
||||
ret = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) {
|
||||
/* Special case for 320Mhz */
|
||||
div = 10000000ULL;
|
||||
units = 3125ULL;
|
||||
} else {
|
||||
czcount_30ns += 1;
|
||||
div = 1000000ULL;
|
||||
units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns);
|
||||
}
|
||||
units = 1;
|
||||
div = dev_priv->czclk_freq;
|
||||
|
||||
if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
|
||||
units <<= 8;
|
||||
|
||||
div = div * bias;
|
||||
} else if (IS_BROXTON(dev)) {
|
||||
units = 1;
|
||||
div = 1200; /* 833.33ns */
|
||||
}
|
||||
|
||||
raw_time = I915_READ(reg) * units;
|
||||
ret = DIV_ROUND_UP_ULL(raw_time, div);
|
||||
|
||||
out:
|
||||
intel_runtime_pm_put(dev_priv);
|
||||
return ret;
|
||||
}
|
||||
|
@ -107,6 +107,26 @@ TRACE_EVENT(i915_gem_object_create,
|
||||
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
|
||||
);
|
||||
|
||||
TRACE_EVENT(i915_gem_shrink,
	    TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags),
	    TP_ARGS(i915, target, flags),

	    TP_STRUCT__entry(
			     __field(int, dev)
			     __field(unsigned long, target)
			     __field(unsigned, flags)
			     ),

	    TP_fast_assign(
			   __entry->dev = i915->dev->primary->index;
			   __entry->target = target;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%d, target=%lu, flags=%x",
		      __entry->dev, __entry->target, __entry->flags)
);
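TRACE_EVENT() expands this into a trace_i915_gem_shrink() helper; a call site in the shrinker would look roughly like the line below (argument names assumed for illustration):

	trace_i915_gem_shrink(dev_priv, target, flags);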
|
||||
|
||||
TRACE_EVENT(i915_vma_bind,
|
||||
TP_PROTO(struct i915_vma *vma, unsigned flags),
|
||||
TP_ARGS(vma, flags),
|
||||
|
@ -94,6 +94,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
|
||||
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
|
||||
|
||||
crtc_state->update_pipe = false;
|
||||
crtc_state->disable_lp_wm = false;
|
||||
|
||||
return &crtc_state->base;
|
||||
}
|
||||
|
@ -50,6 +50,11 @@
 * co-operation between the graphics and audio drivers is handled via audio
 * related registers. (The notable exception is the power management, not
 * covered here.)
 *
 * The struct i915_audio_component is used to interact between the graphics
 * and audio drivers. The struct i915_audio_component_ops *ops in it is
 * defined in graphics driver and called in audio driver. The
 * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver.
 */

static const struct {
@ -68,18 +73,44 @@ static const struct {
	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
};

/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M DIV_ROUND_UP(297000 * 1000, 1001)
static const struct {
	int sample_rate;
	int clock;
	int n;
	int cts;
} aud_ncts[] = {
	{ 44100, TMDS_296M, 4459, 234375 },
	{ 44100, TMDS_297M, 4704, 247500 },
	{ 48000, TMDS_296M, 5824, 281250 },
	{ 48000, TMDS_297M, 5120, 247500 },
	{ 32000, TMDS_296M, 5824, 421875 },
	{ 32000, TMDS_297M, 3072, 222750 },
	{ 88200, TMDS_296M, 8918, 234375 },
	{ 88200, TMDS_297M, 9408, 247500 },
	{ 96000, TMDS_296M, 11648, 281250 },
	{ 96000, TMDS_297M, 10240, 247500 },
	{ 176400, TMDS_296M, 17836, 234375 },
	{ 176400, TMDS_297M, 18816, 247500 },
	{ 192000, TMDS_296M, 23296, 281250 },
	{ 192000, TMDS_297M, 20480, 247500 },
};
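These rows follow the HDMI audio clock regeneration relation CTS = (f_TMDS * N) / (128 * f_s); for example, 48 kHz at 297 MHz gives 297,000,000 * 5120 / (128 * 48,000) = 247,500, matching the table entry. When such an N value is programmed, audio_config_setup_n_reg() below splits it into the lower 12 and upper 8 bits of the AUD_CONFIG N field, e.g.:

/* N = 5120 = 0x1400: n_low = 0x1400 & 0xfff = 0x400, n_up = 0x1400 >> 12 = 0x1 */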
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
|
||||
static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
|
||||
static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
|
||||
if (mode->clock == hdmi_audio_clock[i].clock)
|
||||
if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
|
||||
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
|
||||
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
|
||||
adjusted_mode->crtc_clock);
|
||||
i = 1;
|
||||
}
|
||||
|
||||
@ -90,6 +121,45 @@ static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
|
||||
return hdmi_audio_clock[i].config;
|
||||
}
|
||||
|
||||
static int audio_config_get_n(const struct drm_display_mode *mode, int rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) {
		if ((rate == aud_ncts[i].sample_rate) &&
		    (mode->clock == aud_ncts[i].clock)) {
			return aud_ncts[i].n;
		}
	}
	return 0;
}

static uint32_t audio_config_setup_n_reg(int n, uint32_t val)
{
	int n_low, n_up;
	uint32_t tmp = val;

	n_low = n & 0xfff;
	n_up = (n >> 12) & 0xff;
	tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK);
	tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) |
		(n_low << AUD_CONFIG_LOWER_N_SHIFT) |
		AUD_CONFIG_N_PROG_ENABLE);
	return tmp;
}

/* check whether N/CTS/M need be set manually */
static bool audio_rate_need_prog(struct intel_crtc *crtc,
				 const struct drm_display_mode *mode)
{
	if (((mode->clock == TMDS_297M) ||
	     (mode->clock == TMDS_296M)) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		return true;
	else
		return false;
}
|
||||
|
||||
static bool intel_eld_uptodate(struct drm_connector *connector,
|
||||
int reg_eldv, uint32_t bits_eldv,
|
||||
int reg_elda, uint32_t bits_elda,
|
||||
@ -138,7 +208,7 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder)
|
||||
|
||||
static void g4x_audio_codec_enable(struct drm_connector *connector,
|
||||
struct intel_encoder *encoder,
|
||||
struct drm_display_mode *mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
uint8_t *eld = connector->eld;
|
||||
@ -184,6 +254,8 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
|
||||
|
||||
DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
|
||||
|
||||
mutex_lock(&dev_priv->av_mutex);
|
||||
|
||||
/* Disable timestamps */
|
||||
tmp = I915_READ(HSW_AUD_CFG(pipe));
|
||||
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
|
||||
@ -199,22 +271,31 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
|
||||
tmp &= ~AUDIO_ELD_VALID(pipe);
|
||||
tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
|
||||
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
|
||||
|
||||
mutex_unlock(&dev_priv->av_mutex);
|
||||
}
|
||||
|
||||
static void hsw_audio_codec_enable(struct drm_connector *connector,
|
||||
struct intel_encoder *encoder,
|
||||
struct drm_display_mode *mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
enum pipe pipe = intel_crtc->pipe;
|
||||
struct i915_audio_component *acomp = dev_priv->audio_component;
|
||||
const uint8_t *eld = connector->eld;
|
||||
struct intel_digital_port *intel_dig_port =
|
||||
enc_to_dig_port(&encoder->base);
|
||||
enum port port = intel_dig_port->port;
|
||||
uint32_t tmp;
|
||||
int len, i;
|
||||
int n, rate;
|
||||
|
||||
DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
|
||||
pipe_name(pipe), drm_eld_size(eld));
|
||||
|
||||
mutex_lock(&dev_priv->av_mutex);
|
||||
|
||||
/* Enable audio presence detect, invalidate ELD */
|
||||
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
|
||||
tmp |= AUDIO_OUTPUT_ENABLE(pipe);
|
||||
@ -246,13 +327,32 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
|
||||
/* Enable timestamps */
|
||||
tmp = I915_READ(HSW_AUD_CFG(pipe));
|
||||
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
|
||||
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
|
||||
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
|
||||
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
tmp |= AUD_CONFIG_N_VALUE_INDEX;
|
||||
else
|
||||
tmp |= audio_config_hdmi_pixel_clock(mode);
|
||||
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
|
||||
|
||||
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
|
||||
if (audio_rate_need_prog(intel_crtc, adjusted_mode)) {
|
||||
if (!acomp)
|
||||
rate = 0;
|
||||
else if (port >= PORT_A && port <= PORT_E)
|
||||
rate = acomp->aud_sample_rate[port];
|
||||
else {
|
||||
DRM_ERROR("invalid port: %d\n", port);
|
||||
rate = 0;
|
||||
}
|
||||
n = audio_config_get_n(adjusted_mode, rate);
|
||||
if (n != 0)
|
||||
tmp = audio_config_setup_n_reg(n, tmp);
|
||||
else
|
||||
DRM_DEBUG_KMS("no suitable N value is found\n");
|
||||
}
|
||||
|
||||
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
|
||||
|
||||
mutex_unlock(&dev_priv->av_mutex);
|
||||
}
|
||||
|
||||
static void ilk_audio_codec_disable(struct intel_encoder *encoder)
|
||||
@ -304,7 +404,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
|
||||
|
||||
static void ilk_audio_codec_enable(struct drm_connector *connector,
|
||||
struct intel_encoder *encoder,
|
||||
struct drm_display_mode *mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
@ -381,7 +481,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
|
||||
if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
tmp |= AUD_CONFIG_N_VALUE_INDEX;
|
||||
else
|
||||
tmp |= audio_config_hdmi_pixel_clock(mode);
|
||||
tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
|
||||
I915_WRITE(aud_config, tmp);
|
||||
}
|
||||
|
||||
@ -396,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
|
||||
{
|
||||
struct drm_encoder *encoder = &intel_encoder->base;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
|
||||
struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
struct drm_connector *connector;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -419,10 +519,11 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
|
||||
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
|
||||
connector->eld[5] |= (1 << 2);
|
||||
|
||||
connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
|
||||
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
|
||||
|
||||
if (dev_priv->display.audio_codec_enable)
|
||||
dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
|
||||
dev_priv->display.audio_codec_enable(connector, intel_encoder,
|
||||
adjusted_mode);
|
||||
|
||||
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
|
||||
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port);
|
||||
@ -527,12 +628,91 @@ static int i915_audio_component_get_cdclk_freq(struct device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int i915_audio_component_sync_audio_rate(struct device *dev,
|
||||
int port, int rate)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = dev_to_i915(dev);
|
||||
struct drm_device *drm_dev = dev_priv->dev;
|
||||
struct intel_encoder *intel_encoder;
|
||||
struct intel_digital_port *intel_dig_port;
|
||||
struct intel_crtc *crtc;
|
||||
struct drm_display_mode *mode;
|
||||
struct i915_audio_component *acomp = dev_priv->audio_component;
|
||||
enum pipe pipe = -1;
|
||||
u32 tmp;
|
||||
int n;
|
||||
|
||||
/* HSW, BDW SKL need this fix */
|
||||
if (!IS_SKYLAKE(dev_priv) &&
|
||||
!IS_BROADWELL(dev_priv) &&
|
||||
!IS_HASWELL(dev_priv))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&dev_priv->av_mutex);
|
||||
/* 1. get the pipe */
|
||||
for_each_intel_encoder(drm_dev, intel_encoder) {
|
||||
if (intel_encoder->type != INTEL_OUTPUT_HDMI)
|
||||
continue;
|
||||
intel_dig_port = enc_to_dig_port(&intel_encoder->base);
|
||||
if (port == intel_dig_port->port) {
|
||||
crtc = to_intel_crtc(intel_encoder->base.crtc);
|
||||
if (!crtc) {
|
||||
DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__);
|
||||
continue;
|
||||
}
|
||||
pipe = crtc->pipe;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (pipe == INVALID_PIPE) {
|
||||
DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port));
|
||||
mutex_unlock(&dev_priv->av_mutex);
|
||||
return -ENODEV;
|
||||
}
|
||||
DRM_DEBUG_KMS("pipe %c connects port %c\n",
|
||||
pipe_name(pipe), port_name(port));
|
||||
mode = &crtc->config->base.adjusted_mode;
|
||||
|
||||
/* port must be valid now, otherwise the pipe will be invalid */
|
||||
acomp->aud_sample_rate[port] = rate;
|
||||
|
||||
/* 2. check whether to set the N/CTS/M manually or not */
|
||||
if (!audio_rate_need_prog(crtc, mode)) {
|
||||
tmp = I915_READ(HSW_AUD_CFG(pipe));
|
||||
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
|
||||
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
|
||||
mutex_unlock(&dev_priv->av_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
n = audio_config_get_n(mode, rate);
|
||||
if (n == 0) {
|
||||
DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n",
|
||||
port_name(port));
|
||||
tmp = I915_READ(HSW_AUD_CFG(pipe));
|
||||
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
|
||||
I915_WRITE(HSW_AUD_CFG(pipe), tmp);
|
||||
mutex_unlock(&dev_priv->av_mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
	/* 3. set the N/CTS/M */
	tmp = I915_READ(HSW_AUD_CFG(pipe));
	tmp = audio_config_setup_n_reg(n, tmp);
	I915_WRITE(HSW_AUD_CFG(pipe), tmp);

	mutex_unlock(&dev_priv->av_mutex);
	return 0;
}
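On the audio driver side this hook is reached through the component ops published in struct i915_audio_component; a minimal sketch of the call (port is the DDI port index, rate the sample rate in Hz, both assumed to be known to the caller) is:

	if (acomp->ops && acomp->ops->sync_audio_rate)
		acomp->ops->sync_audio_rate(acomp->dev, port, rate);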
|
||||
|
||||
static const struct i915_audio_component_ops i915_audio_component_ops = {
|
||||
.owner = THIS_MODULE,
|
||||
.get_power = i915_audio_component_get_power,
|
||||
.put_power = i915_audio_component_put_power,
|
||||
.codec_wake_override = i915_audio_component_codec_wake_override,
|
||||
.get_cdclk_freq = i915_audio_component_get_cdclk_freq,
|
||||
.sync_audio_rate = i915_audio_component_sync_audio_rate,
|
||||
};
|
||||
|
||||
static int i915_audio_component_bind(struct device *i915_dev,
|
||||
@ -540,6 +720,7 @@ static int i915_audio_component_bind(struct device *i915_dev,
|
||||
{
|
||||
struct i915_audio_component *acomp = data;
|
||||
struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
|
||||
int i;
|
||||
|
||||
if (WARN_ON(acomp->ops || acomp->dev))
|
||||
return -EEXIST;
|
||||
@ -547,6 +728,9 @@ static int i915_audio_component_bind(struct device *i915_dev,
|
||||
drm_modeset_lock_all(dev_priv->dev);
|
||||
acomp->ops = &i915_audio_component_ops;
|
||||
acomp->dev = i915_dev;
|
||||
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
|
||||
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
|
||||
acomp->aud_sample_rate[i] = 0;
|
||||
dev_priv->audio_component = acomp;
|
||||
drm_modeset_unlock_all(dev_priv->dev);
|
||||
|
||||
|
@ -158,7 +158,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 adpa;

if (INTEL_INFO(dev)->gen >= 5)
@ -890,7 +890,7 @@ void intel_crt_init(struct drm_device *dev)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;

dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
}

intel_crt_reset(connector);
|
@ -265,6 +265,15 @@ void intel_csr_load_program(struct drm_device *dev)
return;
}

/*
* FIXME: Firmware gets lost on S3/S4, but not when entering system
* standby or suspend-to-idle (which is just like forced runtime pm).
* Unfortunately the ACPI subsystem doesn't yet give us a way to
* differentiate this, hence figure it out with this hack.
*/
if (I915_READ(CSR_PROGRAM(0)))
return;

mutex_lock(&dev_priv->csr_lock);
fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++)
|
@ -256,9 +256,6 @@ struct bxt_ddi_buf_trans {
bool default_index; /* true if the entry represents default value */
};

/* BSpec does not define separate vswing/pre-emphasis values for eDP.
* Using DP values for eDP as well.
*/
static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
/* Idx NT mV diff db */
{ 52, 0x9A, 0, 128, true }, /* 0: 400 0 */
@ -273,6 +270,20 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = {
{ 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */
};

static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = {
/* Idx NT mV diff db */
{ 26, 0, 0, 128, false }, /* 0: 200 0 */
{ 38, 0, 0, 112, false }, /* 1: 200 1.5 */
{ 48, 0, 0, 96, false }, /* 2: 200 4 */
{ 54, 0, 0, 69, false }, /* 3: 200 6 */
{ 32, 0, 0, 128, false }, /* 4: 250 0 */
{ 48, 0, 0, 104, false }, /* 5: 250 1.5 */
{ 54, 0, 0, 85, false }, /* 6: 250 4 */
{ 43, 0, 0, 128, false }, /* 7: 300 0 */
{ 54, 0, 0, 101, false }, /* 8: 300 1.5 */
{ 48, 0, 0, 128, false }, /* 9: 300 0 */
};

/* BSpec has 2 recommended values - entries 0 and 8.
* Using the entry with higher vswing.
*/
@ -298,21 +309,26 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
enum port *port)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;

if (type == INTEL_OUTPUT_DP_MST) {
switch (intel_encoder->type) {
case INTEL_OUTPUT_DP_MST:
*dig_port = enc_to_mst(encoder)->primary;
*port = (*dig_port)->port;
} else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
break;
case INTEL_OUTPUT_DISPLAYPORT:
case INTEL_OUTPUT_EDP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_UNKNOWN:
*dig_port = enc_to_dig_port(encoder);
*port = (*dig_port)->port;
} else if (type == INTEL_OUTPUT_ANALOG) {
break;
case INTEL_OUTPUT_ANALOG:
*dig_port = NULL;
*port = PORT_E;
} else {
DRM_ERROR("Invalid DDI encoder type %d\n", type);
BUG();
break;
default:
WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
break;
}
}

@ -542,8 +558,10 @@ void intel_prepare_ddi(struct drm_device *dev)
enum port port;
bool supports_hdmi;

ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
if (intel_encoder->type == INTEL_OUTPUT_DSI)
continue;

ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
if (visited[port])
continue;

@ -592,7 +610,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

@ -600,13 +618,13 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
udelay(220);

/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);

/* Configure Port Clock Select */
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel);
@ -635,21 +653,21 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
udelay(600);

/* Program PCH FDI Receiver TU */
I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));

/* Wait for FDI receiver lane calibration */
udelay(30);

/* Unset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
I915_WRITE(_FDI_RXA_MISC, temp);
POSTING_READ(_FDI_RXA_MISC);
I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
POSTING_READ(FDI_RX_MISC(PIPE_A));

/* Wait for FDI auto training time */
udelay(5);
@ -683,15 +701,15 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
intel_wait_ddi_buf_idle(dev_priv, PORT_E);

rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));

/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp = I915_READ(FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, temp);
POSTING_READ(_FDI_RXA_MISC);
I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
POSTING_READ(FDI_RX_MISC(PIPE_A));
}

DRM_ERROR("FDI link training failed!\n");
@ -953,8 +971,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
uint32_t cfgcr1_val, cfgcr2_val;
uint32_t p0, p1, p2, dco_freq;

cfgcr1_reg = GET_CFG_CR1_REG(dpll);
cfgcr2_reg = GET_CFG_CR2_REG(dpll);
cfgcr1_reg = DPLL_CFGCR1(dpll);
cfgcr2_reg = DPLL_CFGCR2(dpll);

cfgcr1_val = I915_READ(cfgcr1_reg);
cfgcr2_val = I915_READ(cfgcr2_reg);
@ -2027,7 +2045,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@ -2112,7 +2131,11 @@ static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
u32 n_entries, i;
uint32_t val;

if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
ddi_translations = bxt_ddi_translations_edp;
} else if (type == INTEL_OUTPUT_DISPLAYPORT
|| type == INTEL_OUTPUT_EDP) {
n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
ddi_translations = bxt_ddi_translations_dp;
} else if (type == INTEL_OUTPUT_HDMI) {
@ -2150,9 +2173,13 @@ static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val);

val = I915_READ(BXT_PORT_TX_DW3_LN0(port));
val &= ~UNIQE_TRANGE_EN_METHOD;
val &= ~SCALE_DCOMP_METHOD;
if (ddi_translations[level].enable)
val |= UNIQE_TRANGE_EN_METHOD;
val |= SCALE_DCOMP_METHOD;

if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");

I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val);

val = I915_READ(BXT_PORT_TX_DW4_LN0(port));
@ -2293,7 +2320,6 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)

intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
intel_dp_stop_link_train(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
@ -2480,20 +2506,20 @@ static const struct skl_dpll_regs skl_dpll_regs[3] = {
{
/* DPLL 1 */
.ctl = LCPLL2_CTL,
.cfgcr1 = DPLL1_CFGCR1,
.cfgcr2 = DPLL1_CFGCR2,
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL1),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL1),
},
{
/* DPLL 2 */
.ctl = WRPLL_CTL1,
.cfgcr1 = DPLL2_CFGCR1,
.cfgcr2 = DPLL2_CFGCR2,
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL2),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL2),
},
{
/* DPLL 3 */
.ctl = WRPLL_CTL2,
.cfgcr1 = DPLL3_CFGCR1,
.cfgcr2 = DPLL3_CFGCR2,
.cfgcr1 = DPLL_CFGCR1(SKL_DPLL3),
.cfgcr2 = DPLL_CFGCR2(SKL_DPLL3),
},
};

@ -2999,22 +3025,22 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)

intel_ddi_post_disable(intel_encoder);

val = I915_READ(_FDI_RXA_CTL);
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, val);
I915_WRITE(FDI_RX_CTL(PIPE_A), val);

val = I915_READ(_FDI_RXA_MISC);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, val);
I915_WRITE(FDI_RX_MISC(PIPE_A), val);

val = I915_READ(_FDI_RXA_CTL);
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
I915_WRITE(_FDI_RXA_CTL, val);
I915_WRITE(FDI_RX_CTL(PIPE_A), val);

val = I915_READ(_FDI_RXA_CTL);
val = I915_READ(FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(_FDI_RXA_CTL, val);
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
}

void intel_ddi_get_config(struct intel_encoder *encoder,
|
@ -132,6 +132,42 @@ struct intel_limit {
intel_p2_t p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

/* Obtain SKU information */
mutex_lock(&dev_priv->sb_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->sb_lock);

return vco_freq[hpll_freq] * 1000;
}

static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg)
{
u32 val;
int divider;

if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, reg);
mutex_unlock(&dev_priv->sb_lock);

divider = val & CCK_FREQUENCY_VALUES;

WARN((val & CCK_FREQUENCY_STATUS) !=
(divider << CCK_FREQUENCY_STATUS_SHIFT),
"%s change in progress\n", name);

return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}

int
intel_pch_rawclk(struct drm_device *dev)
{
@ -175,6 +211,17 @@ int intel_hrawclk(struct drm_device *dev)
}
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!IS_VALLEYVIEW(dev_priv))
return;

dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);

DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
@ -1295,7 +1342,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
bool cur_state;

if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
else
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

@ -2003,9 +2050,9 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

/* Workaround: set timing override bit. */
val = I915_READ(_TRANSA_CHICKEN2);
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(_TRANSA_CHICKEN2, val);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

val = TRANS_ENABLE;
pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
@ -2063,9 +2110,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
DRM_ERROR("Failed to disable PCH transcoder\n");

/* Workaround: clear timing override bit. */
val = I915_READ(_TRANSA_CHICKEN2);
val = I915_READ(TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(_TRANSA_CHICKEN2, val);
I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

/**
@ -2498,6 +2545,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = NULL;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
@ -2510,6 +2558,12 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
if (plane_config->size == 0)
return false;

/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
return false;

obj = i915_gem_object_create_stolen_for_preallocated(dev,
base_aligned,
base_aligned,
@ -3077,27 +3131,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

/*
* FIXME: intel_plane_state->src, dst aren't set when transitional
* update_plane helpers are called from legacy paths.
* Once full atomic crtc is available, below check can be avoided.
*/
if (drm_rect_width(&plane_state->src)) {
scaler_id = plane_state->scaler_id;
src_x = plane_state->src.x1 >> 16;
src_y = plane_state->src.y1 >> 16;
src_w = drm_rect_width(&plane_state->src) >> 16;
src_h = drm_rect_height(&plane_state->src) >> 16;
dst_x = plane_state->dst.x1;
dst_y = plane_state->dst.y1;
dst_w = drm_rect_width(&plane_state->dst);
dst_h = drm_rect_height(&plane_state->dst);
WARN_ON(drm_rect_width(&plane_state->src) == 0);

WARN_ON(x != src_x || y != src_y);
} else {
src_w = intel_crtc->config->pipe_src_w;
src_h = intel_crtc->config->pipe_src_h;
}
scaler_id = plane_state->scaler_id;
src_x = plane_state->src.x1 >> 16;
src_y = plane_state->src.y1 >> 16;
src_w = drm_rect_width(&plane_state->src) >> 16;
src_h = drm_rect_height(&plane_state->src) >> 16;
dst_x = plane_state->dst.x1;
dst_y = plane_state->dst.y1;
dst_w = drm_rect_width(&plane_state->dst);
dst_h = drm_rect_height(&plane_state->dst);

WARN_ON(x != src_x || y != src_y);

if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
@ -4392,8 +4438,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
struct drm_display_mode *adjusted_mode =
&state->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
@ -4401,7 +4446,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0,
state->pipe_src_w, state->pipe_src_h,
adjusted_mode->hdisplay, adjusted_mode->vdisplay);
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}

/**
@ -4594,7 +4639,6 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
int palreg = PALETTE(pipe);
int i;
bool reenable_ips = false;

@ -4609,10 +4653,6 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
assert_pll_enabled(dev_priv, pipe);
}

/* use legacy palette for Ironlake */
if (!HAS_GMCH_DISPLAY(dev))
palreg = LGC_PALETTE(pipe);

/* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
@ -4624,7 +4664,14 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
}

for (i = 0; i < 256; i++) {
I915_WRITE(palreg + 4 * i,
u32 palreg;

if (HAS_GMCH_DISPLAY(dev))
palreg = PALETTE(pipe, i);
else
palreg = LGC_PALETTE(pipe, i);

I915_WRITE(palreg,
(intel_crtc->lut_r[i] << 16) |
(intel_crtc->lut_g[i] << 8) |
intel_crtc->lut_b[i]);
@ -4757,7 +4804,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_plane *plane;

if (atomic->wait_vblank)
intel_wait_for_vblank(dev, crtc->pipe);
@ -4776,10 +4822,6 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
if (atomic->post_enable_primary)
intel_post_enable_primary(&crtc->base);

drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks)
intel_update_sprite_watermarks(plane, &crtc->base,
0, 0, 0, false, false);

memset(atomic, 0, sizeof(*atomic));
}

@ -4922,6 +4964,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe, hsw_workaround_pipe;
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->state);
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

if (WARN_ON(intel_crtc->active))
return;
@ -4951,9 +4994,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;

intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
for_each_encoder_on_crtc(dev, crtc, encoder)
for_each_encoder_on_crtc(dev, crtc, encoder) {
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
if (encoder->pre_enable)
encoder->pre_enable(encoder);
}

if (intel_crtc->config->has_pch_encoder) {
intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
@ -4961,7 +5007,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
dev_priv->display.fdi_link_train(crtc);
}

intel_ddi_enable_pipe_clock(intel_crtc);
if (!is_dsi)
intel_ddi_enable_pipe_clock(intel_crtc);

if (INTEL_INFO(dev)->gen >= 9)
skylake_pfit_enable(intel_crtc);
@ -4975,7 +5022,8 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
intel_crtc_load_lut(crtc);

intel_ddi_set_pipe_settings(crtc);
intel_ddi_enable_transcoder_func(crtc);
if (!is_dsi)
intel_ddi_enable_transcoder_func(crtc);

intel_update_watermarks(crtc);
intel_enable_pipe(intel_crtc);
@ -4983,7 +5031,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
if (intel_crtc->config->has_pch_encoder)
lpt_pch_enable(crtc);

if (intel_crtc->config->dp_encoder_is_mst)
if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
intel_ddi_set_vc_payload_alloc(crtc, true);

assert_vblank_disabled(crtc);
@ -5067,9 +5115,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)

ironlake_fdi_pll_disable(intel_crtc);
}

intel_crtc->active = false;
intel_update_watermarks(crtc);
}

static void haswell_crtc_disable(struct drm_crtc *crtc)
@ -5079,6 +5124,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
@ -5096,14 +5142,16 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (intel_crtc->config->dp_encoder_is_mst)
intel_ddi_set_vc_payload_alloc(crtc, false);

intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
if (!is_dsi)
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

if (INTEL_INFO(dev)->gen >= 9)
skylake_scaler_disable(intel_crtc);
else
ironlake_pfit_disable(intel_crtc, false);

intel_ddi_disable_pipe_clock(intel_crtc);
if (!is_dsi)
intel_ddi_disable_pipe_clock(intel_crtc);

if (intel_crtc->config->has_pch_encoder) {
lpt_disable_pch_transcoder(dev_priv);
@ -5113,9 +5161,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);

intel_crtc->active = false;
intel_update_watermarks(crtc);
}

static void i9xx_pfit_enable(struct intel_crtc *crtc)
@ -5709,10 +5754,16 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
DRM_ERROR("DBuf power disable timeout\n");

/* disable DPLL0 */
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
DRM_ERROR("Couldn't disable DPLL0\n");
/*
* DMC assumes ownership of LCPLL and will get confused if we touch it.
*/
if (dev_priv->csr.dmc_payload) {
/* disable DPLL0 */
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) &
~LCPLL_PLL_ENABLE);
if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
DRM_ERROR("Couldn't disable DPLL0\n");
}

intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}
@ -5749,20 +5800,6 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
DRM_ERROR("DBuf power enable timeout\n");
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

/* Obtain SKU information */
mutex_lock(&dev_priv->sb_lock);
hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
CCK_FUSE_HPLL_FREQ_MASK;
mutex_unlock(&dev_priv->sb_lock);

return vco_freq[hpll_freq] * 1000;
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
@ -5800,12 +5837,12 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)

/* adjust cdclk divider */
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
val &= ~DISPLAY_FREQUENCY_VALUES;
val &= ~CCK_FREQUENCY_VALUES;
val |= divider;
vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
DRM_ERROR("timed out waiting for CDclk change\n");
}
@ -5983,7 +6020,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
else
default_credits = PFI_CREDIT(8);

if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
/* CHV suggested value is 31 or 63 */
if (IS_CHERRYVIEW(dev_priv))
credits = PFI_CREDIT_63;
@ -6214,9 +6251,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)

if (!IS_GEN2(dev))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

intel_crtc->active = false;
intel_update_watermarks(crtc);
}

static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@ -6236,6 +6270,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)

intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_update_watermarks(crtc);
intel_disable_shared_dpll(intel_crtc);

domains = intel_crtc->enabled_power_domains;
@ -6472,7 +6508,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;

@ -6551,7 +6587,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

/* FIXME should check pixel clock limits on all platforms */
if (INTEL_INFO(dev)->gen < 4) {
@ -6588,7 +6624,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
adjusted_mode->hsync_start == adjusted_mode->hdisplay)
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;

if (HAS_IPS(dev))
@ -6715,24 +6751,8 @@ static int haswell_get_display_clock_speed(struct drm_device *dev)

static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
int divider;

if (dev_priv->hpll_freq == 0)
dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

mutex_lock(&dev_priv->sb_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->sb_lock);

divider = val & DISPLAY_FREQUENCY_VALUES;

WARN((val & DISPLAY_FREQUENCY_STATUS) !=
(divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
"cdclk change in progress\n");

return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
CCK_DISPLAY_CLOCK_CONTROL);
}

static int ilk_get_display_clock_speed(struct drm_device *dev)
@ -7619,8 +7639,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
struct drm_display_mode *adjusted_mode =
&intel_crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
uint32_t crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;

@ -9884,13 +9903,13 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
/* On these chipsets we can only modify the base/size/stride
* whilst the cursor is disabled.
*/
I915_WRITE(_CURACNTR, 0);
POSTING_READ(_CURACNTR);
I915_WRITE(CURCNTR(PIPE_A), 0);
POSTING_READ(CURCNTR(PIPE_A));
intel_crtc->cursor_cntl = 0;
}

if (intel_crtc->cursor_base != base) {
I915_WRITE(_CURABASE, base);
I915_WRITE(CURBASE(PIPE_A), base);
intel_crtc->cursor_base = base;
}

@ -9900,8 +9919,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
}

if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE(_CURACNTR, cntl);
POSTING_READ(_CURACNTR);
I915_WRITE(CURCNTR(PIPE_A), cntl);
POSTING_READ(CURCNTR(PIPE_A));
intel_crtc->cursor_cntl = cntl;
}
}
@ -11558,18 +11577,32 @@ retry:
static bool intel_wm_need_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
/* Update watermarks on tiling changes. */
struct intel_plane_state *new = to_intel_plane_state(state);
struct intel_plane_state *cur = to_intel_plane_state(plane->state);

/* Update watermarks on tiling or size changes. */
if (!plane->state->fb || !state->fb ||
plane->state->fb->modifier[0] != state->fb->modifier[0] ||
plane->state->rotation != state->rotation)
return true;

if (plane->state->crtc_w != state->crtc_w)
plane->state->rotation != state->rotation ||
drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
return true;

return false;
}

static bool needs_scaling(struct intel_plane_state *state)
{
int src_w = drm_rect_width(&state->src) >> 16;
int src_h = drm_rect_height(&state->src) >> 16;
int dst_w = drm_rect_width(&state->dst);
int dst_h = drm_rect_height(&state->dst);

return (src_w != dst_w || src_h != dst_h);
}

int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
@ -11585,7 +11618,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;

bool turn_off, turn_on, visible, was_visible;
struct drm_framebuffer *fb = plane_state->fb;

@ -11703,11 +11735,23 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
case DRM_PLANE_TYPE_CURSOR:
break;
case DRM_PLANE_TYPE_OVERLAY:
if (turn_off && !mode_changed) {
/*
* WaCxSRDisabledForSpriteScaling:ivb
*
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
if (IS_IVYBRIDGE(dev) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state)) {
to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
} else if (turn_off && !mode_changed) {
intel_crtc->atomic.wait_vblank = true;
intel_crtc->atomic.update_sprite_watermarks |=
1 << i;
}

break;
}
return 0;
}
@ -12571,8 +12615,8 @@ static void check_wm_state(struct drm_device *dev)
}

/* cursor */
hw_entry = &hw_ddb.cursor[pipe];
sw_entry = &sw_ddb->cursor[pipe];
hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

if (skl_ddb_entry_equal(hw_entry, sw_entry))
continue;
@ -12815,11 +12859,11 @@ static void update_scanline_offset(struct intel_crtc *crtc)
* one to the value.
*/
if (IS_GEN2(dev)) {
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
int vtotal;

vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal = adjusted_mode->crtc_vtotal;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;

crtc->scanline_offset = vtotal - 1;
@ -13323,8 +13367,6 @@ static void intel_shared_dpll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;

intel_update_cdclk(dev);

if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
@ -13983,7 +14025,7 @@ static void intel_setup_outputs(struct drm_device *dev)
* On SKL pre-D0 the strap isn't connected, so we assume
* it's there.
*/
found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_SKYLAKE(dev))
intel_ddi_init(dev, PORT_A);
@ -14044,29 +14086,26 @@ static void intel_setup_outputs(struct drm_device *dev)
* eDP ports. Consult the VBT as well as DP_DETECTED to
* detect eDP ports.
*/
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_B))
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_B))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
intel_dp_init(dev, VLV_DP_B, PORT_B);

if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
!intel_dp_is_edp(dev, PORT_C))
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED ||
intel_dp_is_edp(dev, PORT_C))
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
intel_dp_init(dev, VLV_DP_C, PORT_C);

if (IS_CHERRYVIEW(dev)) {
if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
PORT_D);
/* eDP not supported on port D, so don't check VBT */
if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev, CHV_HDMID, PORT_D);
if (I915_READ(CHV_DP_D) & DP_DETECTED)
intel_dp_init(dev, CHV_DP_D, PORT_D);
}

intel_dsi_init(dev);
@ -14557,8 +14596,6 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.queue_flip = intel_default_queue_flip;
}

intel_panel_init_backlight_funcs(dev);

mutex_init(&dev_priv->pps_mutex);
}

@ -14836,6 +14873,9 @@ void intel_modeset_init(struct drm_device *dev)
}
}

intel_update_czclk(dev_priv);
intel_update_cdclk(dev);

intel_shared_dpll_init(dev);

/* Just disable it once at startup */
@ -15124,11 +15164,15 @@ static bool primary_get_hw_state(struct intel_plane *plane)
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
struct drm_plane *primary = crtc->base.primary;
struct intel_plane_state *plane_state =
to_intel_plane_state(crtc->base.primary->state);
to_intel_plane_state(primary->state);

plane_state->visible =
primary_get_hw_state(to_intel_plane(crtc->base.primary));
primary_get_hw_state(to_intel_plane(primary));

if (plane_state->visible)
crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}

static void intel_modeset_readout_hw_state(struct drm_device *dev)
|
@ -1587,7 +1587,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

intel_dp_set_link_params(intel_dp, crtc->config);

@ -2604,7 +2604,6 @@ static void intel_enable_dp(struct intel_encoder *encoder)

intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);

if (crtc->config->has_audio) {
@ -3417,11 +3416,6 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
}

/* LRC Bypass */
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
val |= DPIO_LRC_BYPASS;
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

mutex_unlock(&dev_priv->sb_lock);

return 0;
@ -3696,8 +3690,8 @@ static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
}

/* Enable corresponding port and start training pattern 1 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
struct drm_device *dev = encoder->dev;
@ -3810,8 +3804,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
}

void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
@ -3864,7 +3858,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
@ -3881,7 +3875,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
DP_LINK_SCRAMBLING_DISABLE);
@ -3914,6 +3908,13 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_DISABLE);
}

void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
intel_dp_link_training_clock_recovery(intel_dp);
intel_dp_link_training_channel_equalization(intel_dp);
}

static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
@ -4382,7 +4383,6 @@ go_again:
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}

@ -4473,7 +4473,6 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}
}
@ -6000,7 +5999,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}

intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight_power = intel_edp_backlight_power;
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);

return true;
@ -6169,10 +6168,8 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
return;

intel_connector = intel_connector_alloc();
if (!intel_connector) {
kfree(intel_dig_port);
return;
}
if (!intel_connector)
goto err_connector_alloc;

intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
@ -6220,11 +6217,18 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
dev_priv->hotplug.irq_port[port] = intel_dig_port;

if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
drm_encoder_cleanup(encoder);
kfree(intel_dig_port);
kfree(intel_connector);
}
if (!intel_dp_init_connector(intel_dig_port, intel_connector))
goto err_init_connector;

return;

err_init_connector:
drm_encoder_cleanup(encoder);
kfree(intel_connector);
err_connector_alloc:
kfree(intel_dig_port);

return;
}

void intel_dp_mst_suspend(struct drm_device *dev)
|
@ -40,7 +40,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_atomic_state *state;
int bpp, i;
int lane_count, slots;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
struct drm_connector *drm_connector;
struct intel_connector *connector, *found = NULL;
struct drm_connector_state *connector_state;
@ -78,7 +78,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
return false;
}

mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);

pipe_config->pbn = mst_pbn;
slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
@ -188,7 +188,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)


intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
}

|
@ -179,12 +179,22 @@ struct intel_panel {
bool active_low_pwm;

/* PWM chip */
bool util_pin_active_low; /* bxt+ */
u8 controller; /* bxt+ only */
struct pwm_device *pwm;

struct backlight_device *device;
} backlight;

void (*backlight_power)(struct intel_connector *, bool enable);
/* Connector and platform specific backlight functions */
int (*setup)(struct intel_connector *connector, enum pipe pipe);
uint32_t (*get)(struct intel_connector *connector);
void (*set)(struct intel_connector *connector, uint32_t level);
void (*disable)(struct intel_connector *connector);
void (*enable)(struct intel_connector *connector);
uint32_t (*hz_to_pwm)(struct intel_connector *connector,
uint32_t hz);
void (*power)(struct intel_connector *, bool enable);
} backlight;
};

struct intel_connector {
@ -458,6 +468,9 @@ struct intel_crtc_state {

/* w/a for waiting 2 vblanks during crtc enable */
enum pipe hsw_workaround_pipe;

/* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
bool disable_lp_wm;
};

struct vlv_wm_state {
@ -683,7 +696,7 @@ struct intel_hdmi {
const void *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
bool enable,
struct drm_display_mode *adjusted_mode);
const struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder);
};

@ -1191,7 +1204,6 @@ bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
void intel_dp_set_link_params(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_complete_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_destroy(struct drm_encoder *encoder);
@ -1300,6 +1312,7 @@ int intel_connector_update_modes(struct drm_connector *connector,
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
void intel_attach_force_audio_property(struct drm_connector *connector);
void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
void intel_attach_aspect_ratio_property(struct drm_connector *connector);


/* intel_overlay.c */
@ -1332,7 +1345,6 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
void intel_panel_enable_backlight(struct intel_connector *connector);
void intel_panel_disable_backlight(struct intel_connector *connector);
void intel_panel_destroy_backlight(struct drm_connector *connector);
void intel_panel_init_backlight_funcs(struct drm_device *dev);
enum drm_connector_status intel_panel_detect(struct drm_device *dev);
extern struct drm_display_mode *intel_find_panel_downclock(
struct drm_device *dev,
@ -1387,12 +1399,6 @@ void intel_init_clock_gating(struct drm_device *dev);
void intel_suspend_hw(struct drm_device *dev);
int ilk_wm_max_level(const struct drm_device *dev);
void intel_update_watermarks(struct drm_crtc *crtc);
void intel_update_sprite_watermarks(struct drm_plane *plane,
struct drm_crtc *crtc,
uint32_t sprite_width,
uint32_t sprite_height,
int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
void intel_pm_setup(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
|
@ -282,58 +282,46 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
return true;
}

static void intel_dsi_port_enable(struct intel_encoder *encoder)
static void bxt_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
u32 val;

if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, temp);
}
DRM_DEBUG_KMS("\n");

/* Exit Low power state in 4 steps*/
for_each_dsi_port(port, intel_dsi->ports) {
temp = I915_READ(MIPI_PORT_CTRL(port));
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;

if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
/* assert ip_tg_enable signal */
I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(port));
/* 1. Enable MIPI PHY transparent latch */
val = I915_READ(BXT_MIPI_PORT_CTRL(port));
I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
usleep_range(2000, 2500);

/* 2. Enter ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(2, 3);

/* 3. Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
usleep_range(1000, 1500);

/* Clear ULPS and set device ready */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= DEVICE_READY;
I915_WRITE(MIPI_DEVICE_READY(port), val);
}
}

static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;

for_each_dsi_port(port, intel_dsi->ports) {
/* de-assert ip_tg_enable signal */
temp = I915_READ(MIPI_PORT_CTRL(port));
I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE);
POSTING_READ(MIPI_PORT_CTRL(port));
}
}

static void intel_dsi_device_ready(struct intel_encoder *encoder)
static void vlv_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@ -372,6 +360,75 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
}
}

static void intel_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;

if (IS_VALLEYVIEW(dev))
vlv_dsi_device_ready(encoder);
else if (IS_BROXTON(dev))
bxt_dsi_device_ready(encoder);
}

static void intel_dsi_port_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
u32 port_ctrl;

if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
temp = I915_READ(VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
I915_WRITE(VLV_CHICKEN_3, temp);
}

for_each_dsi_port(port, intel_dsi->ports) {
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);

temp = I915_READ(port_ctrl);

temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;

if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) {
temp |= (intel_dsi->dual_link - 1)
<< DUAL_LINK_MODE_SHIFT;
temp |= intel_crtc->pipe ?
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
/* assert ip_tg_enable signal */
I915_WRITE(port_ctrl, temp | DPI_ENABLE);
POSTING_READ(port_ctrl);
}
}

static void intel_dsi_port_disable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 temp;
u32 port_ctrl;

for_each_dsi_port(port, intel_dsi->ports) {
/* de-assert ip_tg_enable signal */
port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
temp = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
POSTING_READ(port_ctrl);
}
}

static void intel_dsi_enable(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@ -419,19 +476,24 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)

msleep(intel_dsi->panel_on_delay);

/* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled */
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);
if (IS_VALLEYVIEW(dev)) {
/*
* Disable DPOunit clock gating, can stall pipe
* and we need DPLL REFA always enabled
*/
tmp = I915_READ(DPLL(pipe));
tmp |= DPLL_REF_CLK_ENABLE_VLV;
I915_WRITE(DPLL(pipe), tmp);

/* update the hw state for DPLL */
intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
/* update the hw state for DPLL */
intel_crtc->config->dpll_hw_state.dpll =
DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;

tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
tmp = I915_READ(DSPCLK_GATE_D);
tmp |= DPOUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, tmp);
}

/* put device in ready state */
intel_dsi_device_ready(encoder);
@ -495,12 +557,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
/* Panel commands can be sent when clock is in LP11 */
I915_WRITE(MIPI_DEVICE_READY(port), 0x0);

temp = I915_READ(MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(port), temp |
intel_dsi->escape_clk_div <<
ESCAPE_CLOCK_DIVIDER_SHIFT);

intel_dsi_reset_clocks(encoder, port);
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);

temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
@ -519,10 +576,12 @@ static void intel_dsi_disable(struct intel_encoder *encoder)

static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
u32 val;
u32 port_ctrl = 0;

DRM_DEBUG_KMS("\n");
for_each_dsi_port(port, intel_dsi->ports) {
@ -539,18 +598,22 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
ULPS_STATE_ENTER);
usleep_range(2000, 2500);

if (IS_BROXTON(dev))
port_ctrl = BXT_MIPI_PORT_CTRL(port);
else if (IS_VALLEYVIEW(dev))
/* Common bit for both MIPI Port A & MIPI Port C */
port_ctrl = MIPI_PORT_CTRL(PORT_A);

/* Wait till Clock lanes are in LP-00 state for MIPI Port A
* only. MIPI Port C has no similar bit for checking
*/
if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT)
== 0x00000), 30))
if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT)
== 0x00000), 30))
DRM_ERROR("DSI LP not going Low\n");

/* Disable MIPI PHY transparent latch
* Common bit for both MIPI Port A & MIPI Port C
*/
val = I915_READ(MIPI_PORT_CTRL(PORT_A));
I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD);
/* Disable MIPI PHY transparent latch */
val = I915_READ(port_ctrl);
I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);

I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
@ -593,7 +656,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
u32 dpi_enabled, func;
u32 dpi_enabled, func, ctrl_reg;
enum port port;

DRM_DEBUG_KMS("\n");
@ -605,8 +668,9 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
func = I915_READ(MIPI_DSI_FUNC_PRG(port));
dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) &
DPI_ENABLE;
ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) :
MIPI_PORT_CTRL(port);
dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE;

/* Due to some hardware limitations on BYT, MIPI Port C DPI
|
||||
* Enable bit does not get set. To check whether DSI Port C
|
||||
@ -631,7 +695,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
|
||||
static void intel_dsi_get_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
u32 pclk;
|
||||
u32 pclk = 0;
|
||||
DRM_DEBUG_KMS("\n");
|
||||
|
||||
/*
|
||||
@ -640,7 +704,11 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
|
||||
*/
|
||||
pipe_config->dpll_hw_state.dpll_md = 0;
|
||||
|
||||
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
|
||||
if (IS_BROXTON(encoder->base.dev))
|
||||
pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
|
||||
else if (IS_VALLEYVIEW(encoder->base.dev))
|
||||
pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
|
||||
|
||||
if (!pclk)
|
||||
return;
|
||||
|
||||
@ -698,7 +766,7 @@ static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
|
||||
}
|
||||
|
||||
static void set_dsi_timings(struct drm_encoder *encoder,
|
||||
const struct drm_display_mode *mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
@ -710,10 +778,10 @@ static void set_dsi_timings(struct drm_encoder *encoder,
|
||||
|
||||
u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
|
||||
|
||||
hactive = mode->hdisplay;
|
||||
hfp = mode->hsync_start - mode->hdisplay;
|
||||
hsync = mode->hsync_end - mode->hsync_start;
|
||||
hbp = mode->htotal - mode->hsync_end;
|
||||
hactive = adjusted_mode->crtc_hdisplay;
|
||||
hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay;
|
||||
hsync = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
|
||||
hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end;
|
||||
|
||||
if (intel_dsi->dual_link) {
|
||||
hactive /= 2;
|
||||
@ -724,9 +792,9 @@ static void set_dsi_timings(struct drm_encoder *encoder,
|
||||
hbp /= 2;
|
||||
}
|
||||
|
||||
vfp = mode->vsync_start - mode->vdisplay;
|
||||
vsync = mode->vsync_end - mode->vsync_start;
|
||||
vbp = mode->vtotal - mode->vsync_end;
|
||||
vfp = adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay;
|
||||
vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
|
||||
vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end;
|
||||
|
||||
/* horizontal values are in terms of high speed byte clock */
|
||||
hactive = txbyteclkhs(hactive, bpp, lane_count,
|
||||
@ -745,11 +813,11 @@ static void set_dsi_timings(struct drm_encoder *encoder,
|
||||
* whereas these values should be based on resolution.
|
||||
*/
|
||||
I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
|
||||
mode->hdisplay);
|
||||
adjusted_mode->crtc_hdisplay);
|
||||
I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
|
||||
mode->vdisplay);
|
||||
adjusted_mode->crtc_vdisplay);
|
||||
I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
|
||||
mode->vtotal);
|
||||
adjusted_mode->crtc_vtotal);
|
||||
}
|
||||
|
||||
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
|
||||
@ -774,8 +842,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&intel_crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
|
||||
enum port port;
|
||||
unsigned int bpp = intel_crtc->config->pipe_bpp;
|
||||
u32 val, tmp;
|
||||
@ -783,7 +850,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
|
||||
|
||||
DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
|
||||
|
||||
mode_hdisplay = adjusted_mode->hdisplay;
|
||||
mode_hdisplay = adjusted_mode->crtc_hdisplay;
|
||||
|
||||
if (intel_dsi->dual_link) {
|
||||
mode_hdisplay /= 2;
|
||||
@ -833,7 +900,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
|
||||
I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
|
||||
|
||||
I915_WRITE(MIPI_DPI_RESOLUTION(port),
|
||||
adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT |
|
||||
adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
|
||||
mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
|
||||
}
|
||||
|
||||
@ -879,15 +946,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
|
||||
if (is_vid_mode(intel_dsi) &&
|
||||
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
|
||||
I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
|
||||
txbyteclkhs(adjusted_mode->htotal, bpp,
|
||||
intel_dsi->lane_count,
|
||||
intel_dsi->burst_mode_ratio) + 1);
|
||||
txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
|
||||
intel_dsi->lane_count,
|
||||
intel_dsi->burst_mode_ratio) + 1);
|
||||
} else {
|
||||
I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
|
||||
txbyteclkhs(adjusted_mode->vtotal *
|
||||
adjusted_mode->htotal,
|
||||
bpp, intel_dsi->lane_count,
|
||||
intel_dsi->burst_mode_ratio) + 1);
|
||||
txbyteclkhs(adjusted_mode->crtc_vtotal *
|
||||
adjusted_mode->crtc_htotal,
|
||||
bpp, intel_dsi->lane_count,
|
||||
intel_dsi->burst_mode_ratio) + 1);
|
||||
}
|
||||
I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
|
||||
I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
|
||||
|
@ -127,6 +127,9 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
|
||||
extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
|
||||
extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
|
||||
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
|
||||
extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
|
||||
extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
|
||||
enum port port);
|
||||
|
||||
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
|
||||
|
||||
|
@ -384,6 +384,90 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
|
||||
return pclk;
|
||||
}
|
||||
|
||||
u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
|
||||
{
|
||||
u32 pclk;
|
||||
u32 dsi_clk;
|
||||
u32 dsi_ratio;
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
|
||||
/* Divide by zero */
|
||||
if (!pipe_bpp) {
|
||||
DRM_ERROR("Invalid BPP(0)\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) &
|
||||
BXT_DSI_PLL_RATIO_MASK;
|
||||
|
||||
/* Invalid DSI ratio ? */
|
||||
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
|
||||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
|
||||
DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio);
|
||||
return 0;
|
||||
}
|
||||
|
||||
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
|
||||
|
||||
/* pixel_format and pipe_bpp should agree */
|
||||
assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
|
||||
|
||||
pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
|
||||
|
||||
DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
|
||||
return pclk;
|
||||
}
|
||||
|
||||
static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
|
||||
{
|
||||
u32 temp;
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
|
||||
temp = I915_READ(MIPI_CTRL(port));
|
||||
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
|
||||
I915_WRITE(MIPI_CTRL(port), temp |
|
||||
intel_dsi->escape_clk_div <<
|
||||
ESCAPE_CLOCK_DIVIDER_SHIFT);
|
||||
}
|
||||
|
||||
/* Program BXT Mipi clocks and dividers */
|
||||
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
|
||||
{
|
||||
u32 tmp;
|
||||
u32 divider;
|
||||
u32 dsi_rate;
|
||||
u32 pll_ratio;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* Clear old configurations */
|
||||
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
|
||||
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
|
||||
tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
|
||||
tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
|
||||
tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
|
||||
|
||||
/* Get the current DSI rate(actual) */
|
||||
pll_ratio = I915_READ(BXT_DSI_PLL_CTL) &
|
||||
BXT_DSI_PLL_RATIO_MASK;
|
||||
dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
|
||||
|
||||
/* Max possible output of clock is 39.5 MHz, program value -1 */
|
||||
divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
|
||||
tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
|
||||
|
||||
/*
|
||||
* Tx escape clock must be as close to 20MHz possible, but should
|
||||
* not exceed it. Hence select divide by 2
|
||||
*/
|
||||
tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
|
||||
|
||||
tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
|
||||
|
||||
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
|
||||
}
|
||||
|
||||
static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
@ -435,6 +519,8 @@ static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
|
||||
static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
|
||||
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
|
||||
enum port port;
|
||||
u32 val;
|
||||
|
||||
DRM_DEBUG_KMS("\n");
|
||||
@ -453,6 +539,10 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Program TX, RX, Dphy clocks */
|
||||
for_each_dsi_port(port, intel_dsi->ports)
|
||||
bxt_dsi_program_clocks(encoder->base.dev, port);
|
||||
|
||||
/* Enable DSI PLL */
|
||||
val = I915_READ(BXT_DSI_PLL_ENABLE);
|
||||
val |= BXT_DSI_PLL_DO_ENABLE;
|
||||
@ -486,3 +576,29 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
|
||||
else if (IS_BROXTON(dev))
|
||||
bxt_disable_dsi_pll(encoder);
|
||||
}
|
||||
|
||||
static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
|
||||
{
|
||||
u32 tmp;
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
/* Clear old configurations */
|
||||
tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
|
||||
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
|
||||
tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
|
||||
tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
|
||||
tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
|
||||
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
|
||||
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
|
||||
}
|
||||
|
||||
void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
|
||||
if (IS_BROXTON(dev))
|
||||
bxt_dsi_reset_clocks(encoder, port);
|
||||
else if (IS_VALLEYVIEW(dev))
|
||||
vlv_dsi_reset_clocks(encoder, port);
|
||||
}
|
||||
|
@ -97,7 +97,8 @@ struct intel_dvo {
|
||||
|
||||
struct intel_dvo_device dev;
|
||||
|
||||
struct drm_display_mode *panel_fixed_mode;
|
||||
struct intel_connector *attached_connector;
|
||||
|
||||
bool panel_wants_dither;
|
||||
};
|
||||
|
||||
@ -201,6 +202,8 @@ intel_dvo_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
|
||||
const struct drm_display_mode *fixed_mode =
|
||||
to_intel_connector(connector)->panel.fixed_mode;
|
||||
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
|
||||
int target_clock = mode->clock;
|
||||
|
||||
@ -209,13 +212,13 @@ intel_dvo_mode_valid(struct drm_connector *connector,
|
||||
|
||||
/* XXX: Validate clock range */
|
||||
|
||||
if (intel_dvo->panel_fixed_mode) {
|
||||
if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay)
|
||||
if (fixed_mode) {
|
||||
if (mode->hdisplay > fixed_mode->hdisplay)
|
||||
return MODE_PANEL;
|
||||
if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay)
|
||||
if (mode->vdisplay > fixed_mode->vdisplay)
|
||||
return MODE_PANEL;
|
||||
|
||||
target_clock = intel_dvo->panel_fixed_mode->clock;
|
||||
target_clock = fixed_mode->clock;
|
||||
}
|
||||
|
||||
if (target_clock > max_dotclk)
|
||||
@ -228,6 +231,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
|
||||
struct intel_crtc_state *pipe_config)
|
||||
{
|
||||
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
|
||||
const struct drm_display_mode *fixed_mode =
|
||||
intel_dvo->attached_connector->panel.fixed_mode;
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
|
||||
/* If we have timings from the BIOS for the panel, put them in
|
||||
@ -235,21 +240,8 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
|
||||
* with the panel scaling set up to source from the H/VDisplay
|
||||
* of the original mode.
|
||||
*/
|
||||
if (intel_dvo->panel_fixed_mode != NULL) {
|
||||
#define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x
|
||||
C(hdisplay);
|
||||
C(hsync_start);
|
||||
C(hsync_end);
|
||||
C(htotal);
|
||||
C(vdisplay);
|
||||
C(vsync_start);
|
||||
C(vsync_end);
|
||||
C(vtotal);
|
||||
C(clock);
|
||||
#undef C
|
||||
|
||||
drm_mode_set_crtcinfo(adjusted_mode, 0);
|
||||
}
|
||||
if (fixed_mode)
|
||||
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -259,7 +251,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
|
||||
int pipe = crtc->pipe;
|
||||
u32 dvo_val;
|
||||
@ -293,11 +285,11 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder)
|
||||
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
|
||||
|
||||
/*I915_WRITE(DVOB_SRCDIM,
|
||||
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
|
||||
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
|
||||
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
|
||||
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
|
||||
I915_WRITE(dvo_srcdim_reg,
|
||||
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
|
||||
(adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
|
||||
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
|
||||
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
|
||||
/*I915_WRITE(DVOB, dvo_val);*/
|
||||
I915_WRITE(dvo_reg, dvo_val);
|
||||
}
|
||||
@ -318,8 +310,9 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
|
||||
|
||||
static int intel_dvo_get_modes(struct drm_connector *connector)
|
||||
{
|
||||
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
|
||||
struct drm_i915_private *dev_priv = connector->dev->dev_private;
|
||||
const struct drm_display_mode *fixed_mode =
|
||||
to_intel_connector(connector)->panel.fixed_mode;
|
||||
|
||||
/* We should probably have an i2c driver get_modes function for those
|
||||
* devices which will have a fixed set of modes determined by the chip
|
||||
@ -331,9 +324,9 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
|
||||
if (!list_empty(&connector->probed_modes))
|
||||
return 1;
|
||||
|
||||
if (intel_dvo->panel_fixed_mode != NULL) {
|
||||
if (fixed_mode) {
|
||||
struct drm_display_mode *mode;
|
||||
mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode);
|
||||
mode = drm_mode_duplicate(connector->dev, fixed_mode);
|
||||
if (mode) {
|
||||
drm_mode_probed_add(connector, mode);
|
||||
return 1;
|
||||
@ -346,6 +339,7 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
|
||||
static void intel_dvo_destroy(struct drm_connector *connector)
|
||||
{
|
||||
drm_connector_cleanup(connector);
|
||||
intel_panel_fini(&to_intel_connector(connector)->panel);
|
||||
kfree(connector);
|
||||
}
|
||||
|
||||
@ -372,8 +366,6 @@ static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
|
||||
if (intel_dvo->dev.dev_ops->destroy)
|
||||
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
|
||||
|
||||
kfree(intel_dvo->panel_fixed_mode);
|
||||
|
||||
intel_encoder_destroy(encoder);
|
||||
}
|
||||
|
||||
@ -438,6 +430,8 @@ void intel_dvo_init(struct drm_device *dev)
|
||||
return;
|
||||
}
|
||||
|
||||
intel_dvo->attached_connector = intel_connector;
|
||||
|
||||
intel_encoder = &intel_dvo->base;
|
||||
drm_encoder_init(dev, &intel_encoder->base,
|
||||
&intel_dvo_enc_funcs, encoder_type);
|
||||
@ -542,8 +536,9 @@ void intel_dvo_init(struct drm_device *dev)
|
||||
* headers, likely), so for now, just get the current
|
||||
* mode being output through DVO.
|
||||
*/
|
||||
intel_dvo->panel_fixed_mode =
|
||||
intel_dvo_get_current_mode(connector);
|
||||
intel_panel_init(&intel_connector->panel,
|
||||
intel_dvo_get_current_mode(connector),
|
||||
NULL);
|
||||
intel_dvo->panel_wants_dither = true;
|
||||
}
|
||||
|
||||
|
@ -41,6 +41,11 @@
|
||||
#include "intel_drv.h"
|
||||
#include "i915_drv.h"
|
||||
|
||||
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->fbc.enable_fbc != NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
|
||||
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
|
||||
@ -439,7 +444,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
|
||||
*/
|
||||
void intel_fbc_disable(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (!dev_priv->fbc.enable_fbc)
|
||||
if (!fbc_supported(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->fbc.lock);
|
||||
@ -457,7 +462,7 @@ void intel_fbc_disable_crtc(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
|
||||
if (!dev_priv->fbc.enable_fbc)
|
||||
if (!fbc_supported(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->fbc.lock);
|
||||
@ -685,7 +690,7 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||
|
||||
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (!dev_priv->fbc.enable_fbc)
|
||||
if (!fbc_supported(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->fbc.lock);
|
||||
@ -693,16 +698,61 @@ void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
|
||||
mutex_unlock(&dev_priv->fbc.lock);
|
||||
}
|
||||
|
||||
static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
|
||||
int fb_cpp)
|
||||
/*
|
||||
* For SKL+, the plane source size used by the hardware is based on the value we
|
||||
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
|
||||
* we wrote to PIPESRC.
|
||||
*/
|
||||
static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
|
||||
int *width, int *height)
|
||||
{
|
||||
struct intel_plane_state *plane_state =
|
||||
to_intel_plane_state(crtc->base.primary->state);
|
||||
int w, h;
|
||||
|
||||
if (intel_rotation_90_or_270(plane_state->base.rotation)) {
|
||||
w = drm_rect_height(&plane_state->src) >> 16;
|
||||
h = drm_rect_width(&plane_state->src) >> 16;
|
||||
} else {
|
||||
w = drm_rect_width(&plane_state->src) >> 16;
|
||||
h = drm_rect_height(&plane_state->src) >> 16;
|
||||
}
|
||||
|
||||
if (width)
|
||||
*width = w;
|
||||
if (height)
|
||||
*height = h;
|
||||
}
|
||||
|
||||
static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
struct drm_framebuffer *fb = crtc->base.primary->fb;
|
||||
int lines;
|
||||
|
||||
intel_fbc_get_plane_source_size(crtc, NULL, &lines);
|
||||
if (INTEL_INFO(dev_priv)->gen >= 7)
|
||||
lines = min(lines, 2048);
|
||||
|
||||
return lines * fb->pitches[0];
|
||||
}
|
||||
|
||||
static int intel_fbc_setup_cfb(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
struct drm_framebuffer *fb = crtc->base.primary->fb;
|
||||
int size, cpp;
|
||||
|
||||
size = intel_fbc_calculate_cfb_size(crtc);
|
||||
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
|
||||
if (size <= dev_priv->fbc.uncompressed_size)
|
||||
return 0;
|
||||
|
||||
/* Release any current block */
|
||||
__intel_fbc_cleanup_cfb(dev_priv);
|
||||
|
||||
return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
|
||||
return intel_fbc_alloc_cfb(dev_priv, size, cpp);
|
||||
}
|
||||
|
||||
static bool stride_is_valid(struct drm_i915_private *dev_priv,
|
||||
@ -749,6 +799,35 @@ static bool pixel_format_is_valid(struct drm_framebuffer *fb)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* For some reason, the hardware tracking starts looking at whatever we
|
||||
* programmed as the display plane base address register. It does not look at
|
||||
* the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
|
||||
* variables instead of just looking at the pipe/plane size.
|
||||
*/
|
||||
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
|
||||
unsigned int effective_w, effective_h, max_w, max_h;
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
|
||||
max_w = 4096;
|
||||
max_h = 4096;
|
||||
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
|
||||
max_w = 4096;
|
||||
max_h = 2048;
|
||||
} else {
|
||||
max_w = 2048;
|
||||
max_h = 1536;
|
||||
}
|
||||
|
||||
intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
|
||||
effective_w += crtc->adjusted_x;
|
||||
effective_h += crtc->adjusted_y;
|
||||
|
||||
return effective_w <= max_w && effective_h <= max_h;
|
||||
}
|
||||
|
||||
/**
|
||||
* __intel_fbc_update - enable/disable FBC as needed, unlocked
|
||||
* @dev_priv: i915 device instance
|
||||
@ -775,7 +854,6 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_i915_gem_object *obj;
|
||||
const struct drm_display_mode *adjusted_mode;
|
||||
unsigned int max_width, max_height;
|
||||
|
||||
WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
|
||||
|
||||
@ -824,21 +902,11 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
|
||||
goto out_disable;
|
||||
}
|
||||
|
||||
if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
|
||||
max_width = 4096;
|
||||
max_height = 4096;
|
||||
} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
|
||||
max_width = 4096;
|
||||
max_height = 2048;
|
||||
} else {
|
||||
max_width = 2048;
|
||||
max_height = 1536;
|
||||
}
|
||||
if (intel_crtc->config->pipe_src_w > max_width ||
|
||||
intel_crtc->config->pipe_src_h > max_height) {
|
||||
if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) {
|
||||
set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE);
|
||||
goto out_disable;
|
||||
}
|
||||
|
||||
if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) &&
|
||||
intel_crtc->plane != PLANE_A) {
|
||||
set_no_fbc_reason(dev_priv, FBC_BAD_PLANE);
|
||||
@ -883,8 +951,7 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
|
||||
goto out_disable;
|
||||
}
|
||||
|
||||
if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
|
||||
drm_format_plane_cpp(fb->pixel_format, 0))) {
|
||||
if (intel_fbc_setup_cfb(intel_crtc)) {
|
||||
set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
|
||||
goto out_disable;
|
||||
}
|
||||
@ -948,7 +1015,7 @@ out_disable:
|
||||
*/
|
||||
void intel_fbc_update(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
if (!dev_priv->fbc.enable_fbc)
|
||||
if (!fbc_supported(dev_priv))
|
||||
return;
|
||||
|
||||
mutex_lock(&dev_priv->fbc.lock);
|
||||
@ -962,7 +1029,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
|
||||
{
|
||||
unsigned int fbc_bits;
|
||||
|
||||
if (!dev_priv->fbc.enable_fbc)
|
||||
if (!fbc_supported(dev_priv))
|
||||
return;
|
||||
|
||||
if (origin == ORIGIN_GTT)
|
||||
@ -989,7 +1056,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
|
||||
void intel_fbc_flush(struct drm_i915_private *dev_priv,
|
||||
unsigned int frontbuffer_bits, enum fb_op_origin origin)
|
||||
{
|
||||
if (!dev_priv->fbc.enable_fbc)
|
||||
if (!fbc_supported(dev_priv))
|
||||
return;
|
||||
|
||||
if (origin == ORIGIN_GTT)
|
||||
|
@ -121,8 +121,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
|
||||
container_of(helper, struct intel_fbdev, helper);
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_device *dev = helper->dev;
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_mode_fb_cmd2 mode_cmd = {};
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct drm_i915_gem_object *obj = NULL;
|
||||
int size, ret;
|
||||
|
||||
/* we don't do packed 24bpp */
|
||||
@ -139,7 +140,12 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
|
||||
|
||||
size = mode_cmd.pitches[0] * mode_cmd.height;
|
||||
size = PAGE_ALIGN(size);
|
||||
obj = i915_gem_object_create_stolen(dev, size);
|
||||
|
||||
/* If the FB is too big, just don't use it since fbdev is not very
|
||||
* important and we should probably use that space with FBC or other
|
||||
* features. */
|
||||
if (size * 2 < dev_priv->gtt.stolen_usable_size)
|
||||
obj = i915_gem_object_create_stolen(dev, size);
|
||||
if (obj == NULL)
|
||||
obj = i915_gem_alloc_object(dev, size);
|
||||
if (!obj) {
|
||||
|
@ -110,6 +110,8 @@ extern void intel_guc_ucode_init(struct drm_device *dev);
|
||||
extern int intel_guc_ucode_load(struct drm_device *dev);
|
||||
extern void intel_guc_ucode_fini(struct drm_device *dev);
|
||||
extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
|
||||
extern int intel_guc_suspend(struct drm_device *dev);
|
||||
extern int intel_guc_resume(struct drm_device *dev);
|
||||
|
||||
/* i915_guc_submission.c */
|
||||
int i915_guc_submission_init(struct drm_device *dev);
|
||||
|
@ -218,12 +218,23 @@ struct guc_context_desc {
|
||||
u64 desc_private;
|
||||
} __packed;
|
||||
|
||||
#define GUC_FORCEWAKE_RENDER (1 << 0)
|
||||
#define GUC_FORCEWAKE_MEDIA (1 << 1)
|
||||
|
||||
#define GUC_POWER_UNSPECIFIED 0
|
||||
#define GUC_POWER_D0 1
|
||||
#define GUC_POWER_D1 2
|
||||
#define GUC_POWER_D2 3
|
||||
#define GUC_POWER_D3 4
|
||||
|
||||
/* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
|
||||
enum host2guc_action {
|
||||
HOST2GUC_ACTION_DEFAULT = 0x0,
|
||||
HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6,
|
||||
HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10,
|
||||
HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20,
|
||||
HOST2GUC_ACTION_ENTER_S_STATE = 0x501,
|
||||
HOST2GUC_ACTION_EXIT_S_STATE = 0x502,
|
||||
HOST2GUC_ACTION_SLPC_REQUEST = 0x3003,
|
||||
HOST2GUC_ACTION_LIMIT
|
||||
};
|
||||
|
@ -90,9 +90,6 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
|
||||
for_each_ring(ring, dev_priv, i)
|
||||
I915_WRITE(RING_MODE_GEN7(ring), irqs);
|
||||
|
||||
/* tell DE to send nothing to GuC */
|
||||
I915_WRITE(DE_GUCRMR, ~0);
|
||||
|
||||
/* route all GT interrupts to the host */
|
||||
I915_WRITE(GUC_BCS_RCS_IER, 0);
|
||||
I915_WRITE(GUC_VCS2_VCS1_IER, 0);
|
||||
@ -110,13 +107,6 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
|
||||
for_each_ring(ring, dev_priv, i)
|
||||
I915_WRITE(RING_MODE_GEN7(ring), irqs);
|
||||
|
||||
/* tell DE to send (all) flip_done to GuC */
|
||||
irqs = DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEA_SPR_FLIP_DONE |
|
||||
DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEB_SPR_FLIP_DONE |
|
||||
DERRMR_PIPEC_PRI_FLIP_DONE | DERRMR_PIPEC_SPR_FLIP_DONE;
|
||||
/* Unmasked bits will cause GuC response message to be sent */
|
||||
I915_WRITE(DE_GUCRMR, ~irqs);
|
||||
|
||||
/* route USER_INTERRUPT to Host, all others are sent to GuC. */
|
||||
irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
|
||||
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
|
||||
@ -209,9 +199,10 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
|
||||
u32 *status)
|
||||
{
|
||||
u32 val = I915_READ(GUC_STATUS);
|
||||
u32 uk_val = val & GS_UKERNEL_MASK;
|
||||
*status = val;
|
||||
return ((val & GS_UKERNEL_MASK) == GS_UKERNEL_READY ||
|
||||
(val & GS_UKERNEL_MASK) == GS_UKERNEL_LAPIC_DONE);
|
||||
return (uk_val == GS_UKERNEL_READY ||
|
||||
((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE));
|
||||
}
|
||||
|
||||
/*
|
||||
@ -257,7 +248,7 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
|
||||
/* Copy RSA signature from the fw image to HW for verification */
|
||||
sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset);
|
||||
for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++)
|
||||
I915_WRITE(UOS_RSA_SCRATCH_0 + i * sizeof(u32), rsa[i]);
|
||||
I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]);
|
||||
|
||||
/* Set the source address for the new blob */
|
||||
offset = i915_gem_obj_ggtt_offset(fw_obj);
|
||||
@ -392,7 +383,6 @@ int intel_guc_ucode_load(struct drm_device *dev)
|
||||
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
|
||||
|
||||
direct_interrupts_to_host(dev_priv);
|
||||
i915_guc_submission_disable(dev);
|
||||
|
||||
if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE)
|
||||
return 0;
|
||||
@ -442,6 +432,9 @@ int intel_guc_ucode_load(struct drm_device *dev)
|
||||
intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
|
||||
|
||||
if (i915.enable_guc_submission) {
|
||||
/* The execbuf_client will be recreated. Release it first. */
|
||||
i915_guc_submission_disable(dev);
|
||||
|
||||
err = i915_guc_submission_enable(dev);
|
||||
if (err)
|
||||
goto fail;
|
||||
|
@ -447,16 +447,13 @@ static void intel_write_infoframe(struct drm_encoder *encoder,
|
||||
}
|
||||
|
||||
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
|
||||
union hdmi_infoframe frame;
|
||||
int ret;
|
||||
|
||||
/* Set user selected PAR to incoming mode's member */
|
||||
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
|
||||
|
||||
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
|
||||
adjusted_mode);
|
||||
if (ret < 0) {
|
||||
@ -494,7 +491,7 @@ static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
|
||||
|
||||
static void
|
||||
intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
union hdmi_infoframe frame;
|
||||
int ret;
|
||||
@ -509,7 +506,7 @@ intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
|
||||
|
||||
static void g4x_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
|
||||
@ -661,7 +658,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
|
||||
|
||||
static void ibx_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
|
||||
@ -713,7 +710,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
|
||||
|
||||
static void cpt_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
|
||||
@ -755,7 +752,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
|
||||
|
||||
static void vlv_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
|
||||
@ -807,7 +804,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
|
||||
|
||||
static void hsw_set_infoframes(struct drm_encoder *encoder,
|
||||
bool enable,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
|
||||
@ -844,7 +841,7 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
u32 hdmi_val;
|
||||
|
||||
hdmi_val = SDVO_ENCODING_HDMI;
|
||||
@ -1312,6 +1309,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
|
||||
return false;
|
||||
}
|
||||
|
||||
/* Set user selected PAR to incoming mode's member */
|
||||
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1537,8 +1537,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&intel_crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
|
||||
|
||||
intel_hdmi_prepare(encoder);
|
||||
|
||||
@ -1555,8 +1554,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&intel_crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
|
||||
enum dpio_channel port = vlv_dport_to_channel(dport);
|
||||
int pipe = intel_crtc->pipe;
|
||||
u32 val;
|
||||
@ -1822,8 +1820,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc =
|
||||
to_intel_crtc(encoder->base.crtc);
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&intel_crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
|
||||
enum dpio_channel ch = vlv_dport_to_channel(dport);
|
||||
int pipe = intel_crtc->pipe;
|
||||
int data, i, stagger;
|
||||
@ -1955,11 +1952,6 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
|
||||
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
|
||||
vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
|
||||
|
||||
/* LRC Bypass */
|
||||
val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
|
||||
val |= DPIO_LRC_BYPASS;
|
||||
vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
|
||||
|
||||
mutex_unlock(&dev_priv->sb_lock);
|
||||
|
||||
intel_hdmi->set_infoframes(&encoder->base,
|
||||
@ -2006,15 +1998,6 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
|
||||
.destroy = intel_encoder_destroy,
|
||||
};
|
||||
|
||||
static void
|
||||
intel_attach_aspect_ratio_property(struct drm_connector *connector)
|
||||
{
|
||||
if (!drm_mode_create_aspect_ratio_property(connector->dev))
|
||||
drm_object_attach_property(&connector->base,
|
||||
connector->dev->mode_config.aspect_ratio_property,
|
||||
DRM_MODE_PICTURE_ASPECT_NONE);
|
||||
}
|
||||
|
||||
static void
|
||||
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
|
||||
{
|
||||
|
@ -904,21 +904,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (args->num_cliprects != 0) {
|
||||
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
|
||||
return -EINVAL;
|
||||
} else {
|
||||
if (args->DR4 == 0xffffffff) {
|
||||
DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
|
||||
args->DR4 = 0;
|
||||
}
|
||||
|
||||
if (args->DR1 || args->DR4 || args->cliprects_ptr) {
|
||||
DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
|
||||
DRM_DEBUG("sol reset is gen7 only\n");
|
||||
return -EINVAL;
|
||||
|
@ -139,8 +139,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
|
||||
const struct drm_display_mode *adjusted_mode =
|
||||
&crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
int pipe = crtc->pipe;
|
||||
u32 temp;
|
||||
|
||||
|
@ -126,3 +126,12 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
|
||||
|
||||
drm_object_attach_property(&connector->base, prop, 0);
|
||||
}
|
||||
|
||||
void
|
||||
intel_attach_aspect_ratio_property(struct drm_connector *connector)
|
||||
{
|
||||
if (!drm_mode_create_aspect_ratio_property(connector->dev))
|
||||
drm_object_attach_property(&connector->base,
|
||||
connector->dev->mode_config.aspect_ratio_property,
|
||||
DRM_MODE_PICTURE_ASPECT_NONE);
|
||||
}
|
||||
|
@ -341,8 +341,12 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
|
||||
if (!HAS_DDI(dev))
|
||||
return 0;
|
||||
|
||||
port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
if (port == PORT_E) {
|
||||
if (intel_encoder->type == INTEL_OUTPUT_DSI)
|
||||
port = 0;
|
||||
else
|
||||
port = intel_ddi_get_encoder_port(intel_encoder);
|
||||
|
||||
if (port == PORT_E) {
|
||||
port = 0;
|
||||
} else {
|
||||
parm |= 1 << port;
|
||||
@ -363,6 +367,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
|
||||
type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
|
||||
break;
|
||||
case INTEL_OUTPUT_EDP:
|
||||
case INTEL_OUTPUT_DSI:
|
||||
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
|
||||
break;
|
||||
default:
|
||||
|
@ -105,59 +105,55 @@ intel_pch_panel_fitting(struct intel_crtc *intel_crtc,
|
||||
struct intel_crtc_state *pipe_config,
|
||||
int fitting_mode)
|
||||
{
|
||||
struct drm_display_mode *adjusted_mode;
|
||||
int x, y, width, height;
|
||||
|
||||
adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
|
||||
x = y = width = height = 0;
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
int x = 0, y = 0, width = 0, height = 0;
|
||||
|
||||
/* Native modes don't need fitting */
|
||||
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
|
||||
adjusted_mode->vdisplay == pipe_config->pipe_src_h)
|
||||
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
|
||||
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
|
||||
goto done;
|
||||
|
||||
switch (fitting_mode) {
|
||||
case DRM_MODE_SCALE_CENTER:
|
||||
width = pipe_config->pipe_src_w;
|
||||
height = pipe_config->pipe_src_h;
|
||||
x = (adjusted_mode->hdisplay - width + 1)/2;
|
||||
y = (adjusted_mode->vdisplay - height + 1)/2;
|
||||
x = (adjusted_mode->crtc_hdisplay - width + 1)/2;
|
||||
y = (adjusted_mode->crtc_vdisplay - height + 1)/2;
|
||||
break;
|
||||
|
||||
case DRM_MODE_SCALE_ASPECT:
|
||||
/* Scale but preserve the aspect ratio */
|
||||
{
|
||||
u32 scaled_width = adjusted_mode->hdisplay
|
||||
u32 scaled_width = adjusted_mode->crtc_hdisplay
|
||||
* pipe_config->pipe_src_h;
|
||||
u32 scaled_height = pipe_config->pipe_src_w
|
||||
* adjusted_mode->vdisplay;
|
||||
* adjusted_mode->crtc_vdisplay;
|
||||
if (scaled_width > scaled_height) { /* pillar */
|
||||
width = scaled_height / pipe_config->pipe_src_h;
|
||||
if (width & 1)
|
||||
width++;
|
||||
x = (adjusted_mode->hdisplay - width + 1) / 2;
|
||||
x = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
|
||||
y = 0;
|
||||
height = adjusted_mode->vdisplay;
|
||||
height = adjusted_mode->crtc_vdisplay;
|
||||
} else if (scaled_width < scaled_height) { /* letter */
|
||||
height = scaled_width / pipe_config->pipe_src_w;
|
||||
if (height & 1)
|
||||
height++;
|
||||
y = (adjusted_mode->vdisplay - height + 1) / 2;
|
||||
y = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
|
||||
x = 0;
|
||||
width = adjusted_mode->hdisplay;
|
||||
width = adjusted_mode->crtc_hdisplay;
|
||||
} else {
|
||||
x = y = 0;
|
||||
width = adjusted_mode->hdisplay;
|
||||
height = adjusted_mode->vdisplay;
|
||||
width = adjusted_mode->crtc_hdisplay;
|
||||
height = adjusted_mode->crtc_vdisplay;
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
||||
case DRM_MODE_SCALE_FULLSCREEN:
|
||||
x = y = 0;
|
||||
width = adjusted_mode->hdisplay;
|
||||
height = adjusted_mode->vdisplay;
|
||||
width = adjusted_mode->crtc_hdisplay;
|
||||
height = adjusted_mode->crtc_vdisplay;
|
||||
break;
|
||||
|
||||
default:
|
||||
@ -172,46 +168,46 @@ done:
|
||||
}
|
||||
|
||||
static void
|
||||
centre_horizontally(struct drm_display_mode *mode,
|
||||
centre_horizontally(struct drm_display_mode *adjusted_mode,
|
||||
int width)
|
||||
{
|
||||
u32 border, sync_pos, blank_width, sync_width;
|
||||
|
||||
/* keep the hsync and hblank widths constant */
|
||||
sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
|
||||
blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
|
||||
sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start;
|
||||
blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
|
||||
sync_pos = (blank_width - sync_width + 1) / 2;
|
||||
|
||||
border = (mode->hdisplay - width + 1) / 2;
|
||||
border = (adjusted_mode->crtc_hdisplay - width + 1) / 2;
|
||||
border += border & 1; /* make the border even */
|
||||
|
||||
mode->crtc_hdisplay = width;
|
||||
mode->crtc_hblank_start = width + border;
|
||||
mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
|
||||
adjusted_mode->crtc_hdisplay = width;
|
||||
adjusted_mode->crtc_hblank_start = width + border;
|
||||
adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width;
|
||||
|
||||
mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
|
||||
mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
|
||||
adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos;
|
||||
adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width;
|
||||
}
|
||||
|
||||
static void
|
||||
centre_vertically(struct drm_display_mode *mode,
|
||||
centre_vertically(struct drm_display_mode *adjusted_mode,
|
||||
int height)
|
||||
{
|
||||
u32 border, sync_pos, blank_width, sync_width;
|
||||
|
||||
/* keep the vsync and vblank widths constant */
|
||||
sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
|
||||
blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
|
||||
sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start;
|
||||
blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start;
|
||||
sync_pos = (blank_width - sync_width + 1) / 2;
|
||||
|
||||
border = (mode->vdisplay - height + 1) / 2;
|
||||
border = (adjusted_mode->crtc_vdisplay - height + 1) / 2;
|
||||
|
||||
mode->crtc_vdisplay = height;
|
||||
mode->crtc_vblank_start = height + border;
|
||||
mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
|
||||
adjusted_mode->crtc_vdisplay = height;
|
||||
adjusted_mode->crtc_vblank_start = height + border;
|
||||
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width;
|
||||
|
||||
mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
|
||||
mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
|
||||
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos;
|
||||
adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width;
|
||||
}
|
||||
|
||||
static inline u32 panel_fitter_scaling(u32 source, u32 target)
|
||||
@ -230,11 +226,11 @@ static inline u32 panel_fitter_scaling(u32 source, u32 target)
|
||||
static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
|
||||
u32 *pfit_control)
|
||||
{
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
u32 scaled_width = adjusted_mode->hdisplay *
|
||||
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
u32 scaled_width = adjusted_mode->crtc_hdisplay *
|
||||
pipe_config->pipe_src_h;
|
||||
u32 scaled_height = pipe_config->pipe_src_w *
|
||||
adjusted_mode->vdisplay;
|
||||
adjusted_mode->crtc_vdisplay;
|
||||
|
||||
/* 965+ is easy, it does everything in hw */
|
||||
if (scaled_width > scaled_height)
|
||||
@ -243,7 +239,7 @@ static void i965_scale_aspect(struct intel_crtc_state *pipe_config,
|
||||
else if (scaled_width < scaled_height)
|
||||
*pfit_control |= PFIT_ENABLE |
|
||||
PFIT_SCALING_LETTER;
|
||||
else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w)
|
||||
else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w)
|
||||
*pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
|
||||
}
|
||||
|
||||
@ -252,10 +248,10 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
|
||||
u32 *border)
|
||||
{
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
u32 scaled_width = adjusted_mode->hdisplay *
|
||||
u32 scaled_width = adjusted_mode->crtc_hdisplay *
|
||||
pipe_config->pipe_src_h;
|
||||
u32 scaled_height = pipe_config->pipe_src_w *
|
||||
adjusted_mode->vdisplay;
|
||||
adjusted_mode->crtc_vdisplay;
|
||||
u32 bits;
|
||||
|
||||
/*
|
||||
@ -269,9 +265,9 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
|
||||
pipe_config->pipe_src_h);
|
||||
|
||||
*border = LVDS_BORDER_ENABLE;
|
||||
if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) {
|
||||
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) {
|
||||
bits = panel_fitter_scaling(pipe_config->pipe_src_h,
|
||||
adjusted_mode->vdisplay);
|
||||
adjusted_mode->crtc_vdisplay);
|
||||
|
||||
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
|
||||
bits << PFIT_VERT_SCALE_SHIFT);
|
||||
@ -285,9 +281,9 @@ static void i9xx_scale_aspect(struct intel_crtc_state *pipe_config,
|
||||
pipe_config->pipe_src_w);
|
||||
|
||||
*border = LVDS_BORDER_ENABLE;
|
||||
if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
|
||||
if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
|
||||
bits = panel_fitter_scaling(pipe_config->pipe_src_w,
|
||||
adjusted_mode->hdisplay);
|
||||
adjusted_mode->crtc_hdisplay);
|
||||
|
||||
*pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
|
||||
bits << PFIT_VERT_SCALE_SHIFT);
|
||||
@ -310,13 +306,11 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
|
||||
{
|
||||
struct drm_device *dev = intel_crtc->base.dev;
|
||||
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
|
||||
struct drm_display_mode *adjusted_mode;
|
||||
|
||||
adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
|
||||
|
||||
/* Native modes don't need fitting */
|
||||
if (adjusted_mode->hdisplay == pipe_config->pipe_src_w &&
|
||||
adjusted_mode->vdisplay == pipe_config->pipe_src_h)
|
||||
if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w &&
|
||||
adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h)
|
||||
goto out;
|
||||
|
||||
switch (fitting_mode) {
|
||||
@ -342,8 +336,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
|
||||
* Full scaling, even if it changes the aspect ratio.
|
||||
* Fortunately this is all done for us in hw.
|
||||
*/
|
||||
if (pipe_config->pipe_src_h != adjusted_mode->vdisplay ||
|
||||
pipe_config->pipe_src_w != adjusted_mode->hdisplay) {
|
||||
if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay ||
|
||||
pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) {
|
||||
pfit_control |= PFIT_ENABLE;
|
||||
if (INTEL_INFO(dev)->gen >= 4)
|
||||
pfit_control |= PFIT_SCALING_AUTO;
|
||||
@ -542,9 +536,10 @@ static u32 vlv_get_backlight(struct intel_connector *connector)
|
||||
static u32 bxt_get_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
return I915_READ(BXT_BLC_PWM_DUTY1);
|
||||
return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
|
||||
}
|
||||
|
||||
static u32 pwm_get_backlight(struct intel_connector *connector)
|
||||
@ -566,7 +561,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
|
||||
if (panel->backlight.enabled) {
|
||||
val = dev_priv->display.get_backlight(connector);
|
||||
val = panel->backlight.get(connector);
|
||||
val = intel_panel_compute_brightness(connector, val);
|
||||
}
|
||||
|
||||
@ -640,8 +635,9 @@ static void bxt_set_backlight(struct intel_connector *connector, u32 level)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
|
||||
I915_WRITE(BXT_BLC_PWM_DUTY1, level);
|
||||
I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
|
||||
}
|
||||
|
||||
static void pwm_set_backlight(struct intel_connector *connector, u32 level)
|
||||
@ -655,13 +651,12 @@ static void pwm_set_backlight(struct intel_connector *connector, u32 level)
|
||||
static void
|
||||
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
|
||||
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
|
||||
|
||||
level = intel_panel_compute_brightness(connector, level);
|
||||
dev_priv->display.set_backlight(connector, level);
|
||||
panel->backlight.set(connector, level);
|
||||
}
|
||||
|
||||
/* set backlight brightness to level in range [0..max], scaling wrt hw min */
|
||||
@ -793,12 +788,20 @@ static void bxt_disable_backlight(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
u32 tmp;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
u32 tmp, val;
|
||||
|
||||
intel_panel_actually_set_backlight(connector, 0);
|
||||
|
||||
tmp = I915_READ(BXT_BLC_PWM_CTL1);
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
|
||||
tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
|
||||
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
|
||||
tmp & ~BXT_BLC_PWM_ENABLE);
|
||||
|
||||
if (panel->backlight.controller == 1) {
|
||||
val = I915_READ(UTIL_PIN_CTL);
|
||||
val &= ~UTIL_PIN_ENABLE;
|
||||
I915_WRITE(UTIL_PIN_CTL, val);
|
||||
}
|
||||
}
|
||||
|
||||
static void pwm_disable_backlight(struct intel_connector *connector)
|
||||
@ -836,7 +839,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
|
||||
if (panel->backlight.device)
|
||||
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
|
||||
panel->backlight.enabled = false;
|
||||
dev_priv->display.disable_backlight(connector);
|
||||
panel->backlight.disable(connector);
|
||||
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
}
|
||||
@ -1030,16 +1033,38 @@ static void bxt_enable_backlight(struct intel_connector *connector)
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
u32 pwm_ctl;
|
||||
enum pipe pipe = intel_get_pipe_from_connector(connector);
|
||||
u32 pwm_ctl, val;
|
||||
|
||||
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
|
||||
/* To use the 2nd set of backlight registers, the utility pin has to be
 * enabled in PWM mode.
 * The field should only be changed when the utility pin is disabled.
 */
|
||||
if (panel->backlight.controller == 1) {
|
||||
val = I915_READ(UTIL_PIN_CTL);
|
||||
if (val & UTIL_PIN_ENABLE) {
|
||||
DRM_DEBUG_KMS("util pin already enabled\n");
|
||||
val &= ~UTIL_PIN_ENABLE;
|
||||
I915_WRITE(UTIL_PIN_CTL, val);
|
||||
}
|
||||
|
||||
val = 0;
|
||||
if (panel->backlight.util_pin_active_low)
|
||||
val |= UTIL_PIN_POLARITY;
|
||||
I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
|
||||
UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
|
||||
}
|
||||
|
||||
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
|
||||
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
|
||||
DRM_DEBUG_KMS("backlight already enabled\n");
|
||||
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
|
||||
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
|
||||
pwm_ctl);
|
||||
}
|
||||
|
||||
I915_WRITE(BXT_BLC_PWM_FREQ1, panel->backlight.max);
|
||||
I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
|
||||
panel->backlight.max);
|
||||
|
||||
intel_panel_actually_set_backlight(connector, panel->backlight.level);
|
||||
|
||||
@ -1047,9 +1072,10 @@ static void bxt_enable_backlight(struct intel_connector *connector)
|
||||
if (panel->backlight.active_low_pwm)
|
||||
pwm_ctl |= BXT_BLC_PWM_POLARITY;
|
||||
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
|
||||
POSTING_READ(BXT_BLC_PWM_CTL1);
|
||||
I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
|
||||
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
|
||||
POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
|
||||
I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
|
||||
pwm_ctl | BXT_BLC_PWM_ENABLE);
|
||||
}
|
||||
|
||||
static void pwm_enable_backlight(struct intel_connector *connector)
|
||||
@ -1085,7 +1111,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
|
||||
panel->backlight.device->props.max_brightness);
|
||||
}
|
||||
|
||||
dev_priv->display.enable_backlight(connector);
|
||||
panel->backlight.enable(connector);
|
||||
panel->backlight.enabled = true;
|
||||
if (panel->backlight.device)
|
||||
panel->backlight.device->props.power = FB_BLANK_UNBLANK;
|
||||
@ -1113,10 +1139,10 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
|
||||
* callback needs to take this into account.
|
||||
*/
|
||||
if (panel->backlight.enabled) {
|
||||
if (panel->backlight_power) {
|
||||
if (panel->backlight.power) {
|
||||
bool enable = bd->props.power == FB_BLANK_UNBLANK &&
|
||||
bd->props.brightness != 0;
|
||||
panel->backlight_power(connector, enable);
|
||||
panel->backlight.power(connector, enable);
|
||||
}
|
||||
} else {
|
||||
bd->props.power = FB_BLANK_POWERDOWN;
|
||||
@ -1341,6 +1367,7 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
|
||||
u32 pwm;
|
||||
|
||||
@ -1349,12 +1376,12 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!dev_priv->display.backlight_hz_to_pwm) {
|
||||
if (!panel->backlight.hz_to_pwm) {
|
||||
DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
pwm = dev_priv->display.backlight_hz_to_pwm(connector, pwm_freq_hz);
|
||||
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
|
||||
if (!pwm) {
|
||||
DRM_DEBUG_KMS("backlight frequency conversion failed\n");
|
||||
return 0;
|
||||
@ -1568,10 +1595,28 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
|
||||
struct intel_panel *panel = &connector->panel;
|
||||
u32 pwm_ctl, val;
|
||||
|
||||
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
|
||||
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
|
||||
/*
 * For BXT, hard code the backlight controller to 0.
 * TODO: read the controller value from VBT and generalize.
 */
panel->backlight.controller = 0;

panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);
pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));

/* Keep the check for whether controller 1 is to be programmed.
 * This will come into effect once the VBT parsing
 * is fixed for controller selection, and controller 1 is used
 * for a particular display configuration.
 */
|
||||
if (panel->backlight.controller == 1) {
|
||||
val = I915_READ(UTIL_PIN_CTL);
|
||||
panel->backlight.util_pin_active_low =
|
||||
val & UTIL_PIN_POLARITY;
|
||||
}
|
||||
|
||||
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
|
||||
panel->backlight.max =
|
||||
I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
|
||||
|
||||
if (!panel->backlight.max)
|
||||
panel->backlight.max = get_backlight_max_vbt(connector);
|
||||
@ -1639,9 +1684,13 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
|
||||
}
|
||||
}
|
||||
|
||||
/* ensure intel_panel has been initialized first */
|
||||
if (WARN_ON(!panel->backlight.setup))
|
||||
return -ENODEV;
|
||||
|
||||
/* set level and max in panel struct */
|
||||
mutex_lock(&dev_priv->backlight_lock);
|
||||
ret = dev_priv->display.setup_backlight(intel_connector, pipe);
|
||||
ret = panel->backlight.setup(intel_connector, pipe);
|
||||
mutex_unlock(&dev_priv->backlight_lock);
|
||||
|
||||
if (ret) {
|
||||
@ -1673,62 +1722,66 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
|
||||
}
|
||||
|
||||
/* Set up chip specific backlight functions */
|
||||
void intel_panel_init_backlight_funcs(struct drm_device *dev)
|
||||
static void
|
||||
intel_panel_init_backlight_funcs(struct intel_panel *panel)
|
||||
{
|
||||
struct intel_connector *intel_connector =
|
||||
container_of(panel, struct intel_connector, panel);
|
||||
struct drm_device *dev = intel_connector->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
if (IS_BROXTON(dev)) {
|
||||
dev_priv->display.setup_backlight = bxt_setup_backlight;
|
||||
dev_priv->display.enable_backlight = bxt_enable_backlight;
|
||||
dev_priv->display.disable_backlight = bxt_disable_backlight;
|
||||
dev_priv->display.set_backlight = bxt_set_backlight;
|
||||
dev_priv->display.get_backlight = bxt_get_backlight;
|
||||
panel->backlight.setup = bxt_setup_backlight;
|
||||
panel->backlight.enable = bxt_enable_backlight;
|
||||
panel->backlight.disable = bxt_disable_backlight;
|
||||
panel->backlight.set = bxt_set_backlight;
|
||||
panel->backlight.get = bxt_get_backlight;
|
||||
} else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) {
|
||||
dev_priv->display.setup_backlight = lpt_setup_backlight;
|
||||
dev_priv->display.enable_backlight = lpt_enable_backlight;
|
||||
dev_priv->display.disable_backlight = lpt_disable_backlight;
|
||||
dev_priv->display.set_backlight = lpt_set_backlight;
|
||||
dev_priv->display.get_backlight = lpt_get_backlight;
|
||||
panel->backlight.setup = lpt_setup_backlight;
|
||||
panel->backlight.enable = lpt_enable_backlight;
|
||||
panel->backlight.disable = lpt_disable_backlight;
|
||||
panel->backlight.set = lpt_set_backlight;
|
||||
panel->backlight.get = lpt_get_backlight;
|
||||
if (HAS_PCH_LPT(dev))
|
||||
dev_priv->display.backlight_hz_to_pwm = lpt_hz_to_pwm;
|
||||
panel->backlight.hz_to_pwm = lpt_hz_to_pwm;
|
||||
else
|
||||
dev_priv->display.backlight_hz_to_pwm = spt_hz_to_pwm;
|
||||
panel->backlight.hz_to_pwm = spt_hz_to_pwm;
|
||||
} else if (HAS_PCH_SPLIT(dev)) {
|
||||
dev_priv->display.setup_backlight = pch_setup_backlight;
|
||||
dev_priv->display.enable_backlight = pch_enable_backlight;
|
||||
dev_priv->display.disable_backlight = pch_disable_backlight;
|
||||
dev_priv->display.set_backlight = pch_set_backlight;
|
||||
dev_priv->display.get_backlight = pch_get_backlight;
|
||||
dev_priv->display.backlight_hz_to_pwm = pch_hz_to_pwm;
|
||||
panel->backlight.setup = pch_setup_backlight;
|
||||
panel->backlight.enable = pch_enable_backlight;
|
||||
panel->backlight.disable = pch_disable_backlight;
|
||||
panel->backlight.set = pch_set_backlight;
|
||||
panel->backlight.get = pch_get_backlight;
|
||||
panel->backlight.hz_to_pwm = pch_hz_to_pwm;
|
||||
} else if (IS_VALLEYVIEW(dev)) {
|
||||
if (dev_priv->vbt.has_mipi) {
|
||||
dev_priv->display.setup_backlight = pwm_setup_backlight;
|
||||
dev_priv->display.enable_backlight = pwm_enable_backlight;
|
||||
dev_priv->display.disable_backlight = pwm_disable_backlight;
|
||||
dev_priv->display.set_backlight = pwm_set_backlight;
|
||||
dev_priv->display.get_backlight = pwm_get_backlight;
|
||||
panel->backlight.setup = pwm_setup_backlight;
|
||||
panel->backlight.enable = pwm_enable_backlight;
|
||||
panel->backlight.disable = pwm_disable_backlight;
|
||||
panel->backlight.set = pwm_set_backlight;
|
||||
panel->backlight.get = pwm_get_backlight;
|
||||
} else {
|
||||
dev_priv->display.setup_backlight = vlv_setup_backlight;
|
||||
dev_priv->display.enable_backlight = vlv_enable_backlight;
|
||||
dev_priv->display.disable_backlight = vlv_disable_backlight;
|
||||
dev_priv->display.set_backlight = vlv_set_backlight;
|
||||
dev_priv->display.get_backlight = vlv_get_backlight;
|
||||
dev_priv->display.backlight_hz_to_pwm = vlv_hz_to_pwm;
|
||||
panel->backlight.setup = vlv_setup_backlight;
|
||||
panel->backlight.enable = vlv_enable_backlight;
|
||||
panel->backlight.disable = vlv_disable_backlight;
|
||||
panel->backlight.set = vlv_set_backlight;
|
||||
panel->backlight.get = vlv_get_backlight;
|
||||
panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
|
||||
}
|
||||
} else if (IS_GEN4(dev)) {
|
||||
dev_priv->display.setup_backlight = i965_setup_backlight;
|
||||
dev_priv->display.enable_backlight = i965_enable_backlight;
|
||||
dev_priv->display.disable_backlight = i965_disable_backlight;
|
||||
dev_priv->display.set_backlight = i9xx_set_backlight;
|
||||
dev_priv->display.get_backlight = i9xx_get_backlight;
|
||||
dev_priv->display.backlight_hz_to_pwm = i965_hz_to_pwm;
|
||||
panel->backlight.setup = i965_setup_backlight;
|
||||
panel->backlight.enable = i965_enable_backlight;
|
||||
panel->backlight.disable = i965_disable_backlight;
|
||||
panel->backlight.set = i9xx_set_backlight;
|
||||
panel->backlight.get = i9xx_get_backlight;
|
||||
panel->backlight.hz_to_pwm = i965_hz_to_pwm;
|
||||
} else {
|
||||
dev_priv->display.setup_backlight = i9xx_setup_backlight;
|
||||
dev_priv->display.enable_backlight = i9xx_enable_backlight;
|
||||
dev_priv->display.disable_backlight = i9xx_disable_backlight;
|
||||
dev_priv->display.set_backlight = i9xx_set_backlight;
|
||||
dev_priv->display.get_backlight = i9xx_get_backlight;
|
||||
dev_priv->display.backlight_hz_to_pwm = i9xx_hz_to_pwm;
|
||||
panel->backlight.setup = i9xx_setup_backlight;
|
||||
panel->backlight.enable = i9xx_enable_backlight;
|
||||
panel->backlight.disable = i9xx_disable_backlight;
|
||||
panel->backlight.set = i9xx_set_backlight;
|
||||
panel->backlight.get = i9xx_get_backlight;
|
||||
panel->backlight.hz_to_pwm = i9xx_hz_to_pwm;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1736,6 +1789,8 @@ int intel_panel_init(struct intel_panel *panel,
|
||||
struct drm_display_mode *fixed_mode,
|
||||
struct drm_display_mode *downclock_mode)
|
||||
{
|
||||
intel_panel_init_backlight_funcs(panel);
|
||||
|
||||
panel->fixed_mode = fixed_mode;
|
||||
panel->downclock_mode = downclock_mode;
|
||||
|
||||
|
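The hunks above convert the backlight hooks from dev_priv->display.* function pointers into per-connector hooks stored in the panel state, so different connectors (eDP vs DSI, for example) can carry different backlight implementations. The struct behind panel->backlight is defined in intel_drv.h and is not part of the hunks shown here; the sketch below is only an illustration of what that sub-struct is assumed to look like after this series, with the member names taken from the assignments above and everything else (field order, extra fields) guessed.

/* Sketch only -- not the actual intel_drv.h definition. Kernel-internal
 * types (u32, enum pipe, struct intel_connector) are assumed. */
struct intel_panel_backlight_sketch {
	bool present;
	bool enabled;
	u32 level;
	u32 max;
	bool active_low_pwm;
	bool util_pin_active_low;	/* BXT */
	int controller;			/* BXT */

	/* hooks filled in by intel_panel_init_backlight_funcs() */
	int (*setup)(struct intel_connector *connector, enum pipe pipe);
	u32 (*get)(struct intel_connector *connector);
	void (*set)(struct intel_connector *connector, u32 level);
	void (*disable)(struct intel_connector *connector);
	void (*enable)(struct intel_connector *connector);
	u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
	void (*power)(struct intel_connector *connector, bool enable);
};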
File diff suppressed because it is too large
@ -717,7 +717,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct i915_workarounds *w = &dev_priv->workarounds;
|
||||
|
||||
if (WARN_ON_ONCE(w->count == 0))
|
||||
if (w->count == 0)
|
||||
return 0;
|
||||
|
||||
ring->gpu_caches_dirty = true;
|
||||
@ -800,42 +800,29 @@ static int wa_add(struct drm_i915_private *dev_priv,
|
||||
|
||||
#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
|
||||
|
||||
static int bdw_init_workarounds(struct intel_engine_cs *ring)
|
||||
static int gen8_init_workarounds(struct intel_engine_cs *ring)
|
||||
{
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
|
||||
|
||||
/* WaDisableAsyncFlipPerfMode:bdw */
|
||||
/* WaDisableAsyncFlipPerfMode:bdw,chv */
|
||||
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
|
||||
|
||||
/* WaDisablePartialInstShootdown:bdw */
|
||||
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
|
||||
/* WaDisablePartialInstShootdown:bdw,chv */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
|
||||
STALL_DOP_GATING_DISABLE);
|
||||
|
||||
/* WaDisableDopClockGating:bdw */
|
||||
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
|
||||
DOP_CLOCK_GATING_DISABLE);
|
||||
|
||||
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
|
||||
GEN8_SAMPLER_POWER_BYPASS_DIS);
|
||||
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
|
||||
|
||||
/* Use Force Non-Coherent whenever executing a 3D context. This is a
|
||||
* workaround for a possible hang in the unlikely event a TLB
|
||||
* invalidation occurs during a PSD flush.
|
||||
*/
|
||||
/* WaForceEnableNonCoherent:bdw,chv */
|
||||
/* WaHdcDisableFetchWhenMasked:bdw,chv */
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0,
|
||||
/* WaForceEnableNonCoherent:bdw */
|
||||
HDC_FORCE_NON_COHERENT |
|
||||
/* WaForceContextSaveRestoreNonCoherent:bdw */
|
||||
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
|
||||
/* WaHdcDisableFetchWhenMasked:bdw */
|
||||
HDC_DONOT_FETCH_MEM_WHEN_MASKED |
|
||||
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
|
||||
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
|
||||
HDC_FORCE_NON_COHERENT);
|
||||
|
||||
/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
|
||||
* "The Hierarchical Z RAW Stall Optimization allows non-overlapping
|
||||
@ -843,13 +830,12 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
|
||||
* stalling waiting for the earlier ones to write to Hierarchical Z
|
||||
* buffer."
|
||||
*
|
||||
* This optimization is off by default for Broadwell; turn it on.
|
||||
* This optimization is off by default for BDW and CHV; turn it on.
|
||||
*/
|
||||
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
|
||||
|
||||
/* Wa4x4STCOptimizationDisable:bdw */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1,
|
||||
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
|
||||
/* Wa4x4STCOptimizationDisable:bdw,chv */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
|
||||
|
||||
/*
|
||||
* BSpec recommends 8x4 when MSAA is used,
|
||||
@ -866,56 +852,51 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int chv_init_workarounds(struct intel_engine_cs *ring)
|
||||
static int bdw_init_workarounds(struct intel_engine_cs *ring)
|
||||
{
|
||||
int ret;
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
|
||||
ret = gen8_init_workarounds(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* WaDisableAsyncFlipPerfMode:chv */
|
||||
WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
|
||||
/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
|
||||
|
||||
/* WaDisablePartialInstShootdown:chv */
|
||||
/* WaDisableThreadStallDopClockGating:chv */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
|
||||
STALL_DOP_GATING_DISABLE);
|
||||
/* WaDisableDopClockGating:bdw */
|
||||
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
|
||||
DOP_CLOCK_GATING_DISABLE);
|
||||
|
||||
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
|
||||
GEN8_SAMPLER_POWER_BYPASS_DIS);
|
||||
|
||||
/* Use Force Non-Coherent whenever executing a 3D context. This is a
|
||||
* workaround for a possible hang in the unlikely event a TLB
|
||||
* invalidation occurs during a PSD flush.
|
||||
*/
|
||||
/* WaForceEnableNonCoherent:chv */
|
||||
/* WaHdcDisableFetchWhenMasked:chv */
|
||||
WA_SET_BIT_MASKED(HDC_CHICKEN0,
|
||||
HDC_FORCE_NON_COHERENT |
|
||||
HDC_DONOT_FETCH_MEM_WHEN_MASKED);
|
||||
/* WaForceContextSaveRestoreNonCoherent:bdw */
|
||||
HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
|
||||
/* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
|
||||
(IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
|
||||
|
||||
/* According to the CACHE_MODE_0 default value documentation, some
|
||||
* CHV platforms disable this optimization by default. Turn it on.
|
||||
*/
|
||||
WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Wa4x4STCOptimizationDisable:chv */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1,
|
||||
GEN8_4x4_STC_OPTIMIZATION_DISABLE);
|
||||
static int chv_init_workarounds(struct intel_engine_cs *ring)
|
||||
{
|
||||
int ret;
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
ret = gen8_init_workarounds(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* WaDisableThreadStallDopClockGating:chv */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
|
||||
|
||||
/* Improve HiZ throughput on CHV. */
|
||||
WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
|
||||
|
||||
/*
|
||||
* BSpec recommends 8x4 when MSAA is used,
|
||||
* however in practice 16x4 seems fastest.
|
||||
*
|
||||
* Note that PS/WM thread counts depend on the WIZ hashing
|
||||
* disable bit, which we don't touch here, but it's good
|
||||
* to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
|
||||
*/
|
||||
WA_SET_FIELD_MASKED(GEN7_GT_MODE,
|
||||
GEN6_WIZ_HASHING_MASK,
|
||||
GEN6_WIZ_HASHING_16x4);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -961,10 +942,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
|
||||
}
|
||||
|
||||
/* Wa4x4STCOptimizationDisable:skl,bxt */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
|
||||
|
||||
/* WaDisablePartialResolveInVc:skl,bxt */
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
|
||||
WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
|
||||
GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
|
||||
|
||||
/* WaCcsTlbPrefetchDisable:skl,bxt */
|
||||
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
|
||||
@ -1041,10 +1021,13 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring)
|
||||
|
||||
static int skl_init_workarounds(struct intel_engine_cs *ring)
|
||||
{
|
||||
int ret;
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
gen9_init_workarounds(ring);
|
||||
ret = gen9_init_workarounds(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* WaDisablePowerCompilerClockGating:skl */
|
||||
if (INTEL_REVID(dev) == SKL_REVID_B0)
|
||||
@ -1081,10 +1064,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
|
||||
|
||||
static int bxt_init_workarounds(struct intel_engine_cs *ring)
|
||||
{
|
||||
int ret;
|
||||
struct drm_device *dev = ring->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
|
||||
gen9_init_workarounds(ring);
|
||||
ret = gen9_init_workarounds(ring);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* WaDisableThreadStallDopClockGating:bxt */
|
||||
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
|
||||
@ -2637,6 +2623,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
|
||||
GEN8_RING_SEMAPHORE_INIT;
|
||||
}
|
||||
} else if (INTEL_INFO(dev)->gen >= 6) {
|
||||
ring->init_context = intel_rcs_ctx_init;
|
||||
ring->add_request = gen6_add_request;
|
||||
ring->flush = gen7_render_ring_flush;
|
||||
if (INTEL_INFO(dev)->gen == 6)
|
||||
|
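A note for readers skimming these workaround hunks: WA_SET_BIT_MASKED()/WA_CLR_BIT_MASKED() (and the WA_WRITE() definition shown above) queue writes to so-called masked registers, where the upper 16 bits of the written value select which of the lower 16 bits the hardware actually updates. The lines below are only a simplified illustration of that convention, not the driver's real macro definitions:

/* Illustration only -- simplified form of the i915 masked-write helpers. */
#define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* set bit(s) a   */
#define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)		/* clear bit(s) a */

/*
 * WA_SET_BIT_MASKED(addr, mask) then amounts to recording a write of
 * EXAMPLE_MASKED_BIT_ENABLE(mask) to addr via wa_add(), so several
 * workarounds can touch the same register without clobbering each other,
 * and the whole list can be replayed at context creation time (see
 * intel_ring_workarounds_emit() above).
 */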
@ -657,9 +657,15 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
|
||||
}
|
||||
} else {
|
||||
if (enable_requested) {
|
||||
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
|
||||
POSTING_READ(HSW_PWR_WELL_DRIVER);
|
||||
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
|
||||
if (IS_SKYLAKE(dev) &&
|
||||
(power_well->data == SKL_DISP_PW_1) &&
|
||||
(intel_csr_load_status_get(dev_priv) == FW_LOADED))
|
||||
DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n");
|
||||
else {
|
||||
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
|
||||
POSTING_READ(HSW_PWR_WELL_DRIVER);
|
||||
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
|
||||
}
|
||||
|
||||
if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
|
||||
power_well->data == SKL_DISP_PW_2) {
|
||||
@ -988,8 +994,29 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
|
||||
lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
|
||||
u32 phy_control = dev_priv->chv_phy_control;
|
||||
u32 phy_status = 0;
|
||||
u32 phy_status_mask = 0xffffffff;
|
||||
u32 tmp;
|
||||
|
||||
/*
|
||||
* The BIOS can leave the PHY in some weird state
|
||||
* where it doesn't fully power down some parts.
|
||||
* Disable the asserts until the PHY has been fully
|
||||
* reset (ie. the power well has been disabled at
|
||||
* least once).
|
||||
*/
|
||||
if (!dev_priv->chv_phy_assert[DPIO_PHY0])
|
||||
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) |
|
||||
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) |
|
||||
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) |
|
||||
PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) |
|
||||
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) |
|
||||
PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1));
|
||||
|
||||
if (!dev_priv->chv_phy_assert[DPIO_PHY1])
|
||||
phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) |
|
||||
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) |
|
||||
PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1));
|
||||
|
||||
if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) {
|
||||
phy_status |= PHY_POWERGOOD(DPIO_PHY0);
|
||||
|
||||
@ -1050,11 +1077,13 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
|
||||
phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1);
|
||||
}
|
||||
|
||||
phy_status &= phy_status_mask;
|
||||
|
||||
/*
|
||||
* The PHY may be busy with some initial calibration and whatnot,
|
||||
* so the power state can take a while to actually change.
|
||||
*/
|
||||
if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10))
|
||||
if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10))
|
||||
WARN(phy_status != tmp,
|
||||
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
|
||||
tmp, phy_status, dev_priv->chv_phy_control);
|
||||
@ -1147,6 +1176,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
|
||||
phy, dev_priv->chv_phy_control);
|
||||
|
||||
/* PHY is fully reset now, so we can enable the PHY state asserts */
|
||||
dev_priv->chv_phy_assert[phy] = true;
|
||||
|
||||
assert_chv_phy_status(dev_priv);
|
||||
}
|
||||
|
||||
@ -1156,6 +1188,16 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
|
||||
enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
|
||||
u32 reg, val, expected, actual;
|
||||
|
||||
/*
|
||||
* The BIOS can leave the PHY in some weird state
|
||||
* where it doesn't fully power down some parts.
|
||||
* Disable the asserts until the PHY has been fully
|
||||
* reset (ie. the power well has been disabled at
|
||||
* least once).
|
||||
*/
|
||||
if (!dev_priv->chv_phy_assert[phy])
|
||||
return;
|
||||
|
||||
if (ch == DPIO_CH0)
|
||||
reg = _CHV_CMN_DW0_CH0;
|
||||
else
|
||||
@ -1823,7 +1865,6 @@ static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
|
||||
|
||||
/* Make sure we're not suspended first. */
|
||||
pm_runtime_get_sync(device);
|
||||
pm_runtime_disable(device);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -1912,6 +1953,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
|
||||
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1);
|
||||
|
||||
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
|
||||
|
||||
dev_priv->chv_phy_assert[DPIO_PHY0] = false;
|
||||
} else {
|
||||
dev_priv->chv_phy_assert[DPIO_PHY0] = true;
|
||||
}
|
||||
|
||||
if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) {
|
||||
@ -1930,6 +1975,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
|
||||
PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0);
|
||||
|
||||
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
|
||||
|
||||
dev_priv->chv_phy_assert[DPIO_PHY1] = false;
|
||||
} else {
|
||||
dev_priv->chv_phy_assert[DPIO_PHY1] = true;
|
||||
}
|
||||
|
||||
I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
|
||||
@ -2115,8 +2164,6 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
|
||||
if (!HAS_RUNTIME_PM(dev))
|
||||
return;
|
||||
|
||||
pm_runtime_set_active(device);
|
||||
|
||||
/*
|
||||
* RPM depends on RC6 to save restore the GT HW context, so make RC6 a
|
||||
* requirement.
|
||||
|
@ -106,6 +106,11 @@ struct intel_sdvo {
|
||||
uint32_t color_range;
|
||||
bool color_range_auto;
|
||||
|
||||
/**
|
||||
* HDMI user specified aspect ratio
|
||||
*/
|
||||
enum hdmi_picture_aspect aspect_ratio;
|
||||
|
||||
/**
|
||||
* This is set if we're going to treat the device as TV-out.
|
||||
*
|
||||
@ -603,11 +608,11 @@ log_fail:
|
||||
return false;
|
||||
}
|
||||
|
||||
static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
|
||||
static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode)
|
||||
{
|
||||
if (mode->clock >= 100000)
|
||||
if (adjusted_mode->crtc_clock >= 100000)
|
||||
return 1;
|
||||
else if (mode->clock >= 50000)
|
||||
else if (adjusted_mode->crtc_clock >= 50000)
|
||||
return 2;
|
||||
else
|
||||
return 4;
|
||||
@ -1181,6 +1186,10 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
|
||||
if (intel_sdvo->is_tv)
|
||||
i9xx_adjust_sdvo_tv_clock(pipe_config);
|
||||
|
||||
/* Propagate the user-selected PAR to the adjusted mode's picture_aspect_ratio */
|
||||
if (intel_sdvo->is_hdmi)
|
||||
adjusted_mode->picture_aspect_ratio = intel_sdvo->aspect_ratio;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -1189,8 +1198,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
|
||||
struct drm_device *dev = intel_encoder->base.dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
|
||||
struct drm_display_mode *adjusted_mode =
|
||||
&crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
struct drm_display_mode *mode = &crtc->config->base.mode;
|
||||
struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
|
||||
u32 sdvox;
|
||||
@ -2044,6 +2052,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (property == connector->dev->mode_config.aspect_ratio_property) {
|
||||
switch (val) {
|
||||
case DRM_MODE_PICTURE_ASPECT_NONE:
|
||||
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
|
||||
break;
|
||||
case DRM_MODE_PICTURE_ASPECT_4_3:
|
||||
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
|
||||
break;
|
||||
case DRM_MODE_PICTURE_ASPECT_16_9:
|
||||
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
goto done;
|
||||
}
|
||||
|
||||
#define CHECK_PROPERTY(name, NAME) \
|
||||
if (intel_sdvo_connector->name == property) { \
|
||||
if (intel_sdvo_connector->cur_##name == temp_value) return 0; \
|
||||
@ -2383,6 +2408,8 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
|
||||
intel_attach_broadcast_rgb_property(&connector->base.base);
|
||||
intel_sdvo->color_range_auto = true;
|
||||
}
|
||||
intel_attach_aspect_ratio_property(&connector->base.base);
|
||||
intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
|
||||
}
|
||||
|
||||
static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
|
||||
|
@ -53,13 +53,15 @@ format_is_yuv(uint32_t format)
	}
}

static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
			      int usecs)
{
	/* paranoia */
	if (!mode->crtc_htotal)
	if (!adjusted_mode->crtc_htotal)
		return 1;

	return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
	return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock,
			    1000 * adjusted_mode->crtc_htotal);
}

/**
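As a quick sanity check of the conversion above (the numbers here are picked purely for illustration and are not from the patch): with crtc_clock = 148500 kHz and crtc_htotal = 2200, one scanline takes 2200 / 148500 kHz ≈ 14.8 µs, so usecs_to_scanlines(adjusted_mode, 100) evaluates to DIV_ROUND_UP(100 * 148500, 1000 * 2200) = DIV_ROUND_UP(14850000, 2200000) = 7, i.e. 100 µs rounded up to whole scanlines. Using the adjusted mode's crtc_* fields matters because only those reflect the timings the pipe is actually driving.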
@ -79,19 +81,19 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
|
||||
void intel_pipe_update_start(struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_device *dev = crtc->base.dev;
|
||||
const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
|
||||
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
|
||||
enum pipe pipe = crtc->pipe;
|
||||
long timeout = msecs_to_jiffies_timeout(1);
|
||||
int scanline, min, max, vblank_start;
|
||||
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
|
||||
DEFINE_WAIT(wait);
|
||||
|
||||
vblank_start = mode->crtc_vblank_start;
|
||||
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
|
||||
vblank_start = adjusted_mode->crtc_vblank_start;
|
||||
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
|
||||
vblank_start = DIV_ROUND_UP(vblank_start, 2);
|
||||
|
||||
/* FIXME needs to be calibrated sensibly */
|
||||
min = vblank_start - usecs_to_scanlines(mode, 100);
|
||||
min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
|
||||
max = vblank_start - 1;
|
||||
|
||||
local_irq_disable();
|
||||
@ -190,7 +192,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
||||
const int pipe = intel_plane->pipe;
|
||||
const int plane = intel_plane->plane + 1;
|
||||
u32 plane_ctl, stride_div, stride;
|
||||
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
|
||||
const struct drm_intel_sprite_colorkey *key =
|
||||
&to_intel_plane_state(drm_plane->state)->ckey;
|
||||
unsigned long surf_addr;
|
||||
@ -209,10 +210,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
|
||||
rotation = drm_plane->state->rotation;
|
||||
plane_ctl |= skl_plane_ctl_rotation(rotation);
|
||||
|
||||
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
|
||||
pixel_size, true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
|
||||
fb->pixel_format);
|
||||
|
||||
@ -294,8 +291,6 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
|
||||
|
||||
I915_WRITE(PLANE_SURF(pipe, plane), 0);
|
||||
POSTING_READ(PLANE_SURF(pipe, plane));
|
||||
|
||||
intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -538,10 +533,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
||||
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
|
||||
sprctl |= SPRITE_PIPE_CSC_ENABLE;
|
||||
|
||||
intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size,
|
||||
true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
/* Sizes are 0 based */
|
||||
src_w--;
|
||||
src_h--;
|
||||
@ -675,10 +666,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
|
||||
if (IS_GEN6(dev))
|
||||
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
|
||||
|
||||
intel_update_sprite_watermarks(plane, crtc, src_w, src_h,
|
||||
pixel_size, true,
|
||||
src_w != crtc_w || src_h != crtc_h);
|
||||
|
||||
/* Sizes are 0 based */
|
||||
src_w--;
|
||||
src_h--;
|
||||
|
@ -1429,21 +1429,21 @@ static int ironlake_do_reset(struct drm_device *dev)
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
	I915_WRITE(ILK_GDSR,
		   ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) &
	ret = wait_for((I915_READ(ILK_GDSR) &
			ILK_GRDOM_RESET_ENABLE) == 0, 500);
	if (ret)
		return ret;

	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0);
	I915_WRITE(ILK_GDSR, 0);

	return 0;
}
@ -24,27 +24,55 @@
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_

/* MAX_PORTS is the number of ports.
 * It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h.
 * 5 should be enough, as only HSW, BDW and SKL need such a fix.
 */
#define MAX_PORTS 5

/**
 * struct i915_audio_component_ops - callbacks defined in the gfx driver
 * @owner: the module owner
 * @get_power: get the POWER_DOMAIN_AUDIO power well
 * @put_power: put the POWER_DOMAIN_AUDIO power well
 * @codec_wake_override: enable/disable generating the codec wake signal
 * @get_cdclk_freq: get the Core Display Clock in kHz
 * @sync_audio_rate: set N/CTS based on the sample rate
 */
struct i915_audio_component_ops {
	struct module *owner;
	void (*get_power)(struct device *);
	void (*put_power)(struct device *);
	void (*codec_wake_override)(struct device *, bool enable);
	int (*get_cdclk_freq)(struct device *);
	int (*sync_audio_rate)(struct device *, int port, int rate);
};

struct i915_audio_component_audio_ops {
	void *audio_ptr;
	/**
	 * Call from the i915 driver, notifying the HDA driver that
	 * pin sense and/or ELD information has changed.
	 * @audio_ptr: HDA driver object
	 * @port: which port has changed (PORTA / PORTB / PORTC etc.)
	 */
	void (*pin_eld_notify)(void *audio_ptr, int port);
};

/**
 * struct i915_audio_component - used for audio/video interaction
 * @dev: the device from the gfx driver
 * @aud_sample_rate: array of audio sample rates, one per port
 * @ops: callbacks provided by the gfx driver, called by the audio driver
 * @audio_ops: callbacks provided by the audio driver, called by the gfx driver
 */
struct i915_audio_component {
	struct device *dev;
	int aud_sample_rate[MAX_PORTS];

	const struct i915_audio_component_ops {
		struct module *owner;
		void (*get_power)(struct device *);
		void (*put_power)(struct device *);
		void (*codec_wake_override)(struct device *, bool enable);
		int (*get_cdclk_freq)(struct device *);
	} *ops;
	const struct i915_audio_component_ops *ops;

	const struct i915_audio_component_audio_ops {
		void *audio_ptr;
		/**
		 * Call from the i915 driver, notifying the HDA driver that
		 * pin sense and/or ELD information has changed.
		 * @audio_ptr: HDA driver object
		 * @port: which port has changed (PORTA / PORTB / PORTC etc.)
		 */
		void (*pin_eld_notify)(void *audio_ptr, int port);
	} *audio_ops;
	const struct i915_audio_component_audio_ops *audio_ops;
};

#endif /* _I915_COMPONENT_H_ */
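To make the split between the two ops tables concrete, here is a rough sketch of how each side is expected to call the other once the component is bound (the binding itself goes through the kernel component framework and is not part of this diff; the two example_* wrappers are invented for illustration):

#include <linux/device.h>
#include <drm/i915_component.h>

/* Audio (HDA) side: call into the gfx driver through acomp->ops. */
static void example_hda_set_rate(struct i915_audio_component *acomp,
				 int port, int rate)
{
	if (!acomp || !acomp->ops)
		return;

	if (acomp->ops->get_power)
		acomp->ops->get_power(acomp->dev);	/* hold POWER_DOMAIN_AUDIO */

	if (acomp->ops->sync_audio_rate)
		acomp->ops->sync_audio_rate(acomp->dev, port, rate);

	if (acomp->ops->put_power)
		acomp->ops->put_power(acomp->dev);
}

/* Gfx (i915) side: notify the audio driver through acomp->audio_ops. */
static void example_i915_hotplug_notify(struct i915_audio_component *acomp,
					int port)
{
	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, port);
}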
@ -690,7 +690,8 @@ struct drm_i915_gem_exec_object2 {
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1)
	__u64 flags;

	__u64 rsvd1;
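The uapi change above adds EXEC_OBJECT_SUPPORTS_48B_ADDRESS and bumps __EXEC_OBJECT_UNKNOWN_FLAGS so the kernel keeps rejecting flags it does not know about. A userspace caller that can handle objects placed above 4 GiB would opt in per object roughly as follows (a sketch, not taken from any particular userspace driver):

#include <string.h>
#include <drm/i915_drm.h>

/* Mark one execbuffer object as usable anywhere in a 48-bit PPGTT. */
static void example_setup_exec_object(struct drm_i915_gem_exec_object2 *obj,
				      __u32 handle)
{
	memset(obj, 0, sizeof(*obj));
	obj->handle = handle;
	obj->flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
	/* offset left at 0: the kernel is free to pick any placement */
}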
@ -1775,6 +1775,16 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
	return non_pcm;
}

/* There is a fixed mapping between audio pin node and display port
 * on current Intel platforms:
 * Pin Widget 5 - PORT B (port = 1 in i915 driver)
 * Pin Widget 6 - PORT C (port = 2 in i915 driver)
 * Pin Widget 7 - PORT D (port = 3 in i915 driver)
 */
static int intel_pin2port(hda_nid_t pin_nid)
{
	return pin_nid - 4;
}

/*
 * HDMI callbacks
 */
@ -1791,6 +1801,8 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
	int pin_idx = hinfo_to_pin_index(codec, hinfo);
	struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
	hda_nid_t pin_nid = per_pin->pin_nid;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct i915_audio_component *acomp = codec->bus->core.audio_component;
	bool non_pcm;
	int pinctl;

@ -1807,6 +1819,13 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
		intel_not_share_assigned_cvt(codec, pin_nid, per_pin->mux_idx);
	}

	/* Call sync_audio_rate to set the N/CTS/M manually if necessary */
	/* Todo: add DP1.2 MST audio support later */
	if (acomp && acomp->ops && acomp->ops->sync_audio_rate)
		acomp->ops->sync_audio_rate(acomp->dev,
					    intel_pin2port(pin_nid),
					    runtime->rate);

	non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
	mutex_lock(&per_pin->lock);
	per_pin->channels = substream->runtime->channels;