drm-fixes-2021-12-03: drm fixes for 5.16-rc4

Merge tag 'drm-fixes-2021-12-03-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Bit of an uptick in patch count this week, though it's all relatively
  small overall. I suspect msm has been queuing up a few fixes to skew
  it here. Otherwise amdgpu has a scattered bunch of small fixes, and
  then some vc4, i915.

  virtio-gpu changes an rc1 introduced uAPI mistake, and makes it
  operate more like other drivers. This should be fine as no userspace
  relies on the behaviour yet.

  Summary:

  dma-buf:
   - memory leak fix

  msm:
   - kasan found memory overwrite
   - mmap flags
   - fencing error bug
   - ioctl NULL ptr
   - uninit var
   - devfreqless devices fix
   - dsi lanes fix
   - dp: avoid unpowered aux xfers

  amdgpu:
   - IP discovery based enumeration fixes
   - vkms fixes
   - DSC fixes for DP MST
   - Audio fix for hotplug with tiled displays
   - Misc display fixes
   - DP tunneling fix
   - DP fix
   - Aldebaran fix

  amdkfd:
   - Locking fix
   - Static checker fix
   - Fix double free

  i915:
   - backlight regression
   - Intel HDR backlight detection fix
   - revert TGL workaround that caused hangs

  virtio-gpu:
   - switch back to drm_poll

  vc4:
   - memory leak
   - error check fix
   - HVS modesetting fixes"

* tag 'drm-fixes-2021-12-03-1' of git://anongit.freedesktop.org/drm/drm: (41 commits)
  Revert "drm/i915: Implement Wa_1508744258"
  drm/amdkfd: process_info lock not needed for svm
  drm/amdgpu: adjust the kfd reset sequence in reset sriov function
  drm/amd/display: add connector type check for CRC source set
  drm/amdkfd: fix double free mem structure
  drm/amdkfd: set "r = 0" explicitly before goto
  drm/amd/display: Add work around for tunneled MST.
  drm/amd/display: Fix for the no Audio bug with Tiled Displays
  drm/amd/display: Clear DPCD lane settings after repeater training
  drm/amd/display: Allow DSC on supported MST branch devices
  drm/amdgpu: Don't halt RLC on GFX suspend
  drm/amdgpu: fix the missed handling for SDMA2 and SDMA3
  drm/amdgpu: check atomic flag to differeniate with legacy path
  drm/amdgpu: cancel the correct hrtimer on exit
  drm/amdgpu/sriov/vcn: add new vcn ip revision check case for SIENNA_CICHLID
  drm/i915/dp: Perform 30ms delay after source OUI write
  dma-buf: system_heap: Use 'for_each_sgtable_sg' in pages free flow
  drm/i915: Add support for panels with VESA backlights with PWM enable/disable
  drm/vc4: kms: Fix previous HVS commit wait
  drm/vc4: kms: Don't duplicate pending commit
  ...
This commit is contained in: commit 5f58da2bef
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
         int i;
 
         table = &buffer->sg_table;
-        for_each_sg(table->sgl, sg, table->nents, i) {
+        for_each_sgtable_sg(table, sg, i) {
                 struct page *page = sg_page(sg);
 
                 __free_pages(page, compound_order(page));
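Why this is the right iterator: for_each_sg() bounded by table->nents only walks the entries that survived DMA mapping, while the heap allocated table->orig_nents CPU-side entries; after IOMMU coalescing nents can be smaller, so some pages were never freed. A minimal sketch of the corrected free pattern (standalone helper, not the exact driver context):

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Free every CPU page behind an sg_table. for_each_sgtable_sg()
     * iterates all sgt->orig_nents entries, whereas
     * for_each_sg(sgt->sgl, sg, sgt->nents, i) covers only the
     * DMA-mapped entries, which can be fewer.
     */
    static void free_sgt_pages(struct sg_table *sgt)
    {
            struct scatterlist *sg;
            int i;

            for_each_sgtable_sg(sgt, sg, i)
                    __free_pages(sg_page(sg), compound_order(sg_page(sg)));
            sg_free_table(sgt);
    }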
@@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
         struct sg_table *sg = NULL;
         uint64_t user_addr = 0;
         struct amdgpu_bo *bo;
-        struct drm_gem_object *gobj;
+        struct drm_gem_object *gobj = NULL;
         u32 domain, alloc_domain;
         u64 alloc_flags;
         int ret;
@@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed:
         remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
         drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
-        drm_gem_object_put(gobj);
         /* Don't unreserve system mem limit twice */
         goto err_reserve_limit;
 err_bo_create:
         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
 err_reserve_limit:
         mutex_destroy(&(*mem)->lock);
-        kfree(*mem);
+        if (gobj)
+                drm_gem_object_put(gobj);
+        else
+                kfree(*mem);
 err:
         if (sg) {
                 sg_free_table(sg);
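The double free came from overlapping owners: once the BO is wrapped in a GEM object, drm_gem_object_put() triggers the release callback that frees *mem, so the old unconditional kfree(*mem) on the same path freed it twice. A sketch of the one-owner cleanup rule the fix enforces (struct name hypothetical):

    /* Exactly one of the two paths may release 'mem': either the GEM
     * release callback (reached via the final put) or a direct kfree()
     * when no GEM object was ever created.
     */
    static void cleanup_mem(struct kgd_mem_sketch *mem,
                            struct drm_gem_object *gobj)
    {
            mutex_destroy(&mem->lock);
            if (gobj)
                    drm_gem_object_put(gobj);  /* release path frees mem */
            else
                    kfree(mem);                /* no GEM object: free directly */
    }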
@@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
         /* disable all interrupts */
         amdgpu_irq_disable_all(adev);
         if (adev->mode_info.mode_config_initialized){
-                if (!amdgpu_device_has_dc_support(adev))
+                if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
                         drm_helper_force_disable_all(adev_to_drm(adev));
                 else
                         drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 {
         int r;
 
+        amdgpu_amdkfd_pre_reset(adev);
+
         if (from_hypervisor)
                 r = amdgpu_virt_request_full_gpu(adev, true);
         else
@@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 
         amdgpu_irq_gpu_reset_resume_helper(adev);
         r = amdgpu_ib_ring_tests(adev);
+        amdgpu_amdkfd_post_reset(adev);
 
 error:
         if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                 cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
 
-                amdgpu_amdkfd_pre_reset(tmp_adev);
+                if (!amdgpu_sriov_vf(tmp_adev))
+                        amdgpu_amdkfd_pre_reset(tmp_adev);
 
                 /*
                  * Mark these ASICs to be reseted as untracked first
@@ -5129,7 +5133,7 @@ skip_hw_reset:
                         drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
                 }
 
-                if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+                if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
                         drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
                 }
 
@@ -5148,9 +5152,9 @@ skip_hw_reset:
 
 skip_sched_resume:
         list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-                /* unlock kfd */
-                if (!need_emergency_restart)
-                        amdgpu_amdkfd_post_reset(tmp_adev);
+                /* unlock kfd: SRIOV would do it separately */
+                if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
+                        amdgpu_amdkfd_post_reset(tmp_adev);
 
                 /* kfd_post_reset will do nothing if kfd device is not initialized,
                  * need to bring up kfd here if it's not be initialized before
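Taken together, these hunks move KFD quiescing for SR-IOV into the virtualization reset itself and skip the generic pre/post calls for VF devices, so each side runs exactly once per reset. A condensed ordering sketch, assuming the error handling of the full function:

    static int sriov_reset_sketch(struct amdgpu_device *adev)
    {
            int r;

            amdgpu_amdkfd_pre_reset(adev);      /* quiesce KFD before the FLR */

            r = amdgpu_virt_request_full_gpu(adev, true);
            if (r)
                    return r;

            /* ... hardware re-init elided ... */

            amdgpu_irq_gpu_reset_resume_helper(adev);
            r = amdgpu_ib_ring_tests(adev);
            amdgpu_amdkfd_post_reset(adev);     /* resume KFD once rings test out */
            return r;
    }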
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
         [HDP_HWIP]      = HDP_HWID,
         [SDMA0_HWIP]    = SDMA0_HWID,
         [SDMA1_HWIP]    = SDMA1_HWID,
+        [SDMA2_HWIP]    = SDMA2_HWID,
+        [SDMA3_HWIP]    = SDMA3_HWID,
         [MMHUB_HWIP]    = MMHUB_HWID,
         [ATHUB_HWIP]    = ATHUB_HWID,
         [NBIO_HWIP]     = NBIF_HWID,
@@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
         case IP_VERSION(3, 0, 64):
         case IP_VERSION(3, 1, 1):
         case IP_VERSION(3, 0, 2):
+        case IP_VERSION(3, 0, 192):
                 amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
                 if (!amdgpu_sriov_vf(adev))
                         amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
                 break;
         case IP_VERSION(3, 0, 0):
         case IP_VERSION(3, 0, 64):
+        case IP_VERSION(3, 0, 192):
                 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
                         fw_name = FIRMWARE_SIENNA_CICHLID;
                 else
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
         int i = 0;
 
         for (i = 0; i < adev->mode_info.num_crtc; i++)
-                if (adev->mode_info.crtcs[i])
-                        hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+                if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
+                        hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
 
         kfree(adev->mode_info.bios_hardcoded_edid);
         kfree(adev->amdgpu_vkms_output);
@@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
 
         gfx_v9_0_cp_enable(adev, false);
 
-        /* Skip suspend with A+A reset */
-        if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
-                dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+        /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+        if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+            (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+                dev_dbg(adev->dev, "Skipping RLC halt\n");
                 return 0;
         }
 
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
         switch (adev->ip_versions[UVD_HWIP][0]) {
         case IP_VERSION(3, 0, 0):
         case IP_VERSION(3, 0, 64):
+        case IP_VERSION(3, 0, 192):
                 if (amdgpu_sriov_vf(adev)) {
                         if (encode)
                                 *codecs = &sriov_sc_video_codecs_encode;
@@ -1574,7 +1574,6 @@ retry_flush_work:
 static void svm_range_restore_work(struct work_struct *work)
 {
         struct delayed_work *dwork = to_delayed_work(work);
-        struct amdkfd_process_info *process_info;
         struct svm_range_list *svms;
         struct svm_range *prange;
         struct kfd_process *p;
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
          * the lifetime of this thread, kfd_process and mm will be valid.
          */
         p = container_of(svms, struct kfd_process, svms);
-        process_info = p->kgd_process_info;
         mm = p->mm;
         if (!mm)
                 return;
 
-        mutex_lock(&process_info->lock);
         svm_range_list_lock_and_flush_work(svms, mm);
         mutex_lock(&svms->lock);
 
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
 out_reschedule:
         mutex_unlock(&svms->lock);
         mmap_write_unlock(mm);
-        mutex_unlock(&process_info->lock);
 
         /* If validation failed, reschedule another attempt */
         if (evicted_ranges) {
@@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
         if (atomic_read(&svms->drain_pagefaults)) {
                 pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+                r = 0;
                 goto out;
         }
 
@@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
         mm = get_task_mm(p->lead_thread);
         if (!mm) {
                 pr_debug("svms 0x%p failed to get mm\n", svms);
+                r = 0;
                 goto out;
         }
 
@@ -2660,6 +2658,7 @@ retry_write_locked:
 
         if (svm_range_skip_recover(prange)) {
                 amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+                r = 0;
                 goto out_unlock_range;
         }
 
@@ -2668,6 +2667,7 @@ retry_write_locked:
         if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
                 pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
                          svms, prange->start, prange->last);
+                r = 0;
                 goto out_unlock_range;
         }
 
@@ -3177,7 +3177,6 @@ static int
 svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                    uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
 {
-        struct amdkfd_process_info *process_info = p->kgd_process_info;
         struct mm_struct *mm = current->mm;
         struct list_head update_list;
         struct list_head insert_list;
@@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 
         svms = &p->svms;
 
-        mutex_lock(&process_info->lock);
-
         svm_range_list_lock_and_flush_work(svms, mm);
 
         r = svm_range_is_valid(p, start, size);
@@ -3273,8 +3270,6 @@ out_unlock_range:
         mutex_unlock(&svms->lock);
         mmap_read_unlock(mm);
 out:
-        mutex_unlock(&process_info->lock);
-
         pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
                  &p->svms, start, start + size - 1, r);
 
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
                         ret = -EINVAL;
                         goto cleanup;
                 }
+
+                if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+                    (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+                        DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+                        ret = -EINVAL;
+                        goto cleanup;
+                }
+
         }
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@@ -36,6 +36,8 @@
 #include "dm_helpers.h"
 
 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
 
 #include "i2caux_interface.h"
 #include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+        if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+            (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+            link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+                return true;
+
+        return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
         struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
         u8 *dsc_branch_dec_caps = NULL;
 
         aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
         /*
          * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
          * because it only check the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
          * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
          *
          */
-        if (!aconnector->dsc_aux && !port->parent->port_parent)
+        if (!aconnector->dsc_aux && !port->parent->port_parent &&
+            needs_dsc_aux_workaround(aconnector->dc_link))
                 aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
 
         if (!aconnector->dsc_aux)
                 return false;
 
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
                 dal_ddc_service_set_transaction_type(link->ddc,
                                                      sink_caps->transaction_type);
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+                /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+                 * reports DSC support.
+                 */
+                if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+                    link->type == dc_connection_mst_branch &&
+                    link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+                    link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+                    !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+                        link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
 #if defined(CONFIG_DRM_AMD_DC_HDCP)
                 /* In case of fallback to SST when topology discovery below fails
                  * HDCP caps will be querried again later by the upper layer (caller
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
                         LINK_INFO("link=%d, mst branch is now Disconnected\n",
                                   link->link_index);
 
+                        /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+                        if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+                                link->wa_flags.dpia_mst_dsc_always_on = false;
+
                         dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
 
                         link->mst_stream_alloc_table.stream_count = 0;
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
                 }
 
                 for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
-                        lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+                        lt_settings->dpcd_lane_settings[lane].raw = 0;
         }
 
         if (status == LINK_TRAINING_SUCCESS) {
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
         if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
                 return false;
 
+        // Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
+        if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+                return false;
+
         return true;
 }
 
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
 
         if (!new_ctx)
                 return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
-        /*
-         * Update link encoder to stream assignment.
-         * TODO: Split out reason allocation from validation.
-         */
-        if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
-                dc->res_pool->funcs->link_encs_assign(
-                        dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
 
         if (dc->res_pool->funcs->validate_global) {
                 result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
                 if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
                         result = DC_FAIL_BANDWIDTH_VALIDATE;
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+        /*
+         * Only update link encoder to stream assignment after bandwidth validation passed.
+         * TODO: Split out assignment and validation.
+         */
+        if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+                dc->res_pool->funcs->link_encs_assign(
+                        dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
         return result;
 }
 
@@ -508,7 +508,8 @@ union dpia_debug_options {
                 uint32_t disable_dpia:1;
                 uint32_t force_non_lttpr:1;
                 uint32_t extend_aux_rd_interval:1;
-                uint32_t reserved:29;
+                uint32_t disable_mst_dsc_work_around:1;
+                uint32_t reserved:28;
         } bits;
         uint32_t raw;
 };
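Note how the new debug bit lands immediately before the reserved field, which shrinks from 29 to 28 bits: existing flags keep their bit positions and the union's 32-bit raw view still covers exactly one word. The pattern in isolation:

    #include <stdint.h>

    union debug_options_sketch {
            struct {
                    uint32_t flag_a:1;
                    uint32_t flag_b:1;
                    uint32_t new_flag:1;   /* appended after existing flags */
                    uint32_t reserved:29;  /* shrank by 1 to keep 32 bits total */
            } bits;
            uint32_t raw;                  /* whole-register view is unchanged */
    };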
@@ -191,6 +191,8 @@ struct dc_link {
                 bool dp_skip_DID2;
                 bool dp_skip_reset_segment;
                 bool dp_mot_reset_segment;
+                /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+                bool dpia_mst_dsc_always_on;
         } wa_flags;
         struct link_mst_stream_allocation_table mst_stream_alloc_table;
 
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
                         dev_err(adev->dev, "Failed to disable smu features.\n");
         }
 
-        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+        if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
             adev->gfx.rlc.funcs->stop)
                 adev->gfx.rlc.funcs->stop(adev);
 
@@ -1640,6 +1640,9 @@ struct intel_dp {
         struct intel_dp_pcon_frl frl;
 
         struct intel_psr psr;
+
+        /* When we last wrote the OUI for eDP */
+        unsigned long last_oui_write;
 };
 
 enum lspcon_vendor {
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/notifier.h>
 #include <linux/slab.h>
+#include <linux/timekeeping.h>
 #include <linux/types.h>
 
 #include <asm/byteorder.h>
@@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
 
         if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
                 drm_err(&i915->drm, "Failed to write source OUI\n");
+
+        intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+        drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+        wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
 }
 
 /* If the device supports it, try to set the power state appropriately */
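The 30 ms figure gives the eDP TCON time to settle after the source OUI is written; rather than sleeping unconditionally, the driver stamps last_oui_write and later sleeps only for whatever remains of the window, so back-to-back DPCD accesses pay the delay at most once. A hedged stand-in for i915's wait_remaining_ms_from_jiffies() helper:

    #include <linux/delay.h>
    #include <linux/jiffies.h>

    /* Hypothetical equivalent: sleep for the part of a 'ms' window since
     * 'stamp' that has not yet elapsed, and not at all if it already has.
     */
    static void wait_remaining_ms(unsigned long stamp, unsigned int ms)
    {
            unsigned long deadline = stamp + msecs_to_jiffies(ms);

            if (time_before(jiffies, deadline))
                    msleep(jiffies_to_msecs(deadline - jiffies));
    }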
@@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
                                  const struct intel_crtc_state *crtc_state);
 void intel_dp_phy_test(struct intel_encoder *encoder);
 
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
 #endif /* __INTEL_DP_H__ */
@@ -36,6 +36,7 @@
 
 #include "intel_backlight.h"
 #include "intel_display_types.h"
+#include "intel_dp.h"
 #include "intel_dp_aux_backlight.h"
 
 /* TODO:
@@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
         int ret;
         u8 tcon_cap[4];
 
+        intel_dp_wait_source_oui(intel_dp);
+
         ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
         if (ret != sizeof(tcon_cap))
                 return false;
@@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
         int ret;
         u8 old_ctrl, ctrl;
 
+        intel_dp_wait_source_oui(intel_dp);
+
         ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
         if (ret != 1) {
                 drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
         struct intel_panel *panel = &connector->panel;
         struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
+        if (!panel->backlight.edp.vesa.info.aux_enable) {
+                u32 pwm_level = intel_backlight_invert_pwm_level(connector,
+                                                                 panel->backlight.pwm_level_max);
+
+                panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+        }
+
         drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
 }
 
@@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state,
         struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
 
         drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+        if (!panel->backlight.edp.vesa.info.aux_enable)
+                panel->backlight.pwm_funcs->disable(old_conn_state,
+                                                    intel_backlight_invert_pwm_level(connector, 0));
 }
 
 static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
         if (ret < 0)
                 return ret;
 
+        if (!panel->backlight.edp.vesa.info.aux_enable) {
+                ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+                if (ret < 0) {
+                        drm_err(&i915->drm,
+                                "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+                                ret);
+                        return ret;
+                }
+        }
         panel->backlight.max = panel->backlight.edp.vesa.info.max;
         panel->backlight.min = 0;
         if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
         struct intel_dp *intel_dp = intel_attached_dp(connector);
         struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
-        /* TODO: We currently only support AUX only backlight configurations, not backlights which
-         * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
-         * work just fine using normal PWM controls anyway.
-         */
-        if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
-            drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+        if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
                 drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
                 return true;
         }
@@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
                        FF_MODE2_GS_TIMER_MASK,
                        FF_MODE2_GS_TIMER_224,
                        0, false);
-
-        /*
-         * Wa_14012131227:dg1
-         * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
-         */
-        wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
-                     GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -4,8 +4,8 @@ config DRM_MSM
         tristate "MSM DRM"
         depends on DRM
         depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
-        depends on COMMON_CLK
         depends on IOMMU_SUPPORT
+        depends on (OF && COMMON_CLK) || COMPILE_TEST
         depends on QCOM_OCMEM || QCOM_OCMEM=n
         depends on QCOM_LLCC || QCOM_LLCC=n
         depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
@@ -23,8 +23,10 @@ msm-y := \
         hdmi/hdmi_i2c.o \
         hdmi/hdmi_phy.o \
         hdmi/hdmi_phy_8960.o \
+        hdmi/hdmi_phy_8996.o \
         hdmi/hdmi_phy_8x60.o \
         hdmi/hdmi_phy_8x74.o \
+        hdmi/hdmi_pll_8960.o \
         edp/edp.o \
         edp/edp_aux.o \
         edp/edp_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
         disp/mdp4/mdp4_dtv_encoder.o \
         disp/mdp4/mdp4_lcdc_encoder.o \
         disp/mdp4/mdp4_lvds_connector.o \
+        disp/mdp4/mdp4_lvds_pll.o \
         disp/mdp4/mdp4_irq.o \
         disp/mdp4/mdp4_kms.o \
         disp/mdp4/mdp4_plane.o \
@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
         dp/dp_audio.o
 
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
 
 msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
 
@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
 {
         struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
         struct msm_gpu *gpu = &adreno_gpu->base;
-        u32 gpu_scid, cntl1_regval = 0;
+        u32 cntl1_regval = 0;
 
         if (IS_ERR(a6xx_gpu->llc_mmio))
                 return;
 
         if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
-                gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+                u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
 
                 gpu_scid &= 0x1f;
                 cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
                                (gpu_scid << 15) | (gpu_scid << 20);
+
+                /* On A660, the SCID programming for UCHE traffic is done in
+                 * A6XX_GBIF_SCACHE_CNTL0[14:10]
+                 */
+                if (adreno_is_a660_family(adreno_gpu))
+                        gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+                                (1 << 8), (gpu_scid << 10) | (1 << 8));
         }
 
         /*
@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
         }
 
         gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
-        /* On A660, the SCID programming for UCHE traffic is done in
-         * A6XX_GBIF_SCACHE_CNTL0[14:10]
-         */
-        if (adreno_is_a660_family(adreno_gpu))
-                gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
-                        (1 << 8), (gpu_scid << 10) | (1 << 8));
 }
 
 static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
         return (unsigned long)busy_time;
 }
 
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
 {
         struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
         struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
         struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
         a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
-                2, sizeof(*a6xx_state->gmu_registers));
+                3, sizeof(*a6xx_state->gmu_registers));
 
         if (!a6xx_state->gmu_registers)
                 return;
 
-        a6xx_state->nr_gmu_registers = 2;
+        a6xx_state->nr_gmu_registers = 3;
 
         /* Get the CX GMU registers from AHB */
         _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -33,6 +33,7 @@ struct dp_aux_private {
         bool read;
         bool no_send_addr;
         bool no_send_stop;
+        bool initted;
         u32 offset;
         u32 segment;
 
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
         }
 
         mutex_lock(&aux->mutex);
+        if (!aux->initted) {
+                ret = -EIO;
+                goto exit;
+        }
 
         dp_aux_update_offset_and_segment(aux, msg);
         dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
         }
 
         aux->cmd_busy = false;
+
+exit:
         mutex_unlock(&aux->mutex);
 
         return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
 
         aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+        mutex_lock(&aux->mutex);
+
         dp_catalog_aux_enable(aux->catalog, true);
         aux->retry_cnt = 0;
+        aux->initted = true;
+
+        mutex_unlock(&aux->mutex);
 }
 
 void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
 
         aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
 
+        mutex_lock(&aux->mutex);
+
+        aux->initted = false;
         dp_catalog_aux_enable(aux->catalog, false);
+
+        mutex_unlock(&aux->mutex);
 }
 
 int dp_aux_register(struct drm_dp_aux *dp_aux)
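The shape of the fix is an "initted" flag owned by aux->mutex: init/deinit flip it with the mutex held, and every transfer checks it first, so an AUX transaction can no longer reach a powered-down controller mid-teardown. Condensed (do_hw_transfer() is a hypothetical stand-in for the real transfer body):

    static ssize_t guarded_transfer(struct dp_aux_private *aux,
                                    struct drm_dp_aux_msg *msg)
    {
            ssize_t ret;

            mutex_lock(&aux->mutex);
            if (!aux->initted) {            /* controller is powered off */
                    ret = -EIO;
                    goto exit;
            }

            ret = do_hw_transfer(aux, msg); /* hypothetical transfer body */
    exit:
            mutex_unlock(&aux->mutex);
            return ret;
    }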
@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
         if (!prop) {
                 DRM_DEV_DEBUG(dev,
                         "failed to find data lane mapping, using default\n");
+                /* Set the number of date lanes to 4 by default. */
+                msm_host->num_data_lanes = 4;
                 return 0;
         }
 
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
                 goto free_priv;
 
         pm_runtime_get_sync(&gpu->pdev->dev);
+        msm_gpu_hw_init(gpu);
         show_priv->state = gpu->funcs->gpu_state_get(gpu);
         pm_runtime_put_sync(&gpu->pdev->dev);
 
@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
         return ret;
 }
 
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
-                struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+                      ktime_t timeout)
 {
-        struct msm_drm_private *priv = dev->dev_private;
-        struct drm_msm_wait_fence *args = data;
-        ktime_t timeout = to_ktime(args->timeout);
-        struct msm_gpu_submitqueue *queue;
-        struct msm_gpu *gpu = priv->gpu;
         struct dma_fence *fence;
         int ret;
 
-        if (args->pad) {
-                DRM_ERROR("invalid pad: %08x\n", args->pad);
+        if (fence_id > queue->last_fence) {
+                DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+                                      fence_id, queue->last_fence);
                 return -EINVAL;
         }
 
-        if (!gpu)
-                return 0;
-
-        queue = msm_submitqueue_get(file->driver_priv, args->queueid);
-        if (!queue)
-                return -ENOENT;
-
         /*
          * Map submitqueue scoped "seqno" (which is actually an idr key)
          * back to underlying dma-fence
@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
         ret = mutex_lock_interruptible(&queue->lock);
         if (ret)
                 return ret;
-        fence = idr_find(&queue->fence_idr, args->fence);
+        fence = idr_find(&queue->fence_idr, fence_id);
         if (fence)
                 fence = dma_fence_get_rcu(fence);
         mutex_unlock(&queue->lock);
@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
         }
 
         dma_fence_put(fence);
+
+        return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+                struct drm_file *file)
+{
+        struct msm_drm_private *priv = dev->dev_private;
+        struct drm_msm_wait_fence *args = data;
+        struct msm_gpu_submitqueue *queue;
+        int ret;
+
+        if (args->pad) {
+                DRM_ERROR("invalid pad: %08x\n", args->pad);
+                return -EINVAL;
+        }
+
+        if (!priv->gpu)
+                return 0;
+
+        queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+        if (!queue)
+                return -ENOENT;
+
+        ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+        msm_submitqueue_put(queue);
 
         return ret;
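The refactor fixes the reference leak by construction: in the old ioctl, early returns between msm_submitqueue_get() and the end of the function leaked the queue, whereas the new wrapper owns the get/put pair and delegates everything that can fail to wait_fence(). Reduced to its essentials (condensed from the diff above):

    /* Leak-proof bracketing: the helper may fail anywhere, yet the
     * wrapper always reaches the put that balances its get.
     */
    static int wait_fence_ioctl_sketch(struct drm_file *file,
                                       struct drm_msm_wait_fence *args)
    {
            struct msm_gpu_submitqueue *queue;
            int ret;

            queue = msm_submitqueue_get(file->driver_priv, args->queueid);
            if (!queue)
                    return -ENOENT;

            ret = wait_fence(queue, args->fence, to_ktime(args->timeout));

            msm_submitqueue_put(queue);     /* always paired with the get */
            return ret;
    }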
@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 {
         struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
-        vma->vm_flags &= ~VM_PFNMAP;
-        vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+        vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
         vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
 
         return 0;
@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
                 break;
                 fallthrough;
         default:
-                DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+                DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
                                 (flags & MSM_BO_CACHE_MASK));
                 return -EINVAL;
         }
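The flag change matters because msm GEM mmaps are fault-filled mixed mappings: VM_MIXEDMAP matches how the pages are inserted, VM_IO keeps get_user_pages() and friends away from what may be device memory, VM_DONTEXPAND blocks mremap() growth, and VM_DONTDUMP leaves the region out of core dumps. As a standalone handler sketch:

    static int gem_mmap_sketch(struct vm_area_struct *vma)
    {
            /* Device-backed, fault-filled mapping: opaque to GUP (VM_IO),
             * never grown by mremap() (VM_DONTEXPAND), skipped in core
             * dumps (VM_DONTDUMP), filled through mixed-map faults.
             */
            vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
            vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
            return 0;
    }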
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                         args->nr_cmds);
         if (IS_ERR(submit)) {
                 ret = PTR_ERR(submit);
+                submit = NULL;
                 goto out_unlock;
         }
 
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
         drm_sched_entity_push_job(&submit->base);
 
         args->fence = submit->fence_id;
+        queue->last_fence = submit->fence_id;
 
         msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
         msm_process_post_deps(post_deps, args->nr_out_syncobjs,
@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
  * @ring_nr:   the ringbuffer used by this submitqueue, which is determined
  *             by the submitqueue's priority
  * @faults:    the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ *             checking)
  * @ctx:       the per-drm_file context associated with the submitqueue (ie.
  *             which set of pgtables do submits jobs associated with the
  *             submitqueue use)
@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
         u32 flags;
         u32 ring_nr;
         int faults;
+        uint32_t last_fence;
         struct msm_file_private *ctx;
         struct list_head node;
         struct idr fence_idr;
@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
         struct msm_gpu *gpu = dev_to_gpu(dev);
         struct dev_pm_opp *opp;
 
+        /*
+         * Note that devfreq_recommended_opp() can modify the freq
+         * to something that actually is in the opp table:
+         */
         opp = devfreq_recommended_opp(dev, freq, flags);
 
         /*
@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
          */
         if (gpu->devfreq.idle_freq) {
                 gpu->devfreq.idle_freq = *freq;
+                dev_pm_opp_put(opp);
                 return 0;
         }
 
@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
         struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
         unsigned long idle_freq, target_freq = 0;
 
-        if (!df->devfreq)
-                return;
-
         /*
          * Hold devfreq lock to synchronize with get_dev_status()/
          * target() callbacks
@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
 {
         struct msm_gpu_devfreq *df = &gpu->devfreq;
 
+        if (!df->devfreq)
+                return;
+
         msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
-                               HRTIMER_MODE_ABS);
+                               HRTIMER_MODE_REL);
 }
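Two separate rules are repaired here: devfreq_recommended_opp() returns an OPP with a reference held, so even the idle-freq early return must call dev_pm_opp_put(); and the !df->devfreq guard belongs where the work is queued, not inside the worker, so GPUs without devfreq never arm the idle hrtimer at all. The refcount rule in isolation (the 'idle' flag stands in for the driver's real condition):

    #include <linux/pm_opp.h>

    static int devfreq_target_sketch(struct device *dev, unsigned long *freq,
                                     u32 flags, bool idle)
    {
            struct dev_pm_opp *opp;

            opp = devfreq_recommended_opp(dev, freq, flags); /* takes a ref */
            if (IS_ERR(opp))
                    return PTR_ERR(opp);

            if (!idle) {
                    /* ... program the actual clock here ... */
            }

            dev_pm_opp_put(opp);    /* balance the ref on every path */
            return 0;
    }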
@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
         struct drm_device *dev = state->dev;
         struct vc4_dev *vc4 = to_vc4_dev(dev);
         struct vc4_hvs *hvs = vc4->hvs;
-        struct drm_crtc_state *old_crtc_state;
         struct drm_crtc_state *new_crtc_state;
         struct drm_crtc *crtc;
         struct vc4_hvs_state *old_hvs_state;
+        unsigned int channel;
         int i;
 
         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
                 vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
         }
 
-        if (vc4->hvs->hvs5)
-                clk_set_min_rate(hvs->core_clk, 500000000);
-
         old_hvs_state = vc4_hvs_get_old_global_state(state);
-        if (!old_hvs_state)
+        if (IS_ERR(old_hvs_state))
                 return;
 
-        for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
-                struct vc4_crtc_state *vc4_crtc_state =
-                        to_vc4_crtc_state(old_crtc_state);
-                unsigned int channel = vc4_crtc_state->assigned_channel;
+        for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+                struct drm_crtc_commit *commit;
                 int ret;
 
-                if (channel == VC4_HVS_CHANNEL_DISABLED)
-                        continue;
-
                 if (!old_hvs_state->fifo_state[channel].in_use)
                         continue;
 
-                ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+                commit = old_hvs_state->fifo_state[channel].pending_commit;
+                if (!commit)
+                        continue;
+
+                ret = drm_crtc_commit_wait(commit);
                 if (ret)
                         drm_err(dev, "Timed out waiting for commit\n");
+
+                drm_crtc_commit_put(commit);
+                old_hvs_state->fifo_state[channel].pending_commit = NULL;
         }
 
+        if (vc4->hvs->hvs5)
+                clk_set_min_rate(hvs->core_clk, 500000000);
+
         drm_atomic_helper_commit_modeset_disables(dev, state);
 
         vc4_ctm_commit(vc4, state);
@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
         unsigned int i;
 
         hvs_state = vc4_hvs_get_new_global_state(state);
-        if (!hvs_state)
-                return -EINVAL;
+        if (WARN_ON(IS_ERR(hvs_state)))
+                return PTR_ERR(hvs_state);
 
         for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                 struct vc4_crtc_state *vc4_crtc_state =
@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 
         for (i = 0; i < HVS_NUM_CHANNELS; i++) {
                 state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
-
-                if (!old_state->fifo_state[i].pending_commit)
-                        continue;
-
-                state->fifo_state[i].pending_commit =
-                        drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
         }
 
         return &state->base;
@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
         unsigned int i;
 
         hvs_new_state = vc4_hvs_get_global_state(state);
-        if (!hvs_new_state)
-                return -EINVAL;
+        if (IS_ERR(hvs_new_state))
+                return PTR_ERR(hvs_new_state);
 
         for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
                 if (!hvs_new_state->fifo_state[i].in_use)
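The error-check fix hinges on a calling convention: vc4_hvs_get_*_state() reports failure with ERR_PTR(), never NULL, so the old "if (!state)" tests could not fire and errors were silently ignored. The rule, stated as code:

    #include <linux/err.h>

    static int use_state_sketch(struct vc4_hvs_state *state)
    {
            /* An ERR_PTR() is a non-NULL pointer, so '!state' misses it;
             * IS_ERR()/PTR_ERR() are the correct tests for this API.
             */
            if (IS_ERR(state))
                    return PTR_ERR(state);

            /* ... safe to dereference state here ... */
            return 0;
    }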
@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
         schedule_work(&vgdev->config_changed_work);
 }
 
-static __poll_t virtio_gpu_poll(struct file *filp,
-                                struct poll_table_struct *wait)
-{
-        struct drm_file *drm_file = filp->private_data;
-        struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
-        struct drm_device *dev = drm_file->minor->dev;
-        struct virtio_gpu_device *vgdev = dev->dev_private;
-        struct drm_pending_event *e = NULL;
-        __poll_t mask = 0;
-
-        if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
-                return drm_poll(filp, wait);
-
-        poll_wait(filp, &drm_file->event_wait, wait);
-
-        if (!list_empty(&drm_file->event_list)) {
-                spin_lock_irq(&dev->event_lock);
-                e = list_first_entry(&drm_file->event_list,
-                                     struct drm_pending_event, link);
-                drm_file->event_space += e->event->length;
-                list_del(&e->link);
-                spin_unlock_irq(&dev->event_lock);
-
-                kfree(e);
-                mask |= EPOLLIN | EPOLLRDNORM;
-        }
-
-        return mask;
-}
-
 static struct virtio_device_id id_table[] = {
         { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
         { 0 },
@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
 MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
 MODULE_AUTHOR("Alon Levy");
 
-static const struct file_operations virtio_gpu_driver_fops = {
-        .owner = THIS_MODULE,
-        .open = drm_open,
-        .release = drm_release,
-        .unlocked_ioctl = drm_ioctl,
-        .compat_ioctl = drm_compat_ioctl,
-        .poll = virtio_gpu_poll,
-        .read = drm_read,
-        .llseek = noop_llseek,
-        .mmap = drm_gem_mmap
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
 
 static const struct drm_driver driver = {
         .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
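DEFINE_DRM_GEM_FOPS() generates the same table the driver had open-coded, with .poll restored to the core drm_poll(); that is the whole behavioural revert. From include/drm/drm_gem.h, the macro expands to roughly:

    static const struct file_operations virtio_gpu_driver_fops = {
            .owner          = THIS_MODULE,
            .open           = drm_open,
            .release        = drm_release,
            .unlocked_ioctl = drm_ioctl,
            .compat_ioctl   = drm_compat_ioctl,
            .poll           = drm_poll,     /* back to the core helper */
            .read           = drm_read,
            .llseek         = noop_llseek,
            .mmap           = drm_gem_mmap,
    };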
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
         spinlock_t lock;
 };
 
-#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
 struct virtio_gpu_fence_event {
         struct drm_pending_event base;
         struct drm_event event;
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
         if (!e)
                 return -ENOMEM;
 
-        e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+        e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
         e->event.length = sizeof(e->event);
 
         ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
         __u64 ctx_set_params;
 };
 
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect. The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
 #define DRM_IOCTL_VIRTGPU_MAP \
         DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
 
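With the event code now in the uAPI header, a process that enabled ring polling via VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK consumes fences like any other DRM event: poll the fd, read a bare struct drm_event, and match the type. A userspace sketch (include paths may differ per system):

    #include <poll.h>
    #include <unistd.h>
    #include <drm/drm.h>            /* struct drm_event */
    #include <drm/virtgpu_drm.h>    /* VIRTGPU_EVENT_FENCE_SIGNALED */

    static int wait_virtgpu_fence_event(int drm_fd)
    {
            struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
            struct drm_event ev;

            if (poll(&pfd, 1, -1) <= 0)
                    return -1;
            /* The event carries no payload beyond the header. */
            if (read(drm_fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev))
                    return -1;
            return ev.type == VIRTGPU_EVENT_FENCE_SIGNALED ? 0 : -1;
    }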