drm-fixes for -rc3

- fix for ttm list corruption in radeon, reported by a few people
- fixes for amdgpu, i915, msm
- dma-buf use-after-free fix
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEb4nG6jLu8Y5XI+PfTA9ye/CYqnEFAl/4gBAACgkQTA9ye/CY
 qnFb4w//Z7ZeRcEjmHwOvKod6uqIRuEgn2naZgHWSY1ZaB8Mqtbv3ORF52oA8VbH
 d156DaUvK4BrQhr8zoMqu43iuzCx8UskQTAnosRutGUtWEN86G2Dy5BfqYygMAnp
 s0h9kw+O1b3sRlusbOnjYl4oaysRQBQntx7TVyXeWAp6BjZF/NILPc16e85rfPFa
 DIThWLy4gOfkXp/iNp+rIoEVrxCNixzCwiRCFwHiQGwuHsYDbsTWuH3OWg2rzT/V
 ePXa28/J8YL7qG4+f/jaVIMxmBeJhlMPMrPgKXALb3uhJvAoJ+LV4fH61k1EeEbu
 o5b/U8jEFPUwK1mtlLcJBJf68jrWdXXZe6XMRewpWjz2RLaZj8oWQGlzlG6jASfP
 OZTnIXVXpgNAIT6xj0j8jJ40hdgFRHfEhr2eclF6WUsmipwd3vt99LH6nzlaQQ+I
 T9ExivgcTwyHag/WoalVtbkq4Vmqb51tx/WaDPbuSAKYHuJyZWavZnvSihAakyFG
 QAs+o+72SgU1CksMp6zawGu7buzPUoYtkp5lzMDmxOg4FWym7tLrEFflPBym3WcS
 p8fXxPsC3Uu3cs6/0pMAKy+sMCVwU87clTP2GGmWTvDjsdMq+xuMn9J1PsZtofXr
 Cf+qc7GSpwB07ophyl8aZgIYcf3gX2A2XLVBQ06L6e8JVDNTYds=
 =Vyly
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2021-01-08' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Daniel Vetter:
 "Looks like people are back from the break, usual small pile of fixes
  all over. Next week Dave should be back.

  The only thing pending I'm aware of is a "this shouldn't have become
  uapi" reverts for amdgpu, but they're already on the list and not that
  important really so can wait another week.

  Summary:

   - fix for ttm list corruption in radeon, reported by a few people

   - fixes for amdgpu, i915, msm

   - dma-buf use-after-free fix"

* tag 'drm-fixes-2021-01-08' of git://anongit.freedesktop.org/drm/drm: (29 commits)
  drm/msm: Only enable A6xx LLCC code on A6xx
  drm/msm: Add modparam to allow vram carveout
  drm/msm: Call msm_init_vram before binding the gpu
  drm/msm/dp: postpone irq_hpd event during connection pending state
  drm/ttm: unexport ttm_pool_init/fini
  drm/radeon: stop re-init the TTM page pool
  dmabuf: fix use-after-free of dmabuf's file->f_inode
  Revert "drm/amd/display: Fix memory leaks in S3 resume"
  drm/amdgpu/display: drop DCN support for aarch64
  drm/amdgpu: enable ras eeprom support for sienna cichlid
  drm/amdgpu: fix no bad_pages issue after umc ue injection
  drm/amdgpu: fix potential memory leak during navi12 deinitialization
  drm/amd/display: Fix unused variable warning
  drm/amd/pm: improve the fine grain tuning function for RV/RV2/PCO
  drm/amd/pm: fix the failure when change power profile for renoir
  drm/amdgpu: fix a GPU hang issue when remove device
  drm/amdgpu: fix a memory protection fault when remove amdgpu device
  drm/amdgpu: switched to cached noretry setting for vangogh
  drm/amd/display: fix sysfs amdgpu_current_backlight_pwm NULL pointer issue
  drm/amd/pm: updated PM to I2C controller port on sienna cichlid
  ...
commit 43d3d587d2
Linus Torvalds, 2021-01-08 15:12:08 -08:00
47 changed files with 303 additions and 178 deletions

drivers/dma-buf/dma-buf.c

@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
static int dma_buf_file_release(struct inode *inode, struct file *file)
{
struct dma_buf *dmabuf;
if (!is_dma_buf_file(file))
return -EINVAL;
dmabuf = file->private_data;
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
return 0;
}
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
.release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,

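Note on the fix above: dma_buf_release() runs from the dentry's ->d_release and frees the dmabuf, but the buffer used to stay on db_list until that point, so anything walking db_list could still reach a dmabuf whose struct file had already been torn down and touch its freed f_inode. Moving the list_del into the new dma_buf_file_release(), wired up as the fops ->release, unlinks the buffer when the last file reference is dropped, before the dentry-side free. A minimal userspace sketch of that ordering, with hypothetical names (buf, db_list); the kernel version additionally holds db_list.lock around the unlink:

#include <stdio.h>
#include <stdlib.h>

struct buf { struct buf *next; };
static struct buf *db_list;

/* like .release in dma_buf_fops: runs first, while the file is fully valid */
static void file_release(struct buf *b)
{
	for (struct buf **p = &db_list; *p; p = &(*p)->next)
		if (*p == b) { *p = b->next; break; }
}

/* like .d_release in dma_buf_dentry_ops: runs last and frees the object */
static void dentry_release(struct buf *b)
{
	free(b);	/* safe: no longer reachable via db_list */
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));
	b->next = db_list; db_list = b;
	file_release(b);
	dentry_release(b);
	puts("unlinked before freed: no use-after-free window");
	return 0;
}
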
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2548,11 +2548,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
amdgpu_amdkfd_device_fini(adev);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
amdgpu_amdkfd_device_fini(adev);
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)

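Note: the hunk above reorders teardown so amdgpu_amdkfd_device_fini() runs before the PG/CG state is forced to UNGATE rather than after it, presumably as part of the two device-removal fixes in the commit list ("fix a GPU hang issue" / "fix a memory protection fault when remove amdgpu device"), so KFD lets go of the hardware before the IP blocks start being torn down.
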
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c

@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->hdcp_context.hdcp_initialized)
return 0;
if (!psp->hdcp_context.hdcp_initialized) {
if (psp->hdcp_context.hdcp_shared_buf)
goto out;
else
return 0;
}
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
if (!psp->dtm_context.dtm_initialized)
return 0;
if (!psp->dtm_context.dtm_initialized) {
if (psp->dtm_context.dtm_shared_buf)
goto out;
else
return 0;
}
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);

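Note on the psp hunks above: psp_hdcp_terminate() and psp_dtm_terminate() used to return early whenever the context was not marked initialized, leaking the shared buffer if one had already been allocated; they now jump to the out label to free it in that case. The parse_ta_bin_descriptor() hunk drops the duplicated assignments, and psp_asd_load() keys its bypass off asd_ucode_size instead of the asd_fw pointer, since the asd_fw aliasing of ta_fw is removed.
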
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c

@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
int ret = 0;
int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
ret = amdgpu_vram_mgr_query_page_status(
status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
if (ret == -EBUSY)
if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
else if (ret == -ENOENT)
else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}

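Note: the point of the new status variable is that amdgpu_vram_mgr_query_page_status() legitimately returns -EBUSY or -ENOENT for pages whose retirement is still pending or failed, and reusing the function-wide ret for that probe turned those informational codes into a bogus error return. A minimal userspace sketch of the bug class, with hypothetical stand-in names:

#include <errno.h>
#include <stdio.h>

static int query_page_status(int i)
{
	return (i & 1) ? -EBUSY : 0;	/* stand-in for the VRAM-manager query */
}

static int badpages_read(int count)
{
	int ret = 0, status;		/* separate variable, as in the fix */

	for (int i = 0; i < count; i++) {
		status = query_page_status(i);
		if (status == -EBUSY)
			printf("page %d: retirement pending\n", i);
		else if (status == -ENOENT)
			printf("page %d: reserve failed\n", i);
	}
	return ret;			/* stays 0: probe results are not failures */
}

int main(void) { return badpages_read(4); }
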
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c

@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
(adev->asic_type == CHIP_ARCTURUS))
(adev->asic_type == CHIP_ARCTURUS) ||
(adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
case CHIP_SIENNA_CICHLID:
*i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
break;
default:
return false;
}

drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c

@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
!amdgpu_noretry);
!adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,

drivers/gpu/drm/amd/display/Kconfig

@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and

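Context for this Kconfig change and the string of Makefile, dcn10_resource.c and os_types.h hunks that follow: "drm/amdgpu/display: drop DCN support for aarch64" removes the arm64 hard-float plumbing wholesale, so every CONFIG_ARM64 -mgeneral-regs-only override and the kernel_neon_begin()/kernel_neon_end() variant of DC_FP_START/DC_FP_END goes away together with the ARM64 clause in the DRM_AMD_DC_DCN select.
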
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c

@@ -2386,8 +2386,7 @@ void amdgpu_dm_update_connector_after_detect(
drm_connector_update_edid_property(connector,
aconnector->edid);
aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
drm_connector_list_update(connector);
drm_add_edid_modes(connector, aconnector->edid);
if (aconnector->dc_link->aux_mode)
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
@@ -8379,8 +8378,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
}
#ifdef CONFIG_DEBUG_FS
if (new_crtc_state->active &&
if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
@@ -8401,7 +8399,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
}
#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)

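Note: replacing the #ifdef CONFIG_DEBUG_FS block with an IS_ENABLED() test is what makes the "unused variable" warning fixable: the guarded code is now always parsed and type-checked, and simply compiles away when the option is off. A tiny demo of the idea (CONFIG_X stands in for the kernel's generated Kconfig macros):

#include <stdio.h>

#define CONFIG_X 1	/* flip to 0: branch is eliminated, yet still compiled */

int main(void)
{
	if (CONFIG_X)
		puts("debugfs CRC path active");
	return 0;
}
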
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h

@@ -46,13 +46,13 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
}
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *src_name,

drivers/gpu/drm/amd/display/dc/calcs/Makefile

@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
calcs_rcflags := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile

@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
# prevent build errors:
# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
# this file is unused on arm64, just like on ppc64
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
endif
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
# prevent build errors:
# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
# this file is unused on arm64, just like on ppc64
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
endif
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
# prevent build errors:
# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
# this file is unused on arm64, just like on ppc64
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
endif
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)

drivers/gpu/drm/amd/display/dc/core/dc_link.c

@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
struct dc *dc = link->ctx->dc;
struct dc *dc = NULL;
struct abm *abm = NULL;
if (!link || !link->ctx)
return NULL;
dc = link->ctx->dc;
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;

drivers/gpu/drm/amd/display/dc/dcn10/Makefile

@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
# fix:
# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
# aarch64 does not support soft-float, so use hard-float and handle this in code
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
endif
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)

drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c

@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
#if defined(CONFIG_ARM64)
/* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
DC_FP_START();
dcn10_resource_construct_fp(dc);
DC_FP_END();
#else
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);

drivers/gpu/drm/amd/display/dc/dcn20/Makefile

@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/dcn21/Makefile

@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/dcn30/Makefile

@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/dcn301/Makefile

@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/dcn302/Makefile

@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/dml/Makefile

@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
dml_rcflags := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/dsc/Makefile

@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
ifdef CONFIG_ARM64
dsc_rcflags := -mgeneral-regs-only
endif
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1

drivers/gpu/drm/amd/display/dc/os_types.h

@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
#elif defined(CONFIG_ARM64)
#include <asm/neon.h>
#define DC_FP_START() kernel_neon_begin()
#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>

drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c

@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
smu10_data->gfx_actual_soft_min_freq,
clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
/* disabled fine grain tuning function by default */
data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
data->fine_grain_enabled = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
data->gfx_actual_soft_min_freq = fine_grain_min_freq;
data->gfx_actual_soft_max_freq = fine_grain_max_freq;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
data->fine_grain_enabled = 1;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_SCLK");
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
if (ret)
return ret;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
if (ret)
return ret;
size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
size += sprintf(buf + size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
uint32_t min_freq, max_freq = 0;
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
if (ret)
return ret;
ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
if (ret)
return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
uint32_t min_freq, max_freq = 0;
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
int ret = 0;
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
if (size != 2) {
pr_err("Input parameter number not correct\n");
if (!smu10_data->fine_grain_enabled) {
pr_err("Fine grain not started\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
if (input[0] == 0)
smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
else if (input[0] == 1)
smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
else
if (size != 2) {
pr_err("Input parameter number not correct\n");
return -EINVAL;
}
if (input[0] == 0) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
if (input[1] < min_freq) {
pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
input[1], min_freq);
return -EINVAL;
}
smu10_data->gfx_actual_soft_min_freq = input[1];
} else if (input[0] == 1) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
if (input[1] > max_freq) {
pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
input[1], max_freq);
return -EINVAL;
}
smu10_data->gfx_actual_soft_max_freq = input[1];
} else {
return -EINVAL;
}
} else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
if (size != 0) {
pr_err("Input parameter number not correct\n");
return -EINVAL;
}
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
smu10_data->gfx_actual_soft_min_freq = min_freq;
smu10_data->gfx_actual_soft_max_freq = max_freq;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_freq,
NULL);
if (ret)
return ret;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
max_freq,
NULL);
if (ret)
return ret;
} else if (type == PP_OD_COMMIT_DPM_TABLE) {
if (size != 0) {
pr_err("Input parameter number not correct\n");
return -EINVAL;
}
if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
return -EINVAL;
}
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
smu10_data->gfx_actual_soft_min_freq,
NULL);
if (ret)
return ret;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
smu10_data->gfx_actual_soft_max_freq,
NULL);
if (ret)
return ret;
} else {
return -EINVAL;
}
return 0;

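Note on the fine-grain flow above: the three branches of smu10_set_fine_grain_clk_vol() correspond to what userspace writes to pp_od_clk_voltage, presumably "s 0|1 <MHz>" (PP_OD_EDIT_SCLK_VDDC_TABLE), "r" (PP_OD_RESTORE_DEFAULT_TABLE) and "c" (PP_OD_COMMIT_DPM_TABLE), and edits are only accepted after AMD_DPM_FORCED_LEVEL_MANUAL sets fine_grain_enabled. A compact userspace sketch of that state machine (all names and the 400-1400 MHz range are illustrative):

#include <stdio.h>

static int fine_grain_enabled;			/* set by the MANUAL dpm level */
static unsigned min_mhz = 400, max_mhz = 1400;	/* firmware-reported range */
static unsigned soft_min = 400, soft_max = 1400;

static int od_edit(int index, unsigned mhz)	/* "s <index> <MHz>" */
{
	if (!fine_grain_enabled)
		return -1;			/* "Fine grain not started" */
	if (index == 0 && mhz >= min_mhz)
		soft_min = mhz;
	else if (index == 1 && mhz <= max_mhz)
		soft_max = mhz;
	else
		return -1;			/* bad index or out-of-range clock */
	return 0;
}

static int od_commit(void)			/* "c" */
{
	if (soft_min > soft_max)
		return -1;			/* reject an inverted range */
	printf("SetHardMinGfxClk(%u) SetSoftMaxGfxClk(%u)\n", soft_min, soft_max);
	return 0;
}

int main(void)
{
	fine_grain_enabled = 1;
	return od_edit(0, 600) || od_edit(1, 1200) || od_commit();
}
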
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h

@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10Khz*/
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
bool fine_grain_enabled;
};
struct pp_hwmgr;

drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c

@@ -2372,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
req->I2CcontrollerPort = 0;
req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;

drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c

@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
*value = metrics->CurrentSocketPower;
*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *

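Note: the new scaling appears to convert the SMU's milliwatt reading into the 8.8 fixed-point watt format this metric is reported in; e.g. 15000 mW becomes (15000 << 8) / 1000 = 3840, which is 15.0 W in Q8.8.
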
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c

@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;

drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c

@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;

drivers/gpu/drm/i915/display/intel_display_types.h

@@ -1436,6 +1436,9 @@ struct intel_dp {
bool ycbcr_444_to_420;
} dfp;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
/* Display stream compression testing */
bool force_dsc_en;

drivers/gpu/drm/i915/display/intel_dp.c

@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
cpu_latency_qos_update_request(&i915->pm_qos, 0);
cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1622,7 +1622,7 @@ done:
ret = recv_bytes;
out:
cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
cpu_latency_qos_remove_request(&intel_dp->pm_qos);
kfree(intel_dp->aux.name);
}
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)

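Note on the intel_dp hunks (paired with the i915_drv.c and i915_drv.h hunks further down): the CPU latency QoS request moves from the device-global i915->pm_qos to each intel_dp, so only a connector actually doing AUX transfers pins wakeup latency, and its lifetime follows the encoder; intel_dp_aux_fini() checks cpu_latency_qos_request_active() first since presumably not every encoder that reaches fini has gone through aux init.
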
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c

@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);

drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c

@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \

drivers/gpu/drm/i915/i915_cmd_parser.c

@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
unsigned long x, n;
unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
length = round_up(length,
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
for (n = offset >> PAGE_SHIFT; length; n++) {
int len = min(length, PAGE_SIZE - x);
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
length -= len;
remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
{
return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
if (shadow_needs_clflush(shadow->obj))
drm_clflush_virt_range(batch_end, 8);
}
if (shadow_needs_clflush(shadow->obj)) {
void *ptr = page_mask_bits(shadow->obj->mm.mapping);
drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
}
i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);

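Note: the new remain variable exists because length is still needed unmodified at the end of copy_batch(), where memset32(dst + length, ...) zero-fills the tail of the shadow batch; rounding length itself up to the clflush granularity shifted where that zero-fill started. The later hunks then replace the open-coded shadow_needs_clflush() flushing with i915_gem_object_flush_map(), mirroring the reloc_gpu_flush() change above.
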
drivers/gpu/drm/i915/i915_drv.c

@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**

drivers/gpu/drm/i915/i915_drv.h

@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;

drivers/gpu/drm/msm/adreno/a2xx_gpu.c

@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
ret = -ENXIO;
goto fail;
if (!allow_vram_carveout) {
ret = -ENXIO;
goto fail;
}
}
return gpu;

drivers/gpu/drm/msm/adreno/a3xx_gpu.c

@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
if (!allow_vram_carveout) {
ret = -ENXIO;
goto fail;
}
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");

drivers/gpu/drm/msm/adreno/a4xx_gpu.c

@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
ret = -ENXIO;
goto fail;
if (!allow_vram_carveout) {
ret = -ENXIO;
goto fail;
}
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");

drivers/gpu/drm/msm/adreno/adreno_device.c

@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
bool allow_vram_carveout = false;
MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),

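Note: allow_vram_carveout is a plain bool module parameter (perms 0600), so it can presumably be set at boot with msm.allow_vram_carveout=1 on the kernel command line or flipped by root through /sys/module/msm/parameters/allow_vram_carveout; the a2xx/a3xx/a4xx hunks above then turn the no-IOMMU case from a hard -ENXIO failure into this opt-in carveout mode.
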
drivers/gpu/drm/msm/adreno/adreno_gpu.c

@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
/*
* This allows GPU to set the bus attributes required to use system
* cache on behalf of the iommu page table walker.
*/
if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
if (adreno_is_a6xx(adreno_gpu)) {
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct io_pgtable_domain_attr pgtbl_cfg;
/*
* This allows GPU to set the bus attributes required to use system
* cache on behalf of the iommu page table walker.
*/
if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
}
}
mmu = msm_iommu_new(&pdev->dev, iommu);

drivers/gpu/drm/msm/adreno/adreno_gpu.h

@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
{
return ((gpu->revn < 700 && gpu->revn > 599));
}
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;

drivers/gpu/drm/msm/dp/dp_display.c

@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
if (state == ST_CONNECT_PENDING) {
/* wait until ST_CONNECTED */
dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
mutex_unlock(&dp->event_mutex);
return 0;
}
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;

drivers/gpu/drm/msm/dp/dp_panel.c

@@ -167,14 +167,20 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (rc || !is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
if (rc) {
DRM_ERROR("read dpcd failed %d\n", rc);
return rc;
}
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
dp_panel->link_info.num_lanes);
return -EINVAL;
}
if (dp_panel->dfp_present) {
rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
&count, 1);

drivers/gpu/drm/msm/msm_drv.c

@@ -457,15 +457,15 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
goto err_msm_uninit;
dma_set_max_seg_size(dev, UINT_MAX);
msm_gem_shrinker_init(ddev);

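Note: msm_init_vram() now runs before component_bind_all(), so the VRAM carveout is already set up by the time the GPU sub-component binds and may allocate from it ("Call msm_init_vram before binding the gpu" in the commit list).
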
drivers/gpu/drm/msm/msm_gem.c

@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!msm_gem_is_locked(obj));
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -988,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
kvfree(msm_obj->pages);
put_iova_vmas(obj);
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
@@ -997,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
put_iova_vmas(obj);
msm_gem_unlock(obj);
}
put_iova_vmas(obj);
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1115,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
drm_gem_private_object_init(dev, obj, size);
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1126,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
drm_gem_private_object_init(dev, obj, size);
msm_gem_lock(obj);
pages = get_pages(obj);
msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;

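Note: the msm_gem hunks are lock-discipline fixes: get_pages() now asserts the object lock is held, put_iova_vmas() moves so it runs while the lock is still held in the free path, and _msm_gem_new() initializes the GEM object before taking its lock and adding the VRAM vma.
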
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
dma_addressing_limited(&rdev->pdev->dev));
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");

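Note: this is the radeon TTM list corruption called out in the summary. The TTM device init path already initializes bdev.pool, so running ttm_pool_init() on it a second time re-initialized live pool types and corrupted their lists; with the duplicate init gone, ttm_pool_init()/ttm_pool_fini() have no users outside TTM, and the next file drops their EXPORT_SYMBOLs.
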
drivers/gpu/drm/ttm/ttm_pool.c

@@ -507,7 +507,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -525,7 +524,6 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */