Merge tag 'drm-fixes-5.4-2019-10-02' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
drm-fixes-5.4-2019-10-02:

amdgpu:
- Enable bulk moves
- Power metrics fixes for Navi
- Fix S4 regression
- Add query for tcc disabled mask
- Fix several leaks in error paths
- randconfig fixes
- clang fixes

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191002204909.3519-1-alexander.deucher@amd.com
commit 0f83eb8888
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
 	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
-	amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
 
+amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
 	u32 val = 0;
 	u32 count = 0;
 	struct device *dev;
-	struct i2s_platform_data *i2s_pdata;
+	struct i2s_platform_data *i2s_pdata = NULL;
 
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
 	adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
 							GFP_KERNEL);
 
-	if (adev->acp.acp_cell == NULL)
-		return -ENOMEM;
+	if (adev->acp.acp_cell == NULL) {
+		r = -ENOMEM;
+		goto failure;
+	}
 
 	adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
 	if (adev->acp.acp_res == NULL) {
-		kfree(adev->acp.acp_cell);
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto failure;
 	}
 
 	i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
 	if (i2s_pdata == NULL) {
-		kfree(adev->acp.acp_res);
-		kfree(adev->acp.acp_cell);
-		return -ENOMEM;
+		r = -ENOMEM;
+		goto failure;
 	}
 
 	switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
 	r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
 								ACP_DEVS);
 	if (r)
-		return r;
+		goto failure;
 
 	for (i = 0; i < ACP_DEVS ; i++) {
 		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
 		r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
 		if (r) {
 			dev_err(dev, "Failed to add dev to genpd\n");
-			return r;
+			goto failure;
 		}
 	}
 
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
 			break;
 		if (--count == 0) {
 			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
-			return -ETIMEDOUT;
+			r = -ETIMEDOUT;
+			goto failure;
 		}
 		udelay(100);
 	}
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
 			break;
 		if (--count == 0) {
 			dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
-			return -ETIMEDOUT;
+			r = -ETIMEDOUT;
+			goto failure;
 		}
 		udelay(100);
 	}
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
 	val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
 	cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
 	return 0;
+
+failure:
+	kfree(i2s_pdata);
+	kfree(adev->acp.acp_res);
+	kfree(adev->acp.acp_cell);
+	kfree(adev->acp.acp_genpd);
+	return r;
 }
 
 /**
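The acp_hw_init() hunks above replace per-site unwinding with a single failure label; that is also why the earlier hunk initializes i2s_pdata to NULL, since the label frees it even when the function fails before that allocation and kfree(NULL) is a no-op. A minimal stand-alone sketch of the idiom, with calloc()/free() standing in for kcalloc()/kfree() and made-up names and sizes (illustrative only, not the driver code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct acp_like_ctx {
	void *cells;
	void *res;
	void *pdata;
};

static int acp_like_init(struct acp_like_ctx *ctx)
{
	void *pdata = NULL;	/* NULL up front so the failure label can free it unconditionally */
	int r;

	ctx->cells = calloc(16, 64);
	if (ctx->cells == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	ctx->res = calloc(5, 64);
	if (ctx->res == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	pdata = calloc(3, 64);
	if (pdata == NULL) {
		r = -ENOMEM;
		goto failure;
	}

	ctx->pdata = pdata;
	return 0;

failure:
	/* free(NULL) is a no-op, so partially initialized state needs no special casing */
	free(pdata);
	free(ctx->res);
	free(ctx->cells);
	return r;
}

int main(void)
{
	struct acp_like_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));	/* mirrors the zero-allocated amdgpu_device */
	if (acp_like_init(&ctx) == 0) {
		puts("initialized");
		free(ctx.pdata);
		free(ctx.res);
		free(ctx.cells);
	}
	return 0;
}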
@@ -81,9 +81,10 @@
  * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
  * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	34
+#define KMS_DRIVER_MINOR	35
 #define KMS_DRIVER_PATCHLEVEL	0
 
 #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH	256
@@ -165,6 +165,7 @@ struct amdgpu_gfx_config {
 	uint32_t num_sc_per_sh;
 	uint32_t num_packer_per_sc;
 	uint32_t pa_sc_tile_steering_override;
+	uint64_t tcc_disabled_mask;
 };
 
 struct amdgpu_cu_info {
@@ -787,6 +787,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.pa_sc_tile_steering_override =
 			adev->gfx.config.pa_sc_tile_steering_override;
 
+		dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
 	}
@@ -603,14 +603,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 	struct amdgpu_vm_bo_base *bo_base;
 
-#if 0
 	if (vm->bulk_moveable) {
 		spin_lock(&glob->lru_lock);
 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
 		spin_unlock(&glob->lru_lock);
 		return;
 	}
-#endif
 
 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
@@ -1691,6 +1691,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
 	}
 }
 
+static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+{
+	/* TCCs are global (not instanced). */
+	uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+			       RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+
+	adev->gfx.config.tcc_disabled_mask =
+		REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
+		(REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
+}
+
 static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 {
 	u32 tmp;

@@ -1702,6 +1713,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 
 	gfx_v10_0_setup_rb(adev);
 	gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
+	gfx_v10_0_get_tcc_info(adev);
 	adev->gfx.config.pa_sc_tile_steering_override =
 		gfx_v10_0_init_pa_sc_tile_steering_override(adev);
 
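As a worked example of the mask composition in gfx_v10_0_get_tcc_info() (illustrative register values, not real hardware): if TCC_DISABLE reads 0x0003 and HI_TCC_DISABLE reads 0x0001, the combined tcc_disabled_mask is 0x0003 | (0x0001 << 16) = 0x00010003, i.e. TCCs 0, 1 and 16 are disabled.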
@@ -317,10 +317,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
 	struct smu_context *smu = &adev->smu;
 
 	if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-		amdgpu_inc_vram_lost(adev);
+		if (!adev->in_suspend)
+			amdgpu_inc_vram_lost(adev);
 		ret = smu_baco_reset(smu);
 	} else {
-		amdgpu_inc_vram_lost(adev);
+		if (!adev->in_suspend)
+			amdgpu_inc_vram_lost(adev);
 		ret = nv_asic_mode1_reset(adev);
 	}
 
@@ -558,12 +558,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 {
 	switch (soc15_asic_reset_method(adev)) {
 	case AMD_RESET_METHOD_BACO:
-		amdgpu_inc_vram_lost(adev);
+		if (!adev->in_suspend)
+			amdgpu_inc_vram_lost(adev);
 		return soc15_asic_baco_reset(adev);
 	case AMD_RESET_METHOD_MODE2:
 		return soc15_mode2_reset(adev);
 	default:
-		amdgpu_inc_vram_lost(adev);
+		if (!adev->in_suspend)
+			amdgpu_inc_vram_lost(adev);
 		return soc15_asic_mode1_reset(adev);
 	}
 }

@@ -771,8 +773,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
 			amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-#	warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
 #endif
 		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
 		break;
@@ -2385,8 +2385,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
 		dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-	if (adev->asic_type == CHIP_RENOIR)
-		dm->dc->debug.disable_stutter = true;
 
 	return 0;
 fail:

@@ -6019,7 +6017,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	int i;
+#ifdef CONFIG_DEBUG_FS
 	enum amdgpu_dm_pipe_crc_source source;
+#endif
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
 				      new_crtc_state, i) {
@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }

@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }

@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }

@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }

@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }

@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }

@@ -1077,6 +1077,7 @@ struct clock_source *dcn20_clock_source_create(
 		return &clk_src->base;
 	}
 
+	kfree(clk_src);
 	BREAK_TO_DEBUGGER();
 	return NULL;
 }
@@ -3,7 +3,17 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+	cc_stack_align := -mpreferred-stack-boundary=4
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+	cc_stack_align := -mstack-alignment=16
+endif
+
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align)
+
+ifdef CONFIG_CC_IS_CLANG
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
+endif
 
 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
 
@@ -39,9 +39,6 @@
  * ways. Unless there is something clearly wrong with it the code should
  * remain as-is as it provides us with a guarantee from HW that it is correct.
  */
-
-typedef unsigned int uint;
-
 typedef struct {
 	double DPPCLK;
 	double DISPCLK;

@@ -4774,7 +4771,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
 	mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
 	mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
 	for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-		uint m;
+		unsigned int m;
 
 		locals->cursor_bw[k] = 0;
 		locals->cursor_bw_pre[k] = 0;

@@ -5285,7 +5282,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
 	double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
 	double FullDETBufferingTimeYStutterCriticalPlane = 0;
 	double TimeToFinishSwathTransferStutterCriticalPlane = 0;
-	uint k, j;
+	unsigned int k, j;
 
 	mode_lib->vba.TotalActiveDPP = 0;
 	mode_lib->vba.TotalDCCActiveDPP = 0;

@@ -5507,7 +5504,7 @@ static void CalculateDCFCLKDeepSleep(
 		double DPPCLK[],
 		double *DCFCLKDeepSleep)
 {
-	uint k;
+	unsigned int k;
 	double DisplayPipeLineDeliveryTimeLuma;
 	double DisplayPipeLineDeliveryTimeChroma;
 	//double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];

@@ -5727,7 +5724,7 @@ static void CalculatePixelDeliveryTimes(
 		double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
 {
 	double req_per_swath_ub;
-	uint k;
+	unsigned int k;
 
 	for (k = 0; k < NumberOfActivePlanes; ++k) {
 		if (VRatio[k] <= 1) {

@@ -5869,7 +5866,7 @@ static void CalculateMetaAndPTETimes(
 	unsigned int dpte_groups_per_row_chroma_ub;
 	unsigned int num_group_per_lower_vm_stage;
 	unsigned int num_req_per_lower_vm_stage;
-	uint k;
+	unsigned int k;
 
 	for (k = 0; k < NumberOfActivePlanes; ++k) {
 		if (GPUVMEnable == true) {
@@ -843,6 +843,8 @@ static int smu_sw_init(void *handle)
 	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
 	smu->smu_baco.platform_support = false;
 
+	mutex_init(&smu->sensor_lock);
+
 	smu->watermarks_bitmap = 0;
 	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -1018,6 +1018,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
 	if (!data || !size)
 		return -EINVAL;
 
+	mutex_lock(&smu->sensor_lock);
 	switch (sensor) {
 	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
 		*(uint32_t *)data = pptable->FanMaximumRpm;

@@ -1044,6 +1045,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
 	default:
 		ret = smu_smc_read_sensor(smu, sensor, data, size);
 	}
+	mutex_unlock(&smu->sensor_lock);
 
 	return ret;
 }
@@ -344,6 +344,7 @@ struct smu_context
 	const struct smu_funcs		*funcs;
 	const struct pptable_funcs	*ppt_funcs;
 	struct mutex			mutex;
+	struct mutex			sensor_lock;
 	uint64_t pool_size;
 
 	struct smu_table_context	smu_table;
@@ -547,7 +547,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
 	struct smu_table_context *smu_table= &smu->smu_table;
 	int ret = 0;
 
-	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+	if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
 		ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
 				       (void *)smu_table->metrics_table, false);
 		if (ret) {
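A note on the interval change above (not part of the diff): HZ / 1000 is integer division, so on kernels built with CONFIG_HZ below 1000 (100, 250 and 300 are common) it truncates to 0 jiffies and the cached metrics table expires immediately, costing an SMU round trip on every read; msecs_to_jiffies(100) gives a well-defined 100 ms cache window regardless of HZ.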
@@ -1386,6 +1386,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 	if(!data || !size)
 		return -EINVAL;
 
+	mutex_lock(&smu->sensor_lock);
 	switch (sensor) {
 	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
 		*(uint32_t *)data = pptable->FanMaximumRpm;

@@ -1409,6 +1410,7 @@ static int navi10_read_sensor(struct smu_context *smu,
 	default:
 		ret = smu_smc_read_sensor(smu, sensor, data, size);
 	}
+	mutex_unlock(&smu->sensor_lock);
 
 	return ret;
 }
@@ -3023,6 +3023,7 @@ static int vega20_read_sensor(struct smu_context *smu,
 	if(!data || !size)
 		return -EINVAL;
 
+	mutex_lock(&smu->sensor_lock);
 	switch (sensor) {
 	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
 		*(uint32_t *)data = pptable->FanMaximumRpm;

@@ -3048,6 +3049,7 @@ static int vega20_read_sensor(struct smu_context *smu,
 	default:
 		ret = smu_smc_read_sensor(smu, sensor, data, size);
 	}
+	mutex_unlock(&smu->sensor_lock);
 
 	return ret;
 }
@@ -1003,6 +1003,8 @@ struct drm_amdgpu_info_device {
 	__u64 high_va_max;
 	/* gfx10 pa_sc_tile_steering_override */
 	__u32 pa_sc_tile_steering_override;
+	/* disabled TCCs */
+	__u64 tcc_disabled_mask;
 };
 
 struct drm_amdgpu_info_hw_ip {
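To illustrate how the new field reaches userspace, here is a minimal query sketch against the AMDGPU_INFO_DEV_INFO ioctl used by the amdgpu_info_ioctl() hunk above. The fixed render-node path and the absence of a driver-version check are simplifying assumptions; real code should confirm the DRM version is at least 3.35 before trusting tcc_disabled_mask, and would typically go through libdrm's amdgpu helpers instead.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <xf86drm.h>	/* libdrm: drmCommandWrite() */
#include <amdgpu_drm.h>	/* DRM_AMDGPU_INFO, struct drm_amdgpu_info{,_device} */

int main(void)
{
	struct drm_amdgpu_info_device dev_info;
	struct drm_amdgpu_info request;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed node; enumerate devices in real code */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&request, 0, sizeof(request));
	memset(&dev_info, 0, sizeof(dev_info));
	request.return_pointer = (uintptr_t)&dev_info;
	request.return_size = sizeof(dev_info);
	request.query = AMDGPU_INFO_DEV_INFO;

	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request))) {
		perror("DRM_AMDGPU_INFO");
		return 1;
	}

	/* Low 16 bits come from TCC_DISABLE, high 16 bits from HI_TCC_DISABLE,
	 * exactly as composed by gfx_v10_0_get_tcc_info() above. */
	printf("tcc_disabled_mask = 0x%llx\n",
	       (unsigned long long)dev_info.tcc_disabled_mask);
	return 0;
}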