2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-22 12:14:01 +08:00

Merge tag 'drm-next-5.3-2019-06-27' of git://people.freedesktop.org/~agd5f/linux into drm-next

drm-next-5.3-2019-06-27:

amdgpu:
- Fix warning on 32 bit ARM
- Fix compilation on big endian
- Misc bug fixes

ttm:
- Live lock fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190628015555.3384-1-alexander.deucher@amd.com
This commit is contained in:
Dave Airlie 2019-07-04 14:52:50 +10:00
commit 4cf643a392
10 changed files with 192 additions and 21 deletions

View File

@ -702,7 +702,7 @@ MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supp
* DOC: queue_preemption_timeout_ms (int)
* queue preemption timeout in ms (1 = Minimum, 9000 = default)
*/
int queue_preemption_timeout_ms;
int queue_preemption_timeout_ms = 9000;
module_param(queue_preemption_timeout_ms, int, 0644);
MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
#endif

View File

@ -2886,7 +2886,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
/* APU does not have its own dedicated memory */
if (!(adev->flags & AMD_IS_APU)) {
if (!(adev->flags & AMD_IS_APU) &&
(adev->asic_type != CHIP_VEGA10)) {
ret = device_create_file(adev->dev,
&dev_attr_mem_busy_percent);
if (ret) {
@ -2966,7 +2967,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
if (!(adev->flags & AMD_IS_APU))
if (!(adev->flags & AMD_IS_APU) &&
(adev->asic_type != CHIP_VEGA10))
device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
if (!(adev->flags & AMD_IS_APU))
device_remove_file(adev->dev, &dev_attr_pcie_bw);

View File

@ -2624,9 +2624,6 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
rb_bufsz = order_base_2(ring->ring_size / 8);
tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
#ifdef __BIG_ENDIAN
tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, BUF_SWAP, 1);
#endif
WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
/* Initialize the ring buffer's write pointers */
ring->wptr = 0;

View File

@ -1010,8 +1010,8 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
if (indirect)
psp_update_vcn_sram(adev, 0, adev->vcn.dpg_sram_gpu_addr,
(uint32_t)((uint64_t)adev->vcn.dpg_sram_curr_addr -
(uint64_t)adev->vcn.dpg_sram_cpu_addr));
(uint32_t)((uintptr_t)adev->vcn.dpg_sram_curr_addr -
(uintptr_t)adev->vcn.dpg_sram_cpu_addr));
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);

View File

@ -738,7 +738,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
if (ret)
return ret;
count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count != 0, "KFD reset ref. error");
atomic_set(&kfd->sram_ecc_flag, 0);

View File

@ -126,7 +126,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock
/* Maximum total throughput with all the slices combined. This is different from how DP spec specifies it.
* Our decoder's total throughput in Pix/s is equal to DISPCLK. This is then shared between slices.
* The value below is the absolute maximum value. The actual througput may be lower, but it'll always
* The value below is the absolute maximum value. The actual throughput may be lower, but it'll always
* be sufficient to process the input pixel rate fed into a single DSC engine.
*/
dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz;

View File

@ -47,7 +47,7 @@ static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_bl
*buff_block_size = 64 * 1024;
break;
default: {
dm_error("%s: DPCD DSC buffer size not recoginzed.\n", __func__);
dm_error("%s: DPCD DSC buffer size not recognized.\n", __func__);
return false;
}
}
@ -63,7 +63,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
else if (dpcd_line_buff_bit_depth == 8)
*line_buff_bit_depth = 8;
else {
dm_error("%s: DPCD DSC buffer depth not recoginzed.\n", __func__);
dm_error("%s: DPCD DSC buffer depth not recognized.\n", __func__);
return false;
}
@ -123,7 +123,7 @@ static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
*throughput = 1000;
break;
default: {
dm_error("%s: DPCD DSC througput mode not recoginzed.\n", __func__);
dm_error("%s: DPCD DSC throughput mode not recognized.\n", __func__);
return false;
}
}
@ -152,7 +152,7 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp
*bpp_increment_div = 1;
break;
default: {
dm_error("%s: DPCD DSC bits-per-pixel increment not recoginzed.\n", __func__);
dm_error("%s: DPCD DSC bits-per-pixel increment not recognized.\n", __func__);
return false;
}
}

View File

@ -820,6 +820,10 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
ret = smu_get_clk_info_from_vbios(smu);
if (ret)
return ret;
/*
* check if the format_revision in vbios is up to pptable header
* version, and the structure size is not 0.

View File

@ -574,15 +574,19 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
struct smu_power_gate *power_gate = &smu_power->power_gate;
if (enable && power_gate->uvd_gated) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
if (ret)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
if (ret)
return ret;
}
power_gate->uvd_gated = false;
} else {
if (!enable && !power_gate->uvd_gated) {
ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
if (ret)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
if (ret)
return ret;
}
power_gate->uvd_gated = true;
}
}
@ -1300,6 +1304,169 @@ static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_
return 0;
}
/*
 * navi10_get_ppfeature_status - print the SMC feature enablement table.
 * @smu: SMU context to query.
 * @buf: output text buffer (presumably a PAGE_SIZE sysfs buffer — the
 *       unbounded sprintf calls assume the caller guarantees space; confirm).
 *
 * Returns the number of bytes written to @buf, or a negative error code if
 * the enabled-feature mask could not be read from the SMC.
 */
static int navi10_get_ppfeature_status(struct smu_context *smu,
char *buf)
{
/*
 * Feature names indexed by SMC feature-bit position: entry i names bit i
 * of the 64-bit enabled mask. The order therefore must match the firmware
 * feature-bit layout exactly — do not reorder or insert entries.
 */
static const char *ppfeature_name[] = {
"DPM_PREFETCHER",
"DPM_GFXCLK",
"DPM_GFX_PACE",
"DPM_UCLK",
"DPM_SOCCLK",
"DPM_MP0CLK",
"DPM_LINK",
"DPM_DCEFCLK",
"MEM_VDDCI_SCALING",
"MEM_MVDD_SCALING",
"DS_GFXCLK",
"DS_SOCCLK",
"DS_LCLK",
"DS_DCEFCLK",
"DS_UCLK",
"GFX_ULV",
"FW_DSTATE",
"GFXOFF",
"BACO",
"VCN_PG",
"JPEG_PG",
"USB_PG",
"RSMU_SMN_CG",
"PPT",
"TDC",
"GFX_EDC",
"APCC_PLUS",
"GTHR",
"ACDC",
"VR0HOT",
"VR1HOT",
"FW_CTF",
"FAN_CONTROL",
"THERMAL",
"GFX_DCS",
"RM",
"LED_DISPLAY",
"GFX_SS",
"OUT_OF_BAND_MONITOR",
"TEMP_DEPENDENT_VMIN",
"MMHUB_PG",
"ATHUB_PG"};
/* Column headers for the table printed below. */
static const char *output_title[] = {
"FEATURES",
"BITMASK",
"ENABLEMENT"};
uint64_t features_enabled;
uint32_t feature_mask[2];
int i;
int ret = 0;
int size = 0;

/* Fetch the enabled-feature bitmask from the SMC as two 32-bit words. */
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
PP_ASSERT_WITH_CODE(!ret,
"[GetPPfeatureStatus] Failed to get enabled smc features!",
return ret);
/* Recombine the two words: word 0 is the low half, word 1 the high half. */
features_enabled = (uint64_t)feature_mask[0] |
(uint64_t)feature_mask[1] << 32;

size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
size += sprintf(buf + size, "%-19s %-22s %s\n",
output_title[0],
output_title[1],
output_title[2]);
/* One row per named feature bit: name, single-bit mask, Y/N enablement. */
for (i = 0; i < (sizeof(ppfeature_name) / sizeof(ppfeature_name[0])); i++) {
size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
ppfeature_name[i],
1ULL << i,
(features_enabled & (1ULL << i)) ? "Y" : "N");
}

return size;
}
/*
 * navi10_enable_smc_features - enable or disable a set of SMC features.
 * @smu: SMU context to operate on.
 * @enabled: true to enable the features in @feature_masks, false to disable.
 * @feature_masks: 64-bit mask of the feature bits to change.
 *
 * The mask is split into two 32-bit halves because the SMC message interface
 * carries one 32-bit parameter per message; the low half is always sent
 * first. After the firmware acknowledges, the driver-side cached bitmap is
 * refreshed from the SMC so it reflects what the firmware actually applied.
 *
 * Returns 0 on success or the first failing message's error code.
 */
static int navi10_enable_smc_features(struct smu_context *smu,
bool enabled,
uint64_t feature_masks)
{
struct smu_feature *feature = &smu->smu_feature;
uint32_t mask_low = (uint32_t)(feature_masks & 0xFFFFFFFF);
uint32_t mask_high = (uint32_t)((feature_masks & 0xFFFFFFFF00000000ULL) >> 32);
/* Pick the enable or disable message pair up front. */
int msg_low = enabled ? SMU_MSG_EnableSmuFeaturesLow :
SMU_MSG_DisableSmuFeaturesLow;
int msg_high = enabled ? SMU_MSG_EnableSmuFeaturesHigh :
SMU_MSG_DisableSmuFeaturesHigh;
uint32_t updated_mask[2];
int ret;

/* Low 32 bits first, then the high 32 bits, as the firmware expects. */
ret = smu_send_smc_msg_with_param(smu, msg_low, mask_low);
if (ret)
return ret;

ret = smu_send_smc_msg_with_param(smu, msg_high, mask_high);
if (ret)
return ret;

/* Re-read the authoritative enabled mask back from the SMC. */
ret = smu_feature_get_enabled_mask(smu, updated_mask, 2);
if (ret)
return ret;

/* Update the cached bitmap under the feature lock. */
mutex_lock(&feature->mutex);
bitmap_copy(feature->enabled, (unsigned long *)&updated_mask,
feature->feature_num);
mutex_unlock(&feature->mutex);

return 0;
}
/*
 * navi10_set_ppfeature_status - reconcile SMC features with a requested mask.
 * @smu: SMU context to operate on.
 * @new_ppfeature_masks: desired 64-bit feature enablement mask.
 *
 * Reads the currently enabled features, then disables the bits that are set
 * now but absent from the request, and enables the bits that are requested
 * but not yet set. Disables are issued before enables, matching the original
 * ordering. Returns 0 on success or a negative error code.
 */
static int navi10_set_ppfeature_status(struct smu_context *smu,
uint64_t new_ppfeature_masks)
{
uint32_t mask_words[2];
uint64_t current_features;
uint64_t to_enable;
uint64_t to_disable;
int ret;

/* Snapshot what the firmware currently has enabled. */
ret = smu_feature_get_enabled_mask(smu, mask_words, 2);
PP_ASSERT_WITH_CODE(!ret,
"[SetPPfeatureStatus] Failed to get enabled smc features!",
return ret);
current_features = ((uint64_t)mask_words[1] << 32) |
(uint64_t)mask_words[0];

/* Bits set now but not requested must be turned off; the inverse turned on. */
to_disable = current_features & ~new_ppfeature_masks;
to_enable = ~current_features & new_ppfeature_masks;

pr_debug("features_to_disable 0x%llx\n", to_disable);
pr_debug("features_to_enable 0x%llx\n", to_enable);

if (to_disable) {
ret = navi10_enable_smc_features(smu, false, to_disable);
PP_ASSERT_WITH_CODE(!ret,
"[SetPPfeatureStatus] Failed to disable smc features!",
return ret);
}

if (to_enable) {
ret = navi10_enable_smc_features(smu, true, to_enable);
PP_ASSERT_WITH_CODE(!ret,
"[SetPPfeatureStatus] Failed to enable smc features!",
return ret);
}

return 0;
}
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@ -1333,6 +1500,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.set_watermarks_table = navi10_set_watermarks_table,
.read_sensor = navi10_read_sensor,
.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
.get_ppfeature_status = navi10_get_ppfeature_status,
.set_ppfeature_status = navi10_set_ppfeature_status,
};
void navi10_set_ppt_funcs(struct smu_context *smu)

View File

@ -827,7 +827,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
if (!r)
reservation_object_unlock(busy_bo->resv);
return r == -EDEADLK ? -EAGAIN : r;
return r == -EDEADLK ? -EBUSY : r;
}
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,