Merge tag 'drm-fixes-2021-11-26' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "No idea if turkey comes before pull request processing, but here's the
  regular week's fixes. A bunch for amdgpu, nouveau adds support for a
  new GPU (like a PCI ID addition), and a scattering of fixes across
  i915/hyperv/aspeed/vc4.

  Specifics:

  amdgpu:
   - SRIOV fixes
   - dma-buf double free fix
   - Display fixes for GPU resets
   - Fix DSC powergating regression
   - GPU TSC fixes
   - Interrupt handler overflow fixes
   - Endian fix in IP discovery table handling
   - Aldebaran ASPM fix
   - Fix overclocking regression on older asics
   - Backlight/ACPI fix

  amdkfd:
   - SVM fixes
   - VMA removal race fix

  hyperv:
   - removal fix

  aspeed:
   - vga_pw sysfs file fix

  vc4:
   - error checking fix

  nouveau:
   - support GA106
   - fix a few error checks

  i915:
   - fix wakeref handling around PXP suspend"

* tag 'drm-fixes-2021-11-26' of git://anongit.freedesktop.org/drm/drm: (25 commits)
  drm/amd/display: update bios scratch when setting backlight
  drm/amdgpu/pm: fix powerplay OD interface
  drm/amdgpu: Skip ASPM programming on aldebaran
  drm/amdgpu: fix byteorder error in amdgpu discovery
  drm/amdgpu: enable Navi retry fault wptr overflow
  drm/amdgpu: enable Navi 48-bit IH timestamp counter
  drm/amdkfd: simplify drain retry fault
  drm/amdkfd: handle VMA remove race
  drm/amdkfd: process exit and retry fault race
  drm/amdgpu: IH process reset count when restart
  drm/amdgpu/gfx9: switch to golden tsc registers for renoir+
  drm/amdgpu/gfx10: add wraparound gpu counter check for APUs as well
  drm/amdgpu: move kfd post_reset out of reset_sriov function
  drm/amd/display: Fixed DSC would not PG after removing DSC stream
  drm/amd/display: Reset link encoder assignments for GPU reset
  drm/amd/display: Set plane update flags for all planes in reset
  drm/amd/display: Fix DPIA outbox timeout after GPU reset
  drm/amdgpu: Fix double free of dmabuf
  drm/amdgpu: Fix MMIO HDP flush on SRIOV
  drm/i915/gt: Hold RPM wakelock during PXP suspend
  ...
Linus Torvalds, 2021-11-25 18:21:20 -08:00
commit a4849f6000
38 changed files with 378 additions and 213 deletions


@ -3,7 +3,7 @@ VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc2
NAME = Trick or Treat
NAME = Gobble Gobble
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"


@ -646,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
if (IS_ERR(gobj))
return PTR_ERR(gobj);
/* Import takes an extra reference on the dmabuf. Drop it now to
* avoid leaking it. We only need the one reference in
* kgd_mem->dmabuf.
*/
dma_buf_put(mem->dmabuf);
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
(*bo)->parent = amdgpu_bo_ref(mem->bo);


@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}
void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
u32 backlight_level)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);
tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
ATOM_S2_CURRENT_BL_LEVEL_MASK;
WREG32(adev->bios_scratch_reg_offset + 2, tmp);
}
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
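The backlight helper added above is a standard read-modify-write on a register field: clear the field with its mask, then OR in the shifted new value so the neighboring bits of the scratch register survive. Generically:

	new = (old & ~FIELD_MASK) | ((level << FIELD_SHIFT) & FIELD_MASK);

(FIELD_MASK/FIELD_SHIFT are generic stand-ins for the ATOM_S2_CURRENT_BL_LEVEL pair used above.)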


@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
u32 backlight_level);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);


@ -4316,7 +4316,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
amdgpu_amdkfd_post_reset(adev);
error:
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@ -5089,7 +5088,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* TODO Implement XGMI hive reset logic for SRIOV */
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_device_reset_sriov(adev, job ? false : true);
if (r)
@ -5149,8 +5148,8 @@ skip_hw_reset:
skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
/* unlock kfd */
if (!need_emergency_restart)
amdgpu_amdkfd_post_reset(tmp_adev);
/* kfd_post_reset will do nothing if kfd device is not initialized,


@ -248,8 +248,8 @@ get_from_vram:
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
@ -270,7 +270,7 @@ get_from_vram:
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
ihdr->size, checksum)) {
le16_to_cpu(ihdr->size), checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@ -282,7 +282,7 @@ get_from_vram:
ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
ghdr->size, checksum)) {
le32_to_cpu(ghdr->size), checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@ -489,10 +489,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
for (i = 0; i < 32; i++) {
if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
break;
switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
vcn_harvest_count++;
if (harvest_info->list[i].number_instance == 0)
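Background for the byteorder fixes above: the IP discovery table is stored little-endian, so every multi-byte field must pass through le16_to_cpu()/le32_to_cpu() before use. On little-endian hosts these conversions compile to nothing, which is why missing or wrongly sized ones only misbehave on big-endian machines. A kernel-style sketch of the pattern; the struct and names are illustrative, not the real discovery layout:

	struct example_hdr {
		__le16 size;          /* fields read from VRAM keep __le types */
		__le32 checksum;
	};

	static bool example_valid(const struct example_hdr *hdr, u32 expect)
	{
		u16 size = le16_to_cpu(hdr->size);      /* no-op on LE CPUs */
		u32 csum = le32_to_cpu(hdr->checksum);

		return size != 0 && csum == expect;
	}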


@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
*/
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
unsigned int count;
u32 wptr;
if (!ih->enabled || adev->shutdown)
@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
count = AMDGPU_IH_MAX_NUM_IVS;
DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
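What the amdgpu_ih_process() hunk above changes: the IV budget is now re-armed on every pass through restart_ih. Before, count was set once at declaration, so a restart re-entered the loop with count already at zero and the --count in the loop condition wrapped around, effectively removing the bound. A self-contained sketch of the corrected control flow; the ring type and helpers are illustrative:

	struct ring { unsigned int rptr; unsigned int wptr_hw; };

	static unsigned int read_wptr(struct ring *r) { return r->wptr_hw; }
	static void handle_entry(struct ring *r) { r->rptr++; }

	#define BUDGET 32u

	static void process_ring(struct ring *r)
	{
		unsigned int count;
		unsigned int wptr = read_wptr(r);

	restart:
		count = BUDGET;           /* fresh budget on every restart */
		while (r->rptr != wptr && --count)
			handle_entry(r);

		wptr = read_wptr(r);      /* did more entries arrive meanwhile? */
		if (r->rptr != wptr)
			goto restart;
	}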


@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
* roughly every 42 seconds.
*/
if (hi_check != clock_hi) {
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
clock_hi = hi_check;
}
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
default:
preempt_disable();
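A quick check of the 42-second figure in the comment above: a 32-bit counter ticking at 100 MHz wraps after

	2^32 / 100 MHz = 4294967296 / 100000000 s ≈ 42.9 s

which is why the code samples upper/lower/upper and re-reads the low word whenever the high word moved between the two samples.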


@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@ -4238,19 +4243,38 @@ failed_kiq_read:
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
uint64_t clock, clock_lo, clock_hi, hi_check;
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(9, 3, 0):
preempt_disable();
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
* roughly every 42 seconds.
*/
if (hi_check != clock_hi) {
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
clock_hi = hi_check;
}
preempt_enable();
clock = clock_lo | (clock_hi << 32ULL);
break;
default:
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
break;
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
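The gfx_v9 hunk applies the same torn-read guard as the gfx_v10 one above. Distilled into a standalone sketch, with read32() standing in for the RREG32_SOC15_NO_KIQ() accessors:

	#include <stdint.h>

	extern uint32_t read32(int reg);   /* hypothetical MMIO read */
	enum { CNT_LO, CNT_HI };

	static uint64_t read_counter64(void)
	{
		uint32_t hi = read32(CNT_HI);
		uint32_t lo = read32(CNT_LO);
		uint32_t hi2 = read32(CNT_HI);

		if (hi2 != hi) {
			/* the counter carried into the high word between the
			 * two high reads; take the low word of the new epoch */
			lo = read32(CNT_LO);
			hi = hi2;
		}
		return ((uint64_t)hi << 32) | lo;
	}

Disabling preemption around the sequence, as the driver does, keeps the three reads close enough together that at most one carry can occur.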


@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev,
tmp = navi10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
if (ih == &adev->irq.ih1) {
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
u32 ih_chicken;
u32 tmp;
int ret;
int i;
@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
ih[0]->doorbell_index);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
pci_set_master(adev->pdev);
/* enable interrupts */
@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
wptr = le32_to_cpu(*ih->wptr_cpu);
if (ih == &adev->irq.ih) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
}
ih_regs = &ih->ih_regs;
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t wptr = cpu_to_le32(entry->src_data[0]);
switch (entry->ring_id) {
case 1:
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
*adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;


@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
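A note on the << 2 that recurs through these SRIOV hunks: SOC15_REG_OFFSET() produces a register index counted in 32-bit dwords, while rmmio_remap.reg_offset is consumed as a byte offset, hence

	byte_offset = dword_index * sizeof(u32) = dword_index << 2

The bare-metal path can assign a byte constant directly (MMIO_REG_HOLE_OFFSET in the nv.c and soc15.c hunks further down), which is why no shift appears there.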


@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CI_CNTL, data);
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)


@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset =
SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {


@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
}
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {


@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
return;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;


@ -731,8 +731,10 @@ static int nv_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@ -1032,7 +1034,7 @@ static int nv_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);


@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */


@ -766,7 +766,7 @@ struct svm_range_list {
struct list_head deferred_range_list;
spinlock_t deferred_list_lock;
atomic_t evicted_ranges;
bool drain_pagefaults;
atomic_t drain_pagefaults;
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
struct task_struct *faulting_task;


@ -1968,10 +1968,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
struct kfd_process_device *pdd;
struct amdgpu_device *adev;
struct kfd_process *p;
int drain;
uint32_t i;
p = container_of(svms, struct kfd_process, svms);
restart:
drain = atomic_read(&svms->drain_pagefaults);
if (!drain)
return;
for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
@ -1983,6 +1989,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
goto restart;
}
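The drain_pagefaults conversion from bool to atomic_t implements a small generation-count protocol: the unmap path bumps the counter, and the drainer clears it only if no new request arrived while it was draining, retrying otherwise. Reduced to its essentials (kernel-style sketch):

	static atomic_t drain_gen = ATOMIC_INIT(0);

	static void request_drain(void)      /* unmap path */
	{
		atomic_inc(&drain_gen);
	}

	static void drain_retry_faults(void)
	{
		int gen;

	restart:
		gen = atomic_read(&drain_gen);
		if (!gen)
			return;

		/* ... wait for the IH rings to pass a checkpoint ... */

		/* clear only if nobody requested another drain meanwhile */
		if (atomic_cmpxchg(&drain_gen, gen, 0) != gen)
			goto restart;
	}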
static void svm_range_deferred_list_work(struct work_struct *work)
@ -1990,43 +1998,41 @@ static void svm_range_deferred_list_work(struct work_struct *work)
struct svm_range_list *svms;
struct svm_range *prange;
struct mm_struct *mm;
struct kfd_process *p;
svms = container_of(work, struct svm_range_list, deferred_list_work);
pr_debug("enter svms 0x%p\n", svms);
p = container_of(svms, struct kfd_process, svms);
/* Avoid mm is gone when inserting mmu notifier */
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p process mm gone\n", svms);
return;
}
retry:
mmap_write_lock(mm);
/* Checking for the need to drain retry faults must be inside
* mmap write lock to serialize with munmap notifiers.
*/
if (unlikely(atomic_read(&svms->drain_pagefaults))) {
mmap_write_unlock(mm);
svm_range_drain_retry_fault(svms);
goto retry;
}
spin_lock(&svms->deferred_list_lock);
while (!list_empty(&svms->deferred_range_list)) {
prange = list_first_entry(&svms->deferred_range_list,
struct svm_range, deferred_list);
spin_unlock(&svms->deferred_list_lock);
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);
mm = prange->work_item.mm;
retry:
mmap_write_lock(mm);
mutex_lock(&svms->lock);
/* Checking for the need to drain retry faults must be in
* mmap write lock to serialize with munmap notifiers.
*
* Remove from deferred_list must be inside mmap write lock,
* otherwise, svm_range_list_lock_and_flush_work may hold mmap
* write lock, and continue because deferred_list is empty, then
* deferred_list handle is blocked by mmap write lock.
*/
spin_lock(&svms->deferred_list_lock);
if (unlikely(svms->drain_pagefaults)) {
svms->drain_pagefaults = false;
spin_unlock(&svms->deferred_list_lock);
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
svm_range_drain_retry_fault(svms);
goto retry;
}
list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock);
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);
mutex_lock(&svms->lock);
mutex_lock(&prange->migrate_mutex);
while (!list_empty(&prange->child_list)) {
struct svm_range *pchild;
@ -2042,12 +2048,13 @@ retry:
svm_range_handle_list_op(svms, prange);
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
spin_lock(&svms->deferred_list_lock);
}
spin_unlock(&svms->deferred_list_lock);
mmap_write_unlock(mm);
mmput(mm);
pr_debug("exit svms 0x%p\n", svms);
}
@ -2056,12 +2063,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op)
{
spin_lock(&svms->deferred_list_lock);
/* Make sure pending page faults are drained in the deferred worker
* before the range is freed to avoid straggler interrupts on
* unmapped memory causing "phantom faults".
*/
if (op == SVM_OP_UNMAP_RANGE)
svms->drain_pagefaults = true;
/* if prange is on the deferred list */
if (!list_empty(&prange->deferred_list)) {
pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@ -2140,6 +2141,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);
/* Make sure pending page faults are drained in the deferred worker
* before the range is freed to avoid straggler interrupts on
* unmapped memory causing "phantom faults".
*/
atomic_inc(&svms->drain_pagefaults);
unmap_parent = start <= prange->start && last >= prange->last;
list_for_each_entry(pchild, &prange->child_list, child_list) {
@ -2559,20 +2566,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
}
static bool
svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
{
unsigned long requested = VM_READ;
struct vm_area_struct *vma;
if (write_fault)
requested |= VM_WRITE;
vma = find_vma(mm, addr << PAGE_SHIFT);
if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
pr_debug("address 0x%llx VMA is removed\n", addr);
return true;
}
pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
vma->vm_flags);
return (vma->vm_flags & requested) == requested;
@ -2590,6 +2590,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
struct vm_area_struct *vma;
int r = 0;
if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
@ -2600,7 +2601,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
p = kfd_lookup_process_by_pasid(pasid);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return -ESRCH;
return 0;
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
@ -2611,10 +2612,17 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
if (atomic_read(&svms->drain_pagefaults)) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
goto out;
}
/* p->lead_thread is available as kfd_process_wq_release flush the work
* before releasing task ref.
*/
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
r = -ESRCH;
goto out;
}
@ -2663,7 +2671,17 @@ retry_write_locked:
goto out_unlock_range;
}
if (!svm_fault_allowed(mm, addr, write_fault)) {
/* __do_munmap removed VMA, return success as we are handling stale
* retry fault.
*/
vma = find_vma(mm, addr << PAGE_SHIFT);
if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
pr_debug("address 0x%llx VMA is removed\n", addr);
r = 0;
goto out_unlock_range;
}
if (!svm_fault_allowed(vma, write_fault)) {
pr_debug("fault addr 0x%llx no %s permission\n", addr,
write_fault ? "write" : "read");
r = -EPERM;
@ -2741,6 +2759,14 @@ void svm_range_list_fini(struct kfd_process *p)
/* Ensure list work is finished before process is destroyed */
flush_work(&p->svms.deferred_list_work);
/*
* Ensure no retry fault comes in afterwards, as page fault handler will
* not find kfd process and take mm lock to recover fault.
*/
atomic_inc(&p->svms.drain_pagefaults);
svm_range_drain_retry_fault(&p->svms);
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
@ -2761,6 +2787,7 @@ int svm_range_list_init(struct kfd_process *p)
mutex_init(&svms->lock);
INIT_LIST_HEAD(&svms->list);
atomic_set(&svms->evicted_ranges, 0);
atomic_set(&svms->drain_pagefaults, 0);
INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
INIT_LIST_HEAD(&svms->deferred_range_list);


@ -51,6 +51,7 @@
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
@ -2561,6 +2562,22 @@ static int dm_resume(void *handle)
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
/*
* The dc->current_state is backed up into dm->cached_dc_state
* before we commit 0 streams.
*
* DC will clear link encoder assignments on the real state
* but the changes won't propagate over to the copy we made
* before the 0 streams commit.
*
* DC expects that link encoder assignments are *not* valid
* when committing a state, so as a workaround it needs to be
* cleared here.
*/
link_enc_cfg_init(dm->dc, dc_state);
amdgpu_dm_outbox_init(adev);
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@ -2572,8 +2589,8 @@ static int dm_resume(void *handle)
for (i = 0; i < dc_state->stream_count; i++) {
dc_state->streams[i]->mode_changed = true;
for (j = 0; j < dc_state->stream_status->plane_count; j++) {
dc_state->stream_status->plane_states[j]->update_flags.raw
for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
dc_state->stream_status[i].plane_states[j]->update_flags.raw
= 0xffffffff;
}
}
@ -3909,6 +3926,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
caps = dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
/* update scratch register */
if (bl_idx == 0)
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
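The plane-flag hunk above fixes a one-character class of bug worth spelling out: dc_state->stream_status refers to an array, and stream_status->plane_count silently means element 0, so only the first stream's planes were ever flagged. In miniature:

	struct status { int plane_count; };
	struct state  { struct status stream_status[4]; int stream_count; };

	static void flag_all_planes(struct state *s)
	{
		int i, j;

		for (i = 0; i < s->stream_count; i++)
			/* s->stream_status->plane_count would always read
			 * stream 0; the index must follow the outer loop */
			for (j = 0; j < s->stream_status[i].plane_count; j++)
				; /* mark plane j of stream i */
	}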


@ -1637,7 +1637,7 @@ void dcn10_reset_hw_ctx_wrap(
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx);
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}


@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap(
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx);
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}


@ -602,7 +602,7 @@ void dcn31_reset_hw_ctx_wrap(
dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx);
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}


@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;
phm_get_sysfs_buf(&buf, &size);
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
i = 1;
size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
size += sprintf(buf + size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
size += sprintf(buf + size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;
@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
size += sprintf(buf + size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;
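Context for the sysfs_emit_at() -> sprintf() conversions in this and the following hwmgr files: sysfs_emit_at() refuses to write unless buf sits exactly on a page boundary (it WARNs and returns 0 otherwise), since sysfs hands show() callbacks a page-aligned PAGE_SIZE buffer. The powerplay OD handlers append through already-advanced pointers, and the removed phm_get_sysfs_buf() helper worked around that by rewinding buf to the page start; dropping the scheme in favor of plain sprintf(buf + size, ...) restores the pre-regression output on these older asics. The contract, as a simplified paraphrase of fs/sysfs/file.c:

	int emit_at(char *buf, int at, const char *s)
	{
		if (!buf || ((unsigned long)buf & (PAGE_SIZE - 1)) ||
		    at < 0 || at >= PAGE_SIZE)
			return 0;          /* the kernel version also WARNs */
		return sprintf(buf + at, "%s", s);
	}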


@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
int size = 0;
uint32_t i, now, clock, pcie_speed;
phm_get_sysfs_buf(&buf, &size);
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < sclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < mclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < pcie_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
size += sprintf(buf + size, "%d: %s %s\n", i,
(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}


@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t i, now;
int size = 0;
phm_get_sysfs_buf(&buf, &size);
switch (type) {
case PP_SCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_SCLK_INDEX);
for (i = 0; i < sclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_MCLK_INDEX);
for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;


@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0, count = 0;
phm_get_sysfs_buf(&buf, &size);
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
count = sclk_table->count;
for (i = 0; i < count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}


@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0;
struct pp_clock_levels_with_latency clocks;
phm_get_sysfs_buf(&buf, &size);
switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(
@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get gfx clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get memory clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get soc clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get dcef clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;


@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
int ret = 0;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
phm_get_sysfs_buf(&buf, &size);
switch (type) {
case PP_SCLK:
ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_sclks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_memclocks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_socclocks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
for (i = 0; i < fclk_dpm_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, fclk_dpm_table->dpm_levels[i].value,
fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
break;
@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_dcefclocks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
od_table->GfxclkFmin);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
size += sprintf(buf + size, "1: %10uMhz\n",
od_table->GfxclkFmax);
}
break;
case OD_MCLK:
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
size += sprintf(buf + size, "1: %10uMhz\n",
od_table->UclkFmax);
}
@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
od_table->GfxclkFreq1,
od_table->GfxclkVolt1 / VOLTAGE_SCALE);
size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
od_table->GfxclkFreq2,
od_table->GfxclkVolt2 / VOLTAGE_SCALE);
size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
od_table->GfxclkFreq3,
od_table->GfxclkVolt3 / VOLTAGE_SCALE);
}
@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
}
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
}


@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
if (rc)
return rc;
return sprintf(buf, "%u\n", reg & 1);
return sprintf(buf, "%u\n", reg);
}
static DEVICE_ATTR_RO(vga_pw);


@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
struct pci_dev *pdev;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
/*
* Free allocated MMIO memory only on Gen2 VMs.
* On Gen1 VMs, release the PCI device
*/
if (efi_enabled(EFI_BOOT)) {
vmbus_free_mmio(hv->mem->start, hv->fb_size);
} else {
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
if (!pdev) {
drm_err(dev, "Unable to find PCI Hyper-V video\n");
return -ENODEV;
}
pci_release_region(pdev, 0);
pci_dev_put(pdev);
}
return 0;
}
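Two details in the Hyper-V removal hunk deserve a note. Generation 2 VMs are UEFI-only, so efi_enabled(EFI_BOOT) works as a generation test: on Gen2 the framebuffer region came from vmbus_allocate_mmio() and is returned the same way, while on Gen1 it came from BAR 0 of the emulated VGA PCI device. Also, pci_get_device() returns a referenced device, so every successful lookup must be balanced with pci_dev_put(), as the hunk does; in outline:

	struct pci_dev *pdev = pci_get_device(vendor, device, NULL);

	if (pdev) {
		pci_release_region(pdev, 0);   /* undo the earlier request */
		pci_dev_put(pdev);             /* drop the lookup reference */
	}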


@ -301,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
user_forcewake(gt, true);
wait_for_suspend(gt);
intel_pxp_suspend(&gt->pxp, false);
intel_pxp_suspend_prepare(&gt->pxp);
}
static suspend_state_t pm_suspend_target(void)
@ -326,6 +326,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
intel_pxp_suspend(&gt->pxp);
/*
* On disabling the device, we want to turn off HW access to memory
@ -353,7 +354,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_pxp_suspend(&gt->pxp, true);
intel_pxp_runtime_suspend(&gt->pxp);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@ -371,7 +372,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
intel_pxp_resume(&gt->pxp);
intel_pxp_runtime_resume(&gt->pxp);
return 0;
}
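These gt/PM hunks split the old intel_pxp_suspend(pxp, runtime) into phase-specific hooks. The wakeref fix named in the summary comes from the system-suspend path: it can run with the device already runtime-suspended, so hardware teardown must take an explicit wakeref, which the PXP hunk below does via with_intel_runtime_pm(). The shape of that guard, with an illustrative register write:

	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		/* the device is guaranteed awake inside this block */
		intel_uncore_write(uncore, SOME_REG, 0);
	}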


@ -7,26 +7,29 @@
#include "intel_pxp_irq.h"
#include "intel_pxp_pm.h"
#include "intel_pxp_session.h"
#include "i915_drv.h"
void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
pxp->arb_is_valid = false;
/*
* Contexts using protected objects keep a runtime PM reference, so we
* can only runtime suspend when all of them have been either closed
* or banned. Therefore, there is no need to invalidate in that
* scenario.
*/
if (!runtime)
intel_pxp_invalidate(pxp);
intel_pxp_invalidate(pxp);
}
intel_pxp_fini_hw(pxp);
void intel_pxp_suspend(struct intel_pxp *pxp)
{
intel_wakeref_t wakeref;
pxp->hw_state_invalidated = false;
if (!intel_pxp_is_enabled(pxp))
return;
with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
intel_pxp_fini_hw(pxp);
pxp->hw_state_invalidated = false;
}
}
void intel_pxp_resume(struct intel_pxp *pxp)
@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp)
intel_pxp_init_hw(pxp);
}
void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
pxp->arb_is_valid = false;
intel_pxp_fini_hw(pxp);
pxp->hw_state_invalidated = false;
}


@ -9,16 +9,29 @@
#include "intel_pxp_types.h"
#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
void intel_pxp_suspend(struct intel_pxp *pxp);
void intel_pxp_resume(struct intel_pxp *pxp);
void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
}
static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}
static inline void intel_pxp_resume(struct intel_pxp *pxp)
{
}
#endif
static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
{
}
#endif
static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
{
intel_pxp_resume(pxp);
}
#endif /* __INTEL_PXP_PM_H__ */


@ -2626,6 +2626,27 @@ nv174_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
};
static const struct nvkm_device_chip
nv176_chipset = {
.name = "GA106",
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
};
static const struct nvkm_device_chip
nv177_chipset = {
.name = "GA107",
@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {


@ -207,11 +207,13 @@ int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
wpr_header_dump(&acr->subdev, hdr);
if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
return -ENOMEM;
lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
}
return 0;
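Both ACR hunks (gm200 and gp102) fix the same mismatch: nvkm_acr_lsfw_add() reports failure through an ERR_PTR-encoded pointer, so the old NULL test could never fire and failures went unreported. The kernel convention, with a hypothetical make_thing() producer:

	#include <linux/err.h>

	static int use_thing(void)
	{
		struct thing *t = make_thing();

		if (IS_ERR(t))             /* a plain !t check would miss this */
			return PTR_ERR(t); /* propagate the encoded errno */
		/* ... use t ... */
		return 0;
	}

The vc4 hunk at the end of this commit is the mirror-image fix: ->gem_create_object() callers test for NULL, so returning ERR_PTR(-ENOMEM) there looked like success.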


@ -161,11 +161,13 @@ int
gp102_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
wpr_header_v1_dump(&acr->subdev, hdr);
if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
return -ENOMEM;
lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
}
return 0;


@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
return NULL;
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);