Merge branch 'drm-next-3.9' of git://people.freedesktop.org/~agd5f/linux into drm-next
Alex writes:
- CS ioctl cleanup and unification. Unification of a lot of functionality
  that was duplicated across multiple generations of hardware.
- Add support for Oland GPUs.
- Deprecate UMS support. Mesa and the ddx dropped support for UMS and
  apparently very few people still use it since the UMS CS ioctl was broken
  for several kernels and no one reported it. It was fixed in 3.8/stable.
- Rework GPU reset. Use the status registers to determine what blocks to
  reset. This better matches the recommended reset programming model. This
  also allows us to properly reset blocks besides GFX and DMA.
- Switch the VM set page code to use an IB rather than the ring. This fixes
  overflow issues when doing large page table updates using a small ring
  like DMA.
- Several small cleanups and bug fixes.

* 'drm-next-3.9' of git://people.freedesktop.org/~agd5f/linux: (38 commits)
  drm/radeon/dce6: fix display powergating
  drm/radeon: add Oland pci ids
  drm/radeon: radeon-asic updates for Oland
  drm/radeon: add ucode loading support for Oland
  drm/radeon: fill in gpu init for Oland
  drm/radeon: add Oland chip family
  drm/radeon: switch back to using the DMA ring for VM PT updates
  drm/radeon: use IBs for VM page table updates v2
  drm/radeon: don't reset the MC on IGPs/APUs
  drm/radeon: use the reset mask to determine if rings are hung
  drm/radeon: halt engines before disabling MC (si)
  drm/radeon: halt engines before disabling MC (cayman/TN)
  drm/radeon: halt engines before disabling MC (evergreen)
  drm/radeon: halt engines before disabling MC (6xx/7xx)
  drm/radeon: use status regs to determine what to reset (si)
  drm/radeon: use status regs to determine what to reset (cayman)
  drm/radeon: use status regs to determine what to reset (evergreen)
  drm/radeon: use status regs to determine what to reset (6xx/7xx)
  drm/radeon: rework GPU reset on cayman/TN
  ...
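The reset rework is easiest to see in the new evergreen_asic_reset() in the diff below: a reset mask is derived from the status registers, the engine is flagged as hung in the BIOS scratch register, only the implicated blocks are soft-reset, and the status registers are then re-read to confirm the hang cleared. A condensed sketch of that flow, taken from the evergreen.c hunks below (the cayman path in ni.c has the same shape):

    int evergreen_asic_reset(struct radeon_device *rdev)
    {
        /* read GRBM/SRBM/DMA status and build a mask of hung blocks */
        u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

        if (reset_mask)
            r600_set_bios_scratch_engine_hung(rdev, true);

        /* reset only the blocks named in the mask */
        evergreen_gpu_soft_reset(rdev, reset_mask);

        /* re-read the status registers to see whether the hang cleared */
        reset_mask = evergreen_gpu_check_soft_reset(rdev);

        if (!reset_mask)
            r600_set_bios_scratch_engine_hung(rdev, false);

        return 0;
    }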
commit 73ccd6962f
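One change that ripples through many call sites below: the async DMA packet header on evergreen/NI no longer encodes separate tiling and semaphore bits but a single 8-bit sub-command field, which is why every DMA_PACKET(...) invocation in this diff loses an argument. Side by side (the _OLD/_NEW suffixes are added here only for illustration; both macros are simply named DMA_PACKET in the tree):

    /* old encoding: cmd + tiling bit + semaphore bit + count */
    #define DMA_PACKET_OLD(cmd, t, s, n)    ((((cmd) & 0xF) << 28) | \
                                             (((t) & 0x1) << 23)   | \
                                             (((s) & 0x1) << 22)   | \
                                             (((n) & 0xFFFFF) << 0))

    /* new encoding: cmd + 8-bit sub-command + count,
     * decoded on the parse side with GET_DMA_SUB_CMD() */
    #define DMA_PACKET_NEW(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28)      | \
                                             (((sub_cmd) & 0xFF) << 20) | \
                                             (((n) & 0xFFFFF) << 0))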
drivers/gpu/drm/Kconfig
@@ -96,6 +96,7 @@ config DRM_RADEON
    select DRM_TTM
    select POWER_SUPPLY
    select HWMON
    select BACKLIGHT_CLASS_DEVICE
    help
      Choose this option if you have an ATI Radeon graphics card. There
      are both PCI and AGP versions. You don't need to choose this to
drivers/gpu/drm/radeon/Kconfig
@@ -1,31 +1,8 @@
config DRM_RADEON_KMS
    bool "Enable modesetting on radeon by default - NEW DRIVER"
config DRM_RADEON_UMS
    bool "Enable userspace modesetting on radeon (DEPRECATED)"
    depends on DRM_RADEON
    select BACKLIGHT_CLASS_DEVICE
    help
      Choose this option if you want kernel modesetting enabled by default.
      Choose this option if you still need userspace modesetting.

      This is a completely new driver. It's only part of the existing drm
      for compatibility reasons. It requires an entirely different graphics
      stack above it and works very differently from the old drm stack.
      i.e. don't enable this unless you know what you are doing it may
      cause issues or bugs compared to the previous userspace driver stack.

      When kernel modesetting is enabled the IOCTL of radeon/drm
      driver are considered as invalid and an error message is printed
      in the log and they return failure.

      KMS enabled userspace will use new API to talk with the radeon/drm
      driver. The new API provide functions to create/destroy/share/mmap
      buffer object which are then managed by the kernel memory manager
      (here TTM). In order to submit command to the GPU the userspace
      provide a buffer holding the command stream, along this buffer
      userspace have to provide a list of buffer object used by the
      command stream. The kernel radeon driver will then place buffer
      in GPU accessible memory and will update command stream to reflect
      the position of the different buffers.

      The kernel will also perform security check on command stream
      provided by the user, we want to catch and forbid any illegal use
      of the GPU such as DMA into random system memory or into memory
      not owned by the process supplying the command stream.
      Userspace modesetting is deprecated for quite some time now, so
      enable this only if you have ancient versions of the DDX drivers.
drivers/gpu/drm/radeon/Makefile
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h

$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h

radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
    radeon_irq.o r300_cmdbuf.o r600_cp.o
radeon-y := radeon_drv.o

# add UMS driver
radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
    radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o

# add KMS driver
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
    radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
    radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
    radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
    rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
    r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
    r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
    r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
    evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
    evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
drivers/gpu/drm/radeon/atombios_crtc.c
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
        radeon_crtc->enabled = true;
        /* adjust pm to dpms changes BEFORE enabling crtcs */
        radeon_pm_compute_clocks(rdev);
        if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
            atombios_powergate_crtc(crtc, ATOM_DISABLE);
        atombios_enable_crtc(crtc, ATOM_ENABLE);
        if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
            atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
            atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
        atombios_enable_crtc(crtc, ATOM_DISABLE);
        radeon_crtc->enabled = false;
        if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
            atombios_powergate_crtc(crtc, ATOM_ENABLE);
        /* adjust pm to dpms changes AFTER disabling crtcs */
        radeon_pm_compute_clocks(rdev);
        break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
    int i;

    atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
    if (ASIC_IS_DCE6(rdev))
        atombios_powergate_crtc(crtc, ATOM_ENABLE);

    for (i = 0; i < rdev->num_crtc; i++) {
        if (rdev->mode_info.crtcs[i] &&
drivers/gpu/drm/radeon/evergreen.c
@@ -2308,17 +2308,275 @@ int evergreen_mc_init(struct radeon_device *rdev)
    return 0;
}

bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
{
    u32 srbm_status;
    u32 grbm_status;
    u32 grbm_status_se0, grbm_status_se1;
    dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
        RREG32(SRBM_STATUS));
    dev_info(rdev->dev, " SRBM_STATUS2 = 0x%08X\n",
        RREG32(SRBM_STATUS2));
    dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
        RREG32(CP_STALLED_STAT1));
    dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
        RREG32(CP_STALLED_STAT2));
    dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
        RREG32(CP_BUSY_STAT));
    dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
        RREG32(CP_STAT));
    dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
        RREG32(DMA_STATUS_REG));
    if (rdev->family >= CHIP_CAYMAN) {
        dev_info(rdev->dev, " R_00D834_DMA_STATUS_REG = 0x%08X\n",
            RREG32(DMA_STATUS_REG + 0x800));
    }
}

    srbm_status = RREG32(SRBM_STATUS);
    grbm_status = RREG32(GRBM_STATUS);
    grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
    grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
    if (!(grbm_status & GUI_ACTIVE)) {
bool evergreen_is_display_hung(struct radeon_device *rdev)
{
    u32 crtc_hung = 0;
    u32 crtc_status[6];
    u32 i, j, tmp;

    for (i = 0; i < rdev->num_crtc; i++) {
        if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
            crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
            crtc_hung |= (1 << i);
        }
    }

    for (j = 0; j < 10; j++) {
        for (i = 0; i < rdev->num_crtc; i++) {
            if (crtc_hung & (1 << i)) {
                tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
                if (tmp != crtc_status[i])
                    crtc_hung &= ~(1 << i);
            }
        }
        if (crtc_hung == 0)
            return false;
        udelay(100);
    }

    return true;
}

static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
    u32 reset_mask = 0;
    u32 tmp;

    /* GRBM_STATUS */
    tmp = RREG32(GRBM_STATUS);
    if (tmp & (PA_BUSY | SC_BUSY |
               SH_BUSY | SX_BUSY |
               TA_BUSY | VGT_BUSY |
               DB_BUSY | CB_BUSY |
               SPI_BUSY | VGT_BUSY_NO_DMA))
        reset_mask |= RADEON_RESET_GFX;

    if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
               CP_BUSY | CP_COHERENCY_BUSY))
        reset_mask |= RADEON_RESET_CP;

    if (tmp & GRBM_EE_BUSY)
        reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

    /* DMA_STATUS_REG */
    tmp = RREG32(DMA_STATUS_REG);
    if (!(tmp & DMA_IDLE))
        reset_mask |= RADEON_RESET_DMA;

    /* SRBM_STATUS2 */
    tmp = RREG32(SRBM_STATUS2);
    if (tmp & DMA_BUSY)
        reset_mask |= RADEON_RESET_DMA;

    /* SRBM_STATUS */
    tmp = RREG32(SRBM_STATUS);
    if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
        reset_mask |= RADEON_RESET_RLC;

    if (tmp & IH_BUSY)
        reset_mask |= RADEON_RESET_IH;

    if (tmp & SEM_BUSY)
        reset_mask |= RADEON_RESET_SEM;

    if (tmp & GRBM_RQ_PENDING)
        reset_mask |= RADEON_RESET_GRBM;

    if (tmp & VMC_BUSY)
        reset_mask |= RADEON_RESET_VMC;

    if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
               MCC_BUSY | MCD_BUSY))
        reset_mask |= RADEON_RESET_MC;

    if (evergreen_is_display_hung(rdev))
        reset_mask |= RADEON_RESET_DISPLAY;

    /* VM_L2_STATUS */
    tmp = RREG32(VM_L2_STATUS);
    if (tmp & L2_BUSY)
        reset_mask |= RADEON_RESET_VMC;

    return reset_mask;
}

static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
    struct evergreen_mc_save save;
    u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
    u32 tmp;

    if (reset_mask == 0)
        return;

    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

    evergreen_print_gpu_status_regs(rdev);

    /* Disable CP parsing/prefetching */
    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

    if (reset_mask & RADEON_RESET_DMA) {
        /* Disable DMA */
        tmp = RREG32(DMA_RB_CNTL);
        tmp &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL, tmp);
    }

    udelay(50);

    evergreen_mc_stop(rdev, &save);
    if (evergreen_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
    }

    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
        grbm_soft_reset |= SOFT_RESET_DB |
                           SOFT_RESET_CB |
                           SOFT_RESET_PA |
                           SOFT_RESET_SC |
                           SOFT_RESET_SPI |
                           SOFT_RESET_SX |
                           SOFT_RESET_SH |
                           SOFT_RESET_TC |
                           SOFT_RESET_TA |
                           SOFT_RESET_VC |
                           SOFT_RESET_VGT;
    }

    if (reset_mask & RADEON_RESET_CP) {
        grbm_soft_reset |= SOFT_RESET_CP |
                           SOFT_RESET_VGT;

        srbm_soft_reset |= SOFT_RESET_GRBM;
    }

    if (reset_mask & RADEON_RESET_DMA)
        srbm_soft_reset |= SOFT_RESET_DMA;

    if (reset_mask & RADEON_RESET_DISPLAY)
        srbm_soft_reset |= SOFT_RESET_DC;

    if (reset_mask & RADEON_RESET_RLC)
        srbm_soft_reset |= SOFT_RESET_RLC;

    if (reset_mask & RADEON_RESET_SEM)
        srbm_soft_reset |= SOFT_RESET_SEM;

    if (reset_mask & RADEON_RESET_IH)
        srbm_soft_reset |= SOFT_RESET_IH;

    if (reset_mask & RADEON_RESET_GRBM)
        srbm_soft_reset |= SOFT_RESET_GRBM;

    if (reset_mask & RADEON_RESET_VMC)
        srbm_soft_reset |= SOFT_RESET_VMC;

    if (!(rdev->flags & RADEON_IS_IGP)) {
        if (reset_mask & RADEON_RESET_MC)
            srbm_soft_reset |= SOFT_RESET_MC;
    }

    if (grbm_soft_reset) {
        tmp = RREG32(GRBM_SOFT_RESET);
        tmp |= grbm_soft_reset;
        dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(GRBM_SOFT_RESET, tmp);
        tmp = RREG32(GRBM_SOFT_RESET);

        udelay(50);

        tmp &= ~grbm_soft_reset;
        WREG32(GRBM_SOFT_RESET, tmp);
        tmp = RREG32(GRBM_SOFT_RESET);
    }

    if (srbm_soft_reset) {
        tmp = RREG32(SRBM_SOFT_RESET);
        tmp |= srbm_soft_reset;
        dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(SRBM_SOFT_RESET, tmp);
        tmp = RREG32(SRBM_SOFT_RESET);

        udelay(50);

        tmp &= ~srbm_soft_reset;
        WREG32(SRBM_SOFT_RESET, tmp);
        tmp = RREG32(SRBM_SOFT_RESET);
    }

    /* Wait a little for things to settle down */
    udelay(50);

    evergreen_mc_resume(rdev, &save);
    udelay(50);

    evergreen_print_gpu_status_regs(rdev);
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
    u32 reset_mask;

    reset_mask = evergreen_gpu_check_soft_reset(rdev);

    if (reset_mask)
        r600_set_bios_scratch_engine_hung(rdev, true);

    evergreen_gpu_soft_reset(rdev, reset_mask);

    reset_mask = evergreen_gpu_check_soft_reset(rdev);

    if (!reset_mask)
        r600_set_bios_scratch_engine_hung(rdev, false);

    return 0;
}

/**
 * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
    u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

    if (!(reset_mask & (RADEON_RESET_GFX |
                        RADEON_RESET_COMPUTE |
                        RADEON_RESET_CP))) {
        radeon_ring_lockup_update(ring);
        return false;
    }
@@ -2327,135 +2585,26 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
    return radeon_ring_test_lockup(rdev, ring);
}

static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
    u32 grbm_reset = 0;
    u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
        return;

    dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
        RREG32(SRBM_STATUS));
    dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
        RREG32(CP_STALLED_STAT1));
    dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
        RREG32(CP_STALLED_STAT2));
    dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
        RREG32(CP_BUSY_STAT));
    dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
        RREG32(CP_STAT));

    /* Disable CP parsing/prefetching */
    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

    /* reset all the gfx blocks */
    grbm_reset = (SOFT_RESET_CP |
                  SOFT_RESET_CB |
                  SOFT_RESET_DB |
                  SOFT_RESET_PA |
                  SOFT_RESET_SC |
                  SOFT_RESET_SPI |
                  SOFT_RESET_SH |
                  SOFT_RESET_SX |
                  SOFT_RESET_TC |
                  SOFT_RESET_TA |
                  SOFT_RESET_VC |
                  SOFT_RESET_VGT);

    dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
    WREG32(GRBM_SOFT_RESET, grbm_reset);
    (void)RREG32(GRBM_SOFT_RESET);
    udelay(50);
    WREG32(GRBM_SOFT_RESET, 0);
    (void)RREG32(GRBM_SOFT_RESET);

    dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
        RREG32(SRBM_STATUS));
    dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
        RREG32(CP_STALLED_STAT1));
    dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
        RREG32(CP_STALLED_STAT2));
    dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
        RREG32(CP_BUSY_STAT));
    dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
        RREG32(CP_STAT));
}

static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
{
    u32 tmp;

    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
        return;

    dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
        RREG32(DMA_STATUS_REG));

    /* Disable DMA */
    tmp = RREG32(DMA_RB_CNTL);
    tmp &= ~DMA_RB_ENABLE;
    WREG32(DMA_RB_CNTL, tmp);

    /* Reset dma */
    WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
    RREG32(SRBM_SOFT_RESET);
    udelay(50);
    WREG32(SRBM_SOFT_RESET, 0);

    dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
        RREG32(DMA_STATUS_REG));
}

static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
    struct evergreen_mc_save save;

    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
        reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);

    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
        reset_mask &= ~RADEON_RESET_DMA;

    if (reset_mask == 0)
        return 0;

    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

    evergreen_mc_stop(rdev, &save);
    if (evergreen_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
    if (!(reset_mask & RADEON_RESET_DMA)) {
        radeon_ring_lockup_update(ring);
        return false;
    }

    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
        evergreen_gpu_soft_reset_gfx(rdev);

    if (reset_mask & RADEON_RESET_DMA)
        evergreen_gpu_soft_reset_dma(rdev);

    /* Wait a little for things to settle down */
    udelay(50);

    evergreen_mc_resume(rdev, &save);
    return 0;
}

int evergreen_asic_reset(struct radeon_device *rdev)
{
    return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
                                           RADEON_RESET_COMPUTE |
                                           RADEON_RESET_DMA));
    /* force ring activities */
    radeon_ring_force_activity(rdev, ring);
    return radeon_ring_test_lockup(rdev, ring);
}

/* Interrupts */
@@ -3280,14 +3429,14 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
    struct radeon_ring *ring = &rdev->ring[fence->ring];
    u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
    /* write the fence */
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
    radeon_ring_write(ring, addr & 0xfffffffc);
    radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
    radeon_ring_write(ring, fence->seq);
    /* generate an interrupt */
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
    /* flush HDP */
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
    radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
    radeon_ring_write(ring, 1);
}
@@ -3310,7 +3459,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
        while ((next_rptr & 7) != 5)
            next_rptr++;
        next_rptr += 3;
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
        radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
        radeon_ring_write(ring, next_rptr);
@@ -3320,8 +3469,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
     * Pad as necessary with NOPs.
     */
    while ((ring->wptr & 7) != 5)
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
    radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
    radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));

@@ -3380,7 +3529,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
        if (cur_size_in_dw > 0xFFFFF)
            cur_size_in_dw = 0xFFFFF;
        size_in_dw -= cur_size_in_dw;
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
        radeon_ring_write(ring, dst_offset & 0xfffffffc);
        radeon_ring_write(ring, src_offset & 0xfffffffc);
        radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3488,7 +3637,7 @@ static int evergreen_startup(struct radeon_device *rdev)
    ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
    r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
                         DMA_RB_RPTR, DMA_RB_WPTR,
                         2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
                         2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
    if (r)
        return r;
(File diff suppressed because it is too large)
drivers/gpu/drm/radeon/evergreen_reg.h
@@ -223,6 +223,7 @@
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
drivers/gpu/drm/radeon/evergreend.h
@@ -729,6 +729,18 @@
#define WAIT_UNTIL 0x8040

#define SRBM_STATUS 0x0E50
#define RLC_RQ_PENDING (1 << 3)
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
#define MCB_BUSY (1 << 9)
#define MCB_NON_DISPLAY_BUSY (1 << 10)
#define MCC_BUSY (1 << 11)
#define MCD_BUSY (1 << 12)
#define SEM_BUSY (1 << 14)
#define RLC_BUSY (1 << 15)
#define IH_BUSY (1 << 17)
#define SRBM_STATUS2 0x0EC4
#define DMA_BUSY (1 << 5)
#define SRBM_SOFT_RESET 0x0E60
#define SRBM_SOFT_RESET_ALL_MASK 0x00FEEFA6
#define SOFT_RESET_BIF (1 << 1)
@@ -924,20 +936,23 @@
#define CAYMAN_DMA1_CNTL 0xd82c

/* async DMA packets */
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
                                  (((t) & 0x1) << 23) | \
                                  (((s) & 0x1) << 22) | \
                                  (((n) & 0xFFFFF) << 0))
#define DMA_PACKET(cmd, sub_cmd, n) ((((cmd) & 0xF) << 28) | \
                                     (((sub_cmd) & 0xFF) << 20) |\
                                     (((n) & 0xFFFFF) << 0))
#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
#define GET_DMA_SUB_CMD(h) (((h) & 0x0ff00000) >> 20)

/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
#define DMA_PACKET_INDIRECT_BUFFER 0x4
#define DMA_PACKET_SEMAPHORE 0x5
#define DMA_PACKET_FENCE 0x6
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
#define DMA_PACKET_NOP 0xf
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
#define DMA_PACKET_INDIRECT_BUFFER 0x4
#define DMA_PACKET_SEMAPHORE 0x5
#define DMA_PACKET_FENCE 0x6
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
#define DMA_PACKET_NOP 0xf

/* PCIE link stuff */
#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
@@ -980,16 +995,7 @@
/*
 * PM4
 */
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3

#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
                         (((reg) >> 2) & 0xFFFF) | \
                         ((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -998,7 +1004,7 @@

#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))

#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
                        (((op) & 0xFF) << 8) | \
                        ((n) & 0x3FFF) << 16)
drivers/gpu/drm/radeon/ni.c
@@ -34,6 +34,8 @@
#include "ni_reg.h"
#include "cayman_blit_shaders.h"

extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1310,120 +1312,90 @@ void cayman_dma_fini(struct radeon_device *rdev)
    radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
}

static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
{
    u32 grbm_reset = 0;

    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
        return;

    dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
        RREG32(SRBM_STATUS));
    dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
        RREG32(CP_STALLED_STAT1));
    dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
        RREG32(CP_STALLED_STAT2));
    dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
        RREG32(CP_BUSY_STAT));
    dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
        RREG32(CP_STAT));

    /* Disable CP parsing/prefetching */
    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

    /* reset all the gfx blocks */
    grbm_reset = (SOFT_RESET_CP |
                  SOFT_RESET_CB |
                  SOFT_RESET_DB |
                  SOFT_RESET_GDS |
                  SOFT_RESET_PA |
                  SOFT_RESET_SC |
                  SOFT_RESET_SPI |
                  SOFT_RESET_SH |
                  SOFT_RESET_SX |
                  SOFT_RESET_TC |
                  SOFT_RESET_TA |
                  SOFT_RESET_VGT |
                  SOFT_RESET_IA);

    dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
    WREG32(GRBM_SOFT_RESET, grbm_reset);
    (void)RREG32(GRBM_SOFT_RESET);
    udelay(50);
    WREG32(GRBM_SOFT_RESET, 0);
    (void)RREG32(GRBM_SOFT_RESET);

    dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
        RREG32(GRBM_STATUS));
    dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE0));
    dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
        RREG32(GRBM_STATUS_SE1));
    dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
        RREG32(SRBM_STATUS));
    dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
        RREG32(CP_STALLED_STAT1));
    dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
        RREG32(CP_STALLED_STAT2));
    dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
        RREG32(CP_BUSY_STAT));
    dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
        RREG32(CP_STAT));

}

static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
    u32 reset_mask = 0;
    u32 tmp;

    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
        return;
    /* GRBM_STATUS */
    tmp = RREG32(GRBM_STATUS);
    if (tmp & (PA_BUSY | SC_BUSY |
               SH_BUSY | SX_BUSY |
               TA_BUSY | VGT_BUSY |
               DB_BUSY | CB_BUSY |
               GDS_BUSY | SPI_BUSY |
               IA_BUSY | IA_BUSY_NO_DMA))
        reset_mask |= RADEON_RESET_GFX;

    dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
        RREG32(DMA_STATUS_REG));
    if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
               CP_BUSY | CP_COHERENCY_BUSY))
        reset_mask |= RADEON_RESET_CP;

    /* dma0 */
    tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
    tmp &= ~DMA_RB_ENABLE;
    WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
    if (tmp & GRBM_EE_BUSY)
        reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

    /* dma1 */
    tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
    tmp &= ~DMA_RB_ENABLE;
    WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
    /* DMA_STATUS_REG 0 */
    tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
    if (!(tmp & DMA_IDLE))
        reset_mask |= RADEON_RESET_DMA;

    /* Reset dma */
    WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
    RREG32(SRBM_SOFT_RESET);
    udelay(50);
    WREG32(SRBM_SOFT_RESET, 0);
    /* DMA_STATUS_REG 1 */
    tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
    if (!(tmp & DMA_IDLE))
        reset_mask |= RADEON_RESET_DMA1;

    dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
        RREG32(DMA_STATUS_REG));
    /* SRBM_STATUS2 */
    tmp = RREG32(SRBM_STATUS2);
    if (tmp & DMA_BUSY)
        reset_mask |= RADEON_RESET_DMA;

    if (tmp & DMA1_BUSY)
        reset_mask |= RADEON_RESET_DMA1;

    /* SRBM_STATUS */
    tmp = RREG32(SRBM_STATUS);
    if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
        reset_mask |= RADEON_RESET_RLC;

    if (tmp & IH_BUSY)
        reset_mask |= RADEON_RESET_IH;

    if (tmp & SEM_BUSY)
        reset_mask |= RADEON_RESET_SEM;

    if (tmp & GRBM_RQ_PENDING)
        reset_mask |= RADEON_RESET_GRBM;

    if (tmp & VMC_BUSY)
        reset_mask |= RADEON_RESET_VMC;

    if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
               MCC_BUSY | MCD_BUSY))
        reset_mask |= RADEON_RESET_MC;

    if (evergreen_is_display_hung(rdev))
        reset_mask |= RADEON_RESET_DISPLAY;

    /* VM_L2_STATUS */
    tmp = RREG32(VM_L2_STATUS);
    if (tmp & L2_BUSY)
        reset_mask |= RADEON_RESET_VMC;

    return reset_mask;
}

static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
    struct evergreen_mc_save save;

    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
        reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);

    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
        reset_mask &= ~RADEON_RESET_DMA;
    u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
    u32 tmp;

    if (reset_mask == 0)
        return 0;
        return;

    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

    evergreen_print_gpu_status_regs(rdev);
    dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
        RREG32(0x14F8));
    dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1433,29 +1405,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
    dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
        RREG32(0x14DC));

    /* Disable CP parsing/prefetching */
    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);

    if (reset_mask & RADEON_RESET_DMA) {
        /* dma0 */
        tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
        tmp &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
    }

    if (reset_mask & RADEON_RESET_DMA1) {
        /* dma1 */
        tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
        tmp &= ~DMA_RB_ENABLE;
        WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
    }

    udelay(50);

    evergreen_mc_stop(rdev, &save);
    if (evergreen_mc_wait_for_idle(rdev)) {
        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
    }

    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
        cayman_gpu_soft_reset_gfx(rdev);
    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
        grbm_soft_reset = SOFT_RESET_CB |
                          SOFT_RESET_DB |
                          SOFT_RESET_GDS |
                          SOFT_RESET_PA |
                          SOFT_RESET_SC |
                          SOFT_RESET_SPI |
                          SOFT_RESET_SH |
                          SOFT_RESET_SX |
                          SOFT_RESET_TC |
                          SOFT_RESET_TA |
                          SOFT_RESET_VGT |
                          SOFT_RESET_IA;
    }

    if (reset_mask & RADEON_RESET_CP) {
        grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

        srbm_soft_reset |= SOFT_RESET_GRBM;
    }

    if (reset_mask & RADEON_RESET_DMA)
        cayman_gpu_soft_reset_dma(rdev);
        srbm_soft_reset |= SOFT_RESET_DMA;

    if (reset_mask & RADEON_RESET_DMA1)
        srbm_soft_reset |= SOFT_RESET_DMA1;

    if (reset_mask & RADEON_RESET_DISPLAY)
        srbm_soft_reset |= SOFT_RESET_DC;

    if (reset_mask & RADEON_RESET_RLC)
        srbm_soft_reset |= SOFT_RESET_RLC;

    if (reset_mask & RADEON_RESET_SEM)
        srbm_soft_reset |= SOFT_RESET_SEM;

    if (reset_mask & RADEON_RESET_IH)
        srbm_soft_reset |= SOFT_RESET_IH;

    if (reset_mask & RADEON_RESET_GRBM)
        srbm_soft_reset |= SOFT_RESET_GRBM;

    if (reset_mask & RADEON_RESET_VMC)
        srbm_soft_reset |= SOFT_RESET_VMC;

    if (!(rdev->flags & RADEON_IS_IGP)) {
        if (reset_mask & RADEON_RESET_MC)
            srbm_soft_reset |= SOFT_RESET_MC;
    }

    if (grbm_soft_reset) {
        tmp = RREG32(GRBM_SOFT_RESET);
        tmp |= grbm_soft_reset;
        dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(GRBM_SOFT_RESET, tmp);
        tmp = RREG32(GRBM_SOFT_RESET);

        udelay(50);

        tmp &= ~grbm_soft_reset;
        WREG32(GRBM_SOFT_RESET, tmp);
        tmp = RREG32(GRBM_SOFT_RESET);
    }

    if (srbm_soft_reset) {
        tmp = RREG32(SRBM_SOFT_RESET);
        tmp |= srbm_soft_reset;
        dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
        WREG32(SRBM_SOFT_RESET, tmp);
        tmp = RREG32(SRBM_SOFT_RESET);

        udelay(50);

        tmp &= ~srbm_soft_reset;
        WREG32(SRBM_SOFT_RESET, tmp);
        tmp = RREG32(SRBM_SOFT_RESET);
    }

    /* Wait a little for things to settle down */
    udelay(50);

    evergreen_mc_resume(rdev, &save);
    return 0;
    udelay(50);

    evergreen_print_gpu_status_regs(rdev);
}

int cayman_asic_reset(struct radeon_device *rdev)
{
    return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
                                        RADEON_RESET_COMPUTE |
                                        RADEON_RESET_DMA));
    u32 reset_mask;

    reset_mask = cayman_gpu_check_soft_reset(rdev);

    if (reset_mask)
        r600_set_bios_scratch_engine_hung(rdev, true);

    cayman_gpu_soft_reset(rdev, reset_mask);

    reset_mask = cayman_gpu_check_soft_reset(rdev);

    if (!reset_mask)
        r600_set_bios_scratch_engine_hung(rdev, false);

    return 0;
}

/**
 * cayman_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
    u32 reset_mask = cayman_gpu_check_soft_reset(rdev);

    if (!(reset_mask & (RADEON_RESET_GFX |
                        RADEON_RESET_COMPUTE |
                        RADEON_RESET_CP))) {
        radeon_ring_lockup_update(ring);
        return false;
    }
    /* force CP activities */
    radeon_ring_force_activity(rdev, ring);
    return radeon_ring_test_lockup(rdev, ring);
}

/**
@@ -1464,18 +1565,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up (cayman-SI).
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
    u32 dma_status_reg;
    u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
    u32 mask;

    if (ring->idx == R600_RING_TYPE_DMA_INDEX)
        dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
        mask = RADEON_RESET_DMA;
    else
        dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
    if (dma_status_reg & DMA_IDLE) {
        mask = RADEON_RESET_DMA1;

    if (!(reset_mask & mask)) {
        radeon_ring_lockup_update(ring);
        return false;
    }
@@ -1843,19 +1946,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
 * cayman_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (cayman-si).
 * Update the page tables using the CP (cayman/TN).
 */
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
void cayman_vm_set_page(struct radeon_device *rdev,
                        struct radeon_ib *ib,
                        uint64_t pe,
                        uint64_t addr, unsigned count,
                        uint32_t incr, uint32_t flags)
{
    struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
    uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
    uint64_t value;
    unsigned ndw;
@@ -1866,9 +1971,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
            if (ndw > 0x3FFF)
                ndw = 0x3FFF;

            radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
            radeon_ring_write(ring, pe);
            radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
            ib->ptr[ib->length_dw++] = pe;
            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
            for (; ndw > 1; ndw -= 2, --count, pe += 8) {
                if (flags & RADEON_VM_PAGE_SYSTEM) {
                    value = radeon_vm_map_gart(rdev, addr);
@@ -1880,8 +1985,8 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
                }
                addr += incr;
                value |= r600_flags;
                radeon_ring_write(ring, value);
                radeon_ring_write(ring, upper_32_bits(value));
                ib->ptr[ib->length_dw++] = value;
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
            }
        }
    } else {
@@ -1891,9 +1996,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
                ndw = 0xFFFFE;

            /* for non-physically contiguous pages (system) */
            radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
            radeon_ring_write(ring, pe);
            radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
            ib->ptr[ib->length_dw++] = pe;
            ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
            for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                if (flags & RADEON_VM_PAGE_SYSTEM) {
                    value = radeon_vm_map_gart(rdev, addr);
@@ -1905,10 +2010,12 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
                }
                addr += incr;
                value |= r600_flags;
                radeon_ring_write(ring, value);
                radeon_ring_write(ring, upper_32_bits(value));
                ib->ptr[ib->length_dw++] = value;
                ib->ptr[ib->length_dw++] = upper_32_bits(value);
            }
        }
        while (ib->length_dw & 0x7)
            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
    }
}
drivers/gpu/drm/radeon/nid.h
@@ -49,6 +49,16 @@
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
#define RLC_RQ_PENDING (1 << 3)
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
#define MCB_BUSY (1 << 9)
#define MCB_NON_DISPLAY_BUSY (1 << 10)
#define MCC_BUSY (1 << 11)
#define MCD_BUSY (1 << 12)
#define SEM_BUSY (1 << 14)
#define RLC_BUSY (1 << 15)
#define IH_BUSY (1 << 17)

#define SRBM_SOFT_RESET 0x0E60
#define SOFT_RESET_BIF (1 << 1)
@@ -68,6 +78,10 @@
#define SOFT_RESET_REGBB (1 << 22)
#define SOFT_RESET_ORB (1 << 23)

#define SRBM_STATUS2 0x0EC4
#define DMA_BUSY (1 << 5)
#define DMA1_BUSY (1 << 6)

#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
#define RESPONSE_TYPE_MASK 0x000000F0
@@ -474,16 +488,7 @@
/*
 * PM4
 */
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3

#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
                         (((reg) >> 2) & 0xFFFF) | \
                         ((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
@@ -492,7 +497,7 @@

#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))

#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
                        (((op) & 0xFF) << 8) | \
                        ((n) & 0x3FFF) << 16)
drivers/gpu/drm/radeon/r100.c
@@ -1215,11 +1215,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
    struct radeon_cs_reloc *reloc;
    u32 value;

    r = r100_cs_packet_next_reloc(p, &reloc);
    r = radeon_cs_packet_next_reloc(p, &reloc, 0);
    if (r) {
        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                  idx, reg);
        r100_cs_dump_packet(p, pkt);
        radeon_cs_dump_packet(p, pkt);
        return r;
    }

@@ -1233,7 +1233,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
    if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
        if (reg == RADEON_SRC_PITCH_OFFSET) {
            DRM_ERROR("Cannot src blit from microtiled surface\n");
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return -EINVAL;
        }
        tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1263,16 +1263,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
    if (c > 16) {
        DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
                  pkt->opcode);
        r100_cs_dump_packet(p, pkt);
        radeon_cs_dump_packet(p, pkt);
        return -EINVAL;
    }
    track->num_arrays = c;
    for (i = 0; i < (c - 1); i+=2, idx+=3) {
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n",
                      pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        idx_value = radeon_get_ib_value(p, idx);
@@ -1281,11 +1281,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
        track->arrays[i + 0].esize = idx_value >> 8;
        track->arrays[i + 0].robj = reloc->robj;
        track->arrays[i + 0].esize &= 0x7F;
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n",
                      pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -1294,11 +1294,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
        track->arrays[i + 1].esize &= 0x7F;
    }
    if (c & 1) {
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n",
                      pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        idx_value = radeon_get_ib_value(p, idx);
@@ -1355,67 +1355,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
    return 0;
}

void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt)
{
    volatile uint32_t *ib;
    unsigned i;
    unsigned idx;

    ib = p->ib.ptr;
    idx = pkt->idx;
    for (i = 0; i <= (pkt->count + 1); i++, idx++) {
        DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
    }
}

/**
 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser: parser structure holding parsing context.
 * @pkt: where to store packet informations
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if packet is bigger than remaining ib size. or if packets is unknown.
 **/
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx)
{
    struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
    uint32_t header;

    if (idx >= ib_chunk->length_dw) {
        DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                  idx, ib_chunk->length_dw);
        return -EINVAL;
    }
    header = radeon_get_ib_value(p, idx);
    pkt->idx = idx;
    pkt->type = CP_PACKET_GET_TYPE(header);
    pkt->count = CP_PACKET_GET_COUNT(header);
    switch (pkt->type) {
    case PACKET_TYPE0:
        pkt->reg = CP_PACKET0_GET_REG(header);
        pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
        break;
    case PACKET_TYPE3:
        pkt->opcode = CP_PACKET3_GET_OPCODE(header);
        break;
    case PACKET_TYPE2:
        pkt->count = -1;
        break;
    default:
        DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
        return -EINVAL;
    }
    if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
        DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
        return -EINVAL;
    }
    return 0;
}

/**
 * r100_cs_packet_next_vline() - parse userspace VLINE packet
 * @parser: parser structure holding parsing context.
@@ -1444,7 +1383,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
    ib = p->ib.ptr;

    /* parse the wait until */
    r = r100_cs_packet_parse(p, &waitreloc, p->idx);
    r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
    if (r)
        return r;

@@ -1461,7 +1400,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
    }

    /* jump over the NOP */
    r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
    r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
    if (r)
        return r;

@@ -1471,7 +1410,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)

    header = radeon_get_ib_value(p, h_idx);
    crtc_id = radeon_get_ib_value(p, h_idx + 5);
    reg = CP_PACKET0_GET_REG(header);
    reg = R100_CP_PACKET0_GET_REG(header);
    obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
    if (!obj) {
        DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1506,54 +1445,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
    return 0;
}

/**
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser: parser structure holding parsing context.
 * @data: pointer to relocation data
 * @offset_start: starting offset
 * @offset_mask: offset mask (to align start offset on)
 * @reloc: reloc informations
 *
 * Check next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc)
{
    struct radeon_cs_chunk *relocs_chunk;
    struct radeon_cs_packet p3reloc;
    unsigned idx;
    int r;

    if (p->chunk_relocs_idx == -1) {
        DRM_ERROR("No relocation chunk !\n");
        return -EINVAL;
    }
    *cs_reloc = NULL;
    relocs_chunk = &p->chunks[p->chunk_relocs_idx];
    r = r100_cs_packet_parse(p, &p3reloc, p->idx);
    if (r) {
        return r;
    }
    p->idx += p3reloc.count + 2;
    if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
        DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                  p3reloc.idx);
        r100_cs_dump_packet(p, &p3reloc);
        return -EINVAL;
    }
    idx = radeon_get_ib_value(p, p3reloc.idx + 1);
    if (idx >= relocs_chunk->length_dw) {
        DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                  idx, relocs_chunk->length_dw);
        r100_cs_dump_packet(p, &p3reloc);
        return -EINVAL;
    }
    /* FIXME: we assume reloc size is 4 dwords */
    *cs_reloc = p->relocs_ptr[(idx / 4)];
    return 0;
}

static int r100_get_vtx_size(uint32_t vtx_fmt)
{
    int vtx_size;
@@ -1631,7 +1522,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        break;
@@ -1644,11 +1535,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
            return r;
        break;
    case RADEON_RB3D_DEPTHOFFSET:
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->zb.robj = reloc->robj;
@@ -1657,11 +1548,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
        break;
    case RADEON_RB3D_COLOROFFSET:
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->cb[0].robj = reloc->robj;
@@ -1673,11 +1564,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
    case RADEON_PP_TXOFFSET_1:
    case RADEON_PP_TXOFFSET_2:
        i = (reg - RADEON_PP_TXOFFSET_0) / 24;
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1700,11 +1591,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
    case RADEON_PP_CUBIC_OFFSET_T0_3:
    case RADEON_PP_CUBIC_OFFSET_T0_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->textures[0].cube_info[i].offset = idx_value;
@@ -1718,11 +1609,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
    case RADEON_PP_CUBIC_OFFSET_T1_3:
    case RADEON_PP_CUBIC_OFFSET_T1_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->textures[1].cube_info[i].offset = idx_value;
@@ -1736,11 +1627,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
    case RADEON_PP_CUBIC_OFFSET_T2_3:
    case RADEON_PP_CUBIC_OFFSET_T2_4:
        i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        track->textures[2].cube_info[i].offset = idx_value;
@@ -1754,11 +1645,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
        track->zb_dirty = true;
        break;
    case RADEON_RB3D_COLORPITCH:
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1825,11 +1716,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
        track->zb_dirty = true;
        break;
    case RADEON_RB3D_ZPASS_ADDR:
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                      idx, reg);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1986,10 +1877,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
            return r;
        break;
    case PACKET3_INDX_BUFFER:
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -2000,10 +1891,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
        break;
    case 0x23:
        /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
        r = r100_cs_packet_next_reloc(p, &reloc);
        r = radeon_cs_packet_next_reloc(p, &reloc, 0);
        if (r) {
            DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
            r100_cs_dump_packet(p, pkt);
            radeon_cs_dump_packet(p, pkt);
            return r;
        }
        ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -2100,37 +1991,36 @@ int r100_cs_parse(struct radeon_cs_parser *p)
    r100_cs_track_clear(p->rdev, track);
    p->track = track;
    do {
        r = r100_cs_packet_parse(p, &pkt, p->idx);
        r = radeon_cs_packet_parse(p, &pkt, p->idx);
        if (r) {
            return r;
        }
        p->idx += pkt.count + 2;
        switch (pkt.type) {
        case PACKET_TYPE0:
            if (p->rdev->family >= CHIP_R200)
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r200_packet0_check);
            else
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r100_packet0_check);
            break;
        case PACKET_TYPE2:
            break;
        case PACKET_TYPE3:
            r = r100_packet3_check(p, &pkt);
            break;
        default:
            DRM_ERROR("Unknown packet type %d !\n",
                      pkt.type);
            return -EINVAL;
        case RADEON_PACKET_TYPE0:
            if (p->rdev->family >= CHIP_R200)
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r200_packet0_check);
            else
                r = r100_cs_parse_packet0(p, &pkt,
                                          p->rdev->config.r100.reg_safe_bm,
                                          p->rdev->config.r100.reg_safe_bm_size,
                                          &r100_packet0_check);
            break;
        case RADEON_PACKET_TYPE2:
            break;
        case RADEON_PACKET_TYPE3:
            r = r100_packet3_check(p, &pkt);
            break;
        default:
            DRM_ERROR("Unknown packet type %d !\n",
                      pkt.type);
            return -EINVAL;
        }
        if (r) {
        if (r)
            return r;
        }
||||
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
|
||||
return 0;
|
||||
}
|
||||
|
@ -81,10 +81,6 @@ struct r100_cs_track {
|
||||
|
||||
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
|
||||
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
|
||||
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
|
||||
struct radeon_cs_reloc **cs_reloc);
|
||||
void r100_cs_dump_packet(struct radeon_cs_parser *p,
|
||||
struct radeon_cs_packet *pkt);
|
||||
|
||||
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
|
||||
|
||||
|
@ -64,17 +64,6 @@
|
||||
REG_SET(PACKET3_IT_OPCODE, (op)) | \
|
||||
REG_SET(PACKET3_COUNT, (n)))
|
||||
|
||||
#define PACKET_TYPE0 0
|
||||
#define PACKET_TYPE1 1
|
||||
#define PACKET_TYPE2 2
|
||||
#define PACKET_TYPE3 3
|
||||
|
||||
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
|
||||
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
|
||||
#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
|
||||
#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
|
||||
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
|
||||
|
||||
/* Registers */
|
||||
#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
|
||||
#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
|
||||
|
@ -162,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
break;
|
||||
@ -175,11 +175,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
return r;
|
||||
break;
|
||||
case RADEON_RB3D_DEPTHOFFSET:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
track->zb.robj = reloc->robj;
|
||||
@ -188,11 +188,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
|
||||
break;
|
||||
case RADEON_RB3D_COLOROFFSET:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
track->cb[0].robj = reloc->robj;
|
||||
@ -207,11 +207,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
case R200_PP_TXOFFSET_4:
|
||||
case R200_PP_TXOFFSET_5:
|
||||
i = (reg - R200_PP_TXOFFSET_0) / 24;
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
|
||||
@ -260,11 +260,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
case R200_PP_CUBIC_OFFSET_F5_5:
|
||||
i = (reg - R200_PP_TXOFFSET_0) / 24;
|
||||
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
track->textures[i].cube_info[face - 1].offset = idx_value;
|
||||
@ -278,11 +278,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
track->zb_dirty = true;
|
||||
break;
|
||||
case RADEON_RB3D_COLORPITCH:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -355,11 +355,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
|
||||
track->zb_dirty = true;
|
||||
break;
|
||||
case RADEON_RB3D_ZPASS_ADDR:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
|
||||
|
@ -615,7 +615,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
break;
|
||||
@ -630,11 +630,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
case R300_RB3D_COLOROFFSET2:
|
||||
case R300_RB3D_COLOROFFSET3:
|
||||
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
track->cb[i].robj = reloc->robj;
|
||||
@ -643,11 +643,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
|
||||
break;
|
||||
case R300_ZB_DEPTHOFFSET:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
track->zb.robj = reloc->robj;
|
||||
@ -672,11 +672,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
case R300_TX_OFFSET_0+56:
|
||||
case R300_TX_OFFSET_0+60:
|
||||
i = (reg - R300_TX_OFFSET_0) >> 2;
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -745,11 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
/* RB3D_COLORPITCH2 */
|
||||
/* RB3D_COLORPITCH3 */
|
||||
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -830,11 +830,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
case 0x4F24:
|
||||
/* ZB_DEPTHPITCH */
|
||||
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1045,11 +1045,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
track->tex_dirty = true;
|
||||
break;
|
||||
case R300_ZB_ZPASS_ADDR:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
|
||||
@ -1087,11 +1087,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
track->cb_dirty = true;
|
||||
break;
|
||||
case R300_RB3D_AARESOLVE_OFFSET:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
|
||||
idx, reg);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
track->aa.robj = reloc->robj;
|
||||
@ -1156,10 +1156,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
|
||||
return r;
|
||||
break;
|
||||
case PACKET3_INDX_BUFFER:
|
||||
r = r100_cs_packet_next_reloc(p, &reloc);
|
||||
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
|
||||
if (r) {
|
||||
DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
|
||||
r100_cs_dump_packet(p, pkt);
|
||||
radeon_cs_dump_packet(p, pkt);
|
||||
return r;
|
||||
}
|
||||
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
|
||||
@ -1257,21 +1257,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
|
||||
r100_cs_track_clear(p->rdev, track);
|
||||
p->track = track;
|
||||
do {
|
||||
r = r100_cs_packet_parse(p, &pkt, p->idx);
|
||||
r = radeon_cs_packet_parse(p, &pkt, p->idx);
|
||||
if (r) {
|
||||
return r;
|
||||
}
|
||||
p->idx += pkt.count + 2;
|
||||
switch (pkt.type) {
|
||||
case PACKET_TYPE0:
|
||||
case RADEON_PACKET_TYPE0:
|
||||
r = r100_cs_parse_packet0(p, &pkt,
|
||||
p->rdev->config.r300.reg_safe_bm,
|
||||
p->rdev->config.r300.reg_safe_bm_size,
|
||||
&r300_packet0_check);
|
||||
break;
|
||||
case PACKET_TYPE2:
|
||||
case RADEON_PACKET_TYPE2:
|
||||
break;
|
||||
case PACKET_TYPE3:
|
||||
case RADEON_PACKET_TYPE3:
|
||||
r = r300_packet3_check(p, &pkt);
|
||||
break;
|
||||
default:
|
||||
|
@ -29,6 +29,8 @@
|
||||
*
|
||||
* Authors:
|
||||
* Nicolai Haehnle <prefect_@gmx.net>
|
||||
*
|
||||
* ------------------------ This file is DEPRECATED! -------------------------
|
||||
*/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
@ -65,17 +65,6 @@
|
||||
REG_SET(PACKET3_IT_OPCODE, (op)) | \
|
||||
REG_SET(PACKET3_COUNT, (n)))
|
||||
|
||||
#define PACKET_TYPE0 0
|
||||
#define PACKET_TYPE1 1
|
||||
#define PACKET_TYPE2 2
|
||||
#define PACKET_TYPE3 3
|
||||
|
||||
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
|
||||
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
|
||||
#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
|
||||
#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
|
||||
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
|
||||
|
||||
/* Registers */
|
||||
#define R_000148_MC_FB_LOCATION 0x000148
|
||||
#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
|
||||
|
@ -355,6 +355,7 @@
|
||||
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
|
||||
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
|
||||
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
|
||||
#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
|
||||
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
|
||||
|
||||
#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
|
||||
|
@ -94,6 +94,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
|
||||
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
|
||||
MODULE_FIRMWARE("radeon/SUMO2_me.bin");
|
||||
|
||||
static const u32 crtc_offsets[2] =
|
||||
{
|
||||
0,
|
||||
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
|
||||
};
|
||||
|
||||
int r600_debugfs_mc_info_init(struct radeon_device *rdev);
|
||||
|
||||
/* r600,rv610,rv630,rv620,rv635,rv670 */
|
||||
@ -1254,169 +1260,301 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
|
||||
radeon_bo_unref(&rdev->vram_scratch.robj);
|
||||
}
|
||||
|
||||
/* We doesn't check that the GPU really needs a reset we simply do the
|
||||
* reset, it's up to the caller to determine if the GPU needs one. We
|
||||
* might add an helper function to check that.
|
||||
*/
|
||||
static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
|
||||
void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
|
||||
{
|
||||
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
|
||||
S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
|
||||
S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
|
||||
S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
|
||||
S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
|
||||
S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
|
||||
S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
|
||||
S_008010_GUI_ACTIVE(1);
|
||||
u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
|
||||
S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
|
||||
S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
|
||||
S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
|
||||
S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
|
||||
S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
|
||||
S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
|
||||
S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
|
||||
u32 tmp;
|
||||
u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
|
||||
|
||||
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
|
||||
return;
|
||||
|
||||
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
|
||||
RREG32(R_008010_GRBM_STATUS));
|
||||
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
|
||||
RREG32(R_008014_GRBM_STATUS2));
|
||||
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
|
||||
RREG32(R_000E50_SRBM_STATUS));
|
||||
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
|
||||
RREG32(CP_STALLED_STAT1));
|
||||
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
|
||||
RREG32(CP_STALLED_STAT2));
|
||||
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
|
||||
RREG32(CP_BUSY_STAT));
|
||||
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
|
||||
RREG32(CP_STAT));
|
||||
|
||||
/* Disable CP parsing/prefetching */
|
||||
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
|
||||
|
||||
/* Check if any of the rendering block is busy and reset it */
|
||||
if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
|
||||
(RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
|
||||
tmp = S_008020_SOFT_RESET_CR(1) |
|
||||
S_008020_SOFT_RESET_DB(1) |
|
||||
S_008020_SOFT_RESET_CB(1) |
|
||||
S_008020_SOFT_RESET_PA(1) |
|
||||
S_008020_SOFT_RESET_SC(1) |
|
||||
S_008020_SOFT_RESET_SMX(1) |
|
||||
S_008020_SOFT_RESET_SPI(1) |
|
||||
S_008020_SOFT_RESET_SX(1) |
|
||||
S_008020_SOFT_RESET_SH(1) |
|
||||
S_008020_SOFT_RESET_TC(1) |
|
||||
S_008020_SOFT_RESET_TA(1) |
|
||||
S_008020_SOFT_RESET_VC(1) |
|
||||
S_008020_SOFT_RESET_VGT(1);
|
||||
dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
|
||||
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
|
||||
RREG32(R_008020_GRBM_SOFT_RESET);
|
||||
mdelay(15);
|
||||
WREG32(R_008020_GRBM_SOFT_RESET, 0);
|
||||
}
|
||||
/* Reset CP (we always reset CP) */
|
||||
tmp = S_008020_SOFT_RESET_CP(1);
|
||||
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
|
||||
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
|
||||
RREG32(R_008020_GRBM_SOFT_RESET);
|
||||
mdelay(15);
|
||||
WREG32(R_008020_GRBM_SOFT_RESET, 0);
|
||||
|
||||
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
|
||||
RREG32(R_008010_GRBM_STATUS));
|
||||
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
|
||||
RREG32(R_008014_GRBM_STATUS2));
|
||||
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
|
||||
RREG32(R_000E50_SRBM_STATUS));
|
||||
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
|
||||
RREG32(CP_STALLED_STAT1));
|
||||
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
|
||||
RREG32(CP_STALLED_STAT2));
|
||||
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
|
||||
RREG32(CP_BUSY_STAT));
|
||||
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
|
||||
RREG32(CP_STAT));
|
||||
|
||||
}
|
||||
|
||||
static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
|
||||
{
|
||||
u32 tmp;
|
||||
|
||||
if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
|
||||
return;
|
||||
|
||||
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
|
||||
RREG32(DMA_STATUS_REG));
|
||||
|
||||
/* Disable DMA */
|
||||
tmp = RREG32(DMA_RB_CNTL);
|
||||
tmp &= ~DMA_RB_ENABLE;
|
||||
WREG32(DMA_RB_CNTL, tmp);
|
||||
|
||||
/* Reset dma */
|
||||
if (rdev->family >= CHIP_RV770)
|
||||
WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
|
||||
if (hung)
|
||||
tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
|
||||
else
|
||||
WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
|
||||
RREG32(SRBM_SOFT_RESET);
|
||||
udelay(50);
|
||||
WREG32(SRBM_SOFT_RESET, 0);
|
||||
tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
|
||||
|
||||
WREG32(R600_BIOS_3_SCRATCH, tmp);
|
||||
}
|
||||
|
||||
static void r600_print_gpu_status_regs(struct radeon_device *rdev)
|
||||
{
|
||||
dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
|
||||
RREG32(R_008010_GRBM_STATUS));
|
||||
dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
|
||||
RREG32(R_008014_GRBM_STATUS2));
|
||||
dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
|
||||
RREG32(R_000E50_SRBM_STATUS));
|
||||
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
|
||||
RREG32(CP_STALLED_STAT1));
|
||||
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
|
||||
RREG32(CP_STALLED_STAT2));
|
||||
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
|
||||
RREG32(CP_BUSY_STAT));
|
||||
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
|
||||
RREG32(CP_STAT));
|
||||
dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
|
||||
RREG32(DMA_STATUS_REG));
|
||||
}
|
||||
|
||||
static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
|
||||
static bool r600_is_display_hung(struct radeon_device *rdev)
|
||||
{
|
||||
u32 crtc_hung = 0;
|
||||
u32 crtc_status[2];
|
||||
u32 i, j, tmp;
|
||||
|
||||
for (i = 0; i < rdev->num_crtc; i++) {
|
||||
if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
|
||||
crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
|
||||
crtc_hung |= (1 << i);
|
||||
}
|
||||
}
|
||||
|
||||
for (j = 0; j < 10; j++) {
|
||||
for (i = 0; i < rdev->num_crtc; i++) {
|
||||
if (crtc_hung & (1 << i)) {
|
||||
tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
|
||||
if (tmp != crtc_status[i])
|
||||
crtc_hung &= ~(1 << i);
|
||||
}
|
||||
}
|
||||
if (crtc_hung == 0)
|
||||
return false;
|
||||
udelay(100);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
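r600_is_display_hung() decides a CRTC is hung by latching each enabled controller's HV counter and re-sampling it ten times, 100 microseconds apart; only if every watched counter stays frozen does it report a hang. The same polling idea reduced to one counter, as a sketch (read_hv_count() is a hypothetical stand-in for the RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]) read, not a driver API):

#include <stdbool.h>
#include <stdint.h>

static bool counter_is_stuck(uint32_t (*read_hv_count)(void))
{
	uint32_t first = read_hv_count();
	int j;

	for (j = 0; j < 10; j++) {
		if (read_hv_count() != first)
			return false; /* counter moved, scanout is alive */
		/* the driver waits ~100us between samples (udelay(100)) */
	}
	return true; /* counter never moved, treat the display as hung */
}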

static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;

/* GRBM_STATUS */
tmp = RREG32(R_008010_GRBM_STATUS);
if (rdev->family >= CHIP_RV770) {
if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
reset_mask |= RADEON_RESET_GFX;
} else {
if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
reset_mask |= RADEON_RESET_GFX;
}

if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
reset_mask |= RADEON_RESET_CP;

if (G_008010_GRBM_EE_BUSY(tmp))
reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;

/* DMA_STATUS_REG */
tmp = RREG32(DMA_STATUS_REG);
if (!(tmp & DMA_IDLE))
reset_mask |= RADEON_RESET_DMA;

/* SRBM_STATUS */
tmp = RREG32(R_000E50_SRBM_STATUS);
if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
reset_mask |= RADEON_RESET_RLC;

if (G_000E50_IH_BUSY(tmp))
reset_mask |= RADEON_RESET_IH;

if (G_000E50_SEM_BUSY(tmp))
reset_mask |= RADEON_RESET_SEM;

if (G_000E50_GRBM_RQ_PENDING(tmp))
reset_mask |= RADEON_RESET_GRBM;

if (G_000E50_VMC_BUSY(tmp))
reset_mask |= RADEON_RESET_VMC;

if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
G_000E50_MCDW_BUSY(tmp))
reset_mask |= RADEON_RESET_MC;

if (r600_is_display_hung(rdev))
reset_mask |= RADEON_RESET_DISPLAY;

return reset_mask;
}

static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
struct rv515_mc_save save;

if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);

if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
reset_mask &= ~RADEON_RESET_DMA;
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
u32 tmp;

if (reset_mask == 0)
return 0;
return;

dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

r600_print_gpu_status_regs(rdev);

/* Disable CP parsing/prefetching */
if (rdev->family >= CHIP_RV770)
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
else
WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

/* disable the RLC */
WREG32(RLC_CNTL, 0);

if (reset_mask & RADEON_RESET_DMA) {
/* Disable DMA */
tmp = RREG32(DMA_RB_CNTL);
tmp &= ~DMA_RB_ENABLE;
WREG32(DMA_RB_CNTL, tmp);
}

mdelay(50);

rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}

if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
r600_gpu_soft_reset_gfx(rdev);
if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
if (rdev->family >= CHIP_RV770)
grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
S_008020_SOFT_RESET_CB(1) |
S_008020_SOFT_RESET_PA(1) |
S_008020_SOFT_RESET_SC(1) |
S_008020_SOFT_RESET_SPI(1) |
S_008020_SOFT_RESET_SX(1) |
S_008020_SOFT_RESET_SH(1) |
S_008020_SOFT_RESET_TC(1) |
S_008020_SOFT_RESET_TA(1) |
S_008020_SOFT_RESET_VC(1) |
S_008020_SOFT_RESET_VGT(1);
else
grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
S_008020_SOFT_RESET_DB(1) |
S_008020_SOFT_RESET_CB(1) |
S_008020_SOFT_RESET_PA(1) |
S_008020_SOFT_RESET_SC(1) |
S_008020_SOFT_RESET_SMX(1) |
S_008020_SOFT_RESET_SPI(1) |
S_008020_SOFT_RESET_SX(1) |
S_008020_SOFT_RESET_SH(1) |
S_008020_SOFT_RESET_TC(1) |
S_008020_SOFT_RESET_TA(1) |
S_008020_SOFT_RESET_VC(1) |
S_008020_SOFT_RESET_VGT(1);
}

if (reset_mask & RADEON_RESET_DMA)
r600_gpu_soft_reset_dma(rdev);
if (reset_mask & RADEON_RESET_CP) {
grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
S_008020_SOFT_RESET_VGT(1);

srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
}

if (reset_mask & RADEON_RESET_DMA) {
if (rdev->family >= CHIP_RV770)
srbm_soft_reset |= RV770_SOFT_RESET_DMA;
else
srbm_soft_reset |= SOFT_RESET_DMA;
}

if (reset_mask & RADEON_RESET_RLC)
srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);

if (reset_mask & RADEON_RESET_SEM)
srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);

if (reset_mask & RADEON_RESET_IH)
srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);

if (reset_mask & RADEON_RESET_GRBM)
srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);

if (!(rdev->flags & RADEON_IS_IGP)) {
if (reset_mask & RADEON_RESET_MC)
srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
}

if (reset_mask & RADEON_RESET_VMC)
srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);

if (grbm_soft_reset) {
tmp = RREG32(R_008020_GRBM_SOFT_RESET);
tmp |= grbm_soft_reset;
dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
tmp = RREG32(R_008020_GRBM_SOFT_RESET);

udelay(50);

tmp &= ~grbm_soft_reset;
WREG32(R_008020_GRBM_SOFT_RESET, tmp);
tmp = RREG32(R_008020_GRBM_SOFT_RESET);
}

if (srbm_soft_reset) {
tmp = RREG32(SRBM_SOFT_RESET);
tmp |= srbm_soft_reset;
dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
WREG32(SRBM_SOFT_RESET, tmp);
tmp = RREG32(SRBM_SOFT_RESET);

udelay(50);

tmp &= ~srbm_soft_reset;
WREG32(SRBM_SOFT_RESET, tmp);
tmp = RREG32(SRBM_SOFT_RESET);
}

/* Wait a little for things to settle down */
mdelay(1);

rv515_mc_resume(rdev, &save);
udelay(50);

r600_print_gpu_status_regs(rdev);
}

int r600_asic_reset(struct radeon_device *rdev)
{
u32 reset_mask;

reset_mask = r600_gpu_check_soft_reset(rdev);

if (reset_mask)
r600_set_bios_scratch_engine_hung(rdev, true);

r600_gpu_soft_reset(rdev, reset_mask);

reset_mask = r600_gpu_check_soft_reset(rdev);

if (!reset_mask)
r600_set_bios_scratch_engine_hung(rdev, false);

return 0;
}

bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
/**
* r600_gfx_is_lockup - Check if the GFX engine is locked up
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if the GFX engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
u32 reset_mask = r600_gpu_check_soft_reset(rdev);

srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
if (!(reset_mask & (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_CP))) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1431,15 +1569,14 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if the async DMA engine is locked up (r6xx-evergreen).
* Check if the async DMA engine is locked up.
* Returns true if the engine appears to be locked up, false if not.
*/
bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 dma_status_reg;
u32 reset_mask = r600_gpu_check_soft_reset(rdev);

dma_status_reg = RREG32(DMA_STATUS_REG);
if (dma_status_reg & DMA_IDLE) {
if (!(reset_mask & RADEON_RESET_DMA)) {
radeon_ring_lockup_update(ring);
return false;
}
@@ -1448,13 +1585,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}

int r600_asic_reset(struct radeon_device *rdev)
{
return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
RADEON_RESET_COMPUTE |
RADEON_RESET_DMA));
}

u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,

@@ -22,6 +22,8 @@
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*
* ------------------------ This file is DEPRECATED! -------------------------
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
@@ -488,37 +490,6 @@ set_default_state(drm_radeon_private_t *dev_priv)
ADVANCE_RING();
}

/* 23 bits of float fractional data */
#define I2F_FRAC_BITS 23
#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)

/*
* Converts unsigned integer into 32-bit IEEE floating point representation.
* Will be exact from 0 to 2^24. Above that, we round towards zero
* as the fractional bits will not fit in a float. (It would be better to
* round towards even as the fpu does, but that is slower.)
*/
__pure uint32_t int2float(uint32_t x)
{
uint32_t msb, exponent, fraction;

/* Zero is special */
if (!x) return 0;

/* Get location of the most significant bit */
msb = __fls(x);

/*
* Use a rotate instead of a shift because that works both leftwards
* and rightwards due to the mod(32) behaviour. This means we don't
* need to check to see if we are above 2^24 or not.
*/
fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
exponent = (127 + msb) << I2F_FRAC_BITS;

return fraction + exponent;
}

static int r600_nomm_get_vb(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;

@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"

/* 23 bits of float fractional data */
#define I2F_FRAC_BITS 23
#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)

/*
* Converts unsigned integer into 32-bit IEEE floating point representation.
* Will be exact from 0 to 2^24. Above that, we round towards zero
* as the fractional bits will not fit in a float. (It would be better to
* round towards even as the fpu does, but that is slower.)
*/
__pure uint32_t int2float(uint32_t x)
{
uint32_t msb, exponent, fraction;

/* Zero is special */
if (!x) return 0;

/* Get location of the most significant bit */
msb = __fls(x);

/*
* Use a rotate instead of a shift because that works both leftwards
* and rightwards due to the mod(32) behaviour. This means we don't
* need to check to see if we are above 2^24 or not.
*/
fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
exponent = (127 + msb) << I2F_FRAC_BITS;

return fraction + exponent;
}
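int2float() is bit-exact for inputs up to 2^24: for x = 10, msb = 3, the rotate leaves fraction = 0x200000, and exponent = (127 + 3) << 23 = 0x41000000, so the result is 0x41200000, the IEEE-754 pattern of 10.0f. A user-space sketch that re-checks this against the FPU (fls_bit() and the local ror32() reimplement the kernel's __fls()/ror32() with a GCC/Clang builtin; they are not the kernel helpers):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Stand-ins for the kernel helpers, assuming a GCC/Clang toolchain. */
static uint32_t ror32(uint32_t w, unsigned int s)
{
	s &= 31;
	return s ? (w >> s) | (w << (32 - s)) : w;
}

static unsigned int fls_bit(uint32_t x)
{
	return 31 - __builtin_clz(x); /* x must be non-zero */
}

static uint32_t int2float(uint32_t x)
{
	if (!x)
		return 0;
	return (ror32(x, (fls_bit(x) - 23) & 0x1f) & ((1u << 23) - 1)) +
	       ((127 + fls_bit(x)) << 23);
}

int main(void)
{
	uint32_t x, bits;
	float f;

	for (x = 0; x <= (1u << 24); x += 4093) {
		f = (float)x;
		memcpy(&bits, &f, sizeof(bits));
		assert(int2float(x) == bits); /* bit-exact up to 2^24 */
	}
	return 0;
}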

/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,

@@ -24,6 +24,8 @@
* Authors:
* Dave Airlie <airlied@redhat.com>
* Alex Deucher <alexander.deucher@amd.com>
*
* ------------------------ This file is DEPRECATED! -------------------------
*/

#include <linux/module.h>

@@ -31,12 +31,7 @@
#include "r600d.h"
#include "r600_reg_safe.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
static int r600_nomm;
extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);


@@ -784,170 +779,29 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}

/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
* @pkt: where to store packet informations
* r600_cs_packet_parse_vline() - parse userspace VLINE packet
* @parser: parser structure holding parsing context.
*
* Assume that chunk_ib_index is properly set. Will return -EINVAL
* if packet is bigger than remaining ib size. or if packets is unknown.
**/
static int r600_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
* This is an R600-specific function for parsing VLINE packets.
* Real work is done by r600_cs_common_vline_parse function.
* Here we just set up ASIC-specific register table and call
* the common implementation function.
*/
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
uint32_t header;
static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
AVIVO_D2MODE_VLINE_START_END};
static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
AVIVO_D2MODE_VLINE_STATUS};

if (idx >= ib_chunk->length_dw) {
DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
idx, ib_chunk->length_dw);
return -EINVAL;
}
header = radeon_get_ib_value(p, idx);
pkt->idx = idx;
pkt->type = CP_PACKET_GET_TYPE(header);
pkt->count = CP_PACKET_GET_COUNT(header);
pkt->one_reg_wr = 0;
switch (pkt->type) {
case PACKET_TYPE0:
pkt->reg = CP_PACKET0_GET_REG(header);
break;
case PACKET_TYPE3:
pkt->opcode = CP_PACKET3_GET_OPCODE(header);
break;
case PACKET_TYPE2:
pkt->count = -1;
break;
default:
DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
return -EINVAL;
}
if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
return -EINVAL;
}
return 0;
return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

/**
* r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
* @parser: parser structure holding parsing context.
* @data: pointer to relocation data
* @offset_start: starting offset
* @offset_mask: offset mask (to align start offset on)
* @reloc: reloc informations
*
* Check next packet is relocation packet3, do bo validation and compute
* GPU offset using the provided start.
**/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc;
unsigned idx;
int r;

if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = r600_cs_packet_parse(p, &p3reloc, p->idx);
if (r) {
return r;
}
p->idx += p3reloc.count + 2;
if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
DRM_ERROR("No packet3 for relocation for packet at %d.\n",
p3reloc.idx);
return -EINVAL;
}
idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw);
return -EINVAL;
}
/* FIXME: we assume reloc size is 4 dwords */
*cs_reloc = p->relocs_ptr[(idx / 4)];
return 0;
}

/**
* r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
* @parser: parser structure holding parsing context.
* @data: pointer to relocation data
* @offset_start: starting offset
* @offset_mask: offset mask (to align start offset on)
* @reloc: reloc informations
*
* Check next packet is relocation packet3, do bo validation and compute
* GPU offset using the provided start.
**/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc;
unsigned idx;
int r;

if (p->chunk_relocs_idx == -1) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = r600_cs_packet_parse(p, &p3reloc, p->idx);
if (r) {
return r;
}
p->idx += p3reloc.count + 2;
if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
DRM_ERROR("No packet3 for relocation for packet at %d.\n",
p3reloc.idx);
return -EINVAL;
}
idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw);
return -EINVAL;
}
*cs_reloc = p->relocs;
(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
return 0;
}

/**
* r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
* @parser: parser structure holding parsing context.
*
* Check next packet is relocation packet3, do bo validation and compute
* GPU offset using the provided start.
**/
static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
struct radeon_cs_packet p3reloc;
int r;

r = r600_cs_packet_parse(p, &p3reloc, p->idx);
if (r) {
return 0;
}
if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
return 0;
}
return 1;
}

/**
* r600_cs_packet_next_vline() - parse userspace VLINE packet
* r600_cs_common_vline_parse() - common vline parser
* @parser: parser structure holding parsing context.
* @vline_start_end: table of vline_start_end registers
* @vline_status: table of vline_status registers
*
* Userspace sends a special sequence for VLINE waits.
* PACKET0 - VLINE_START_END + value
@@ -957,9 +811,16 @@ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
* This function parses this and relocates the VLINE START END
* and WAIT_REG_MEM packets to the correct crtc.
* It also detects a switched off crtc and nulls out the
* wait in that case.
* wait in that case. This function is common for all ASICs that
* are R600 and newer. The parsing algorithm is the same, and only
* differs in which registers are used.
*
* Caller is the ASIC-specific function which passes the parser
* context and ASIC-specific register table
*/
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
uint32_t *vline_status)
{
struct drm_mode_object *obj;
struct drm_crtc *crtc;
@@ -973,12 +834,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib = p->ib.ptr;

/* parse the WAIT_REG_MEM */
r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
if (r)
return r;

/* check its a WAIT_REG_MEM */
if (wait_reg_mem.type != PACKET_TYPE3 ||
if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
@@ -987,7 +848,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
return -EINVAL;
}
/* bit 8 is me (0) or pfp (1) */
if (wait_reg_mem_info & 0x100) {
DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
@@ -995,18 +861,18 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}

if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}

/* jump over the NOP */
r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
if (r)
return r;

@@ -1016,7 +882,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)

header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
reg = CP_PACKET0_GET_REG(header);
reg = R600_CP_PACKET0_GET_REG(header);

obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@@ -1028,7 +894,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
crtc_id = radeon_crtc->crtc_id;

if (!crtc->enabled) {
/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
/* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
ib[h_idx + 4] = PACKET2(0);
@@ -1036,20 +902,15 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
ib[h_idx + 6] = PACKET2(0);
ib[h_idx + 7] = PACKET2(0);
ib[h_idx + 8] = PACKET2(0);
} else if (crtc_id == 1) {
switch (reg) {
case AVIVO_D1MODE_VLINE_START_END:
header &= ~R600_CP_PACKET0_REG_MASK;
header |= AVIVO_D2MODE_VLINE_START_END >> 2;
break;
default:
DRM_ERROR("unknown crtc reloc\n");
return -EINVAL;
}
} else if (reg == vline_start_end[0]) {
header &= ~R600_CP_PACKET0_REG_MASK;
header |= vline_start_end[crtc_id] >> 2;
ib[h_idx] = header;
ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
ib[h_idx + 4] = vline_status[crtc_id] >> 2;
} else {
DRM_ERROR("unknown crtc reloc\n");
return -EINVAL;
}

return 0;
}
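The offsets the parser touches fix the shape of the sequence userspace emits. Reconstructed from those accesses (a sketch: the middle dwords are labeled on the assumption that they follow the usual WAIT_REG_MEM body layout):

/* ib[h_idx + 0]   PACKET0 header, reg = VLINE_START_END (retargeted per CRTC)
 * ib[h_idx + 1]   start/end scanline values
 * ib[h_idx + 2]   PACKET3 WAIT_REG_MEM header (nop'd out if the CRTC is off)
 * ib[h_idx + 3]   wait info: function "equal", REG space, ME engine
 * ib[h_idx + 4]   poll register: VLINE_STATUS >> 2 (retargeted per CRTC)
 * ib[h_idx + 5..7] address high / reference value / bit mask (VLINE_STAT)
 * ib[h_idx + 8]   poll interval (last dword nop'd for a disabled CRTC)
 * ib[h_idx + 9]   PACKET3 NOP header (the reloc the parser jumps over)
 * ib[h_idx + 10]  crtc_id (read above as h_idx + 2 + 7 + 1)
 */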

@@ -1155,8 +1016,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case R_028010_DB_DEPTH_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
radeon_cs_packet_next_is_pkt3_nop(p)) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1198,7 +1059,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_1:
case VGT_STRMOUT_BUFFER_BASE_2:
case VGT_STRMOUT_BUFFER_BASE_3:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1221,7 +1082,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->streamout_dirty = true;
break;
case CP_COHER_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
"0x%04X\n", reg);
@@ -1256,8 +1117,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
radeon_cs_packet_next_is_pkt3_nop(p)) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1320,7 +1181,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280F8_CB_COLOR6_FRAG:
case R_0280FC_CB_COLOR7_FRAG:
tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
if (!r600_cs_packet_next_is_pkt3_nop(p)) {
if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1329,7 +1190,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1351,7 +1212,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280D8_CB_COLOR6_TILE:
case R_0280DC_CB_COLOR7_TILE:
tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
if (!r600_cs_packet_next_is_pkt3_nop(p)) {
if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
if (!track->cb_color_base_last[tmp]) {
dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
return -EINVAL;
@@ -1360,7 +1221,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
ib[idx] = track->cb_color_base_last[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
return -EINVAL;
@@ -1395,7 +1256,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR5_BASE:
case CB_COLOR6_BASE:
case CB_COLOR7_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1410,7 +1271,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->cb_dirty = true;
break;
case DB_DEPTH_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1423,7 +1284,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_dirty = true;
break;
case DB_HTILE_DATA_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1493,7 +1354,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_13:
case SQ_ALU_CONST_CACHE_VS_14:
case SQ_ALU_CONST_CACHE_VS_15:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
"0x%04X\n", reg);
@@ -1502,7 +1363,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
dev_warn(p->dev, "bad SET_CONFIG_REG "
"0x%04X\n", reg);
@@ -1788,7 +1649,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}

r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET PREDICATION\n");
return -EINVAL;
@@ -1829,7 +1690,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
}
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad DRAW_INDEX\n");
return -EINVAL;
@@ -1881,7 +1742,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x10) {
uint64_t offset;

r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad WAIT_REG_MEM\n");
return -EINVAL;
@@ -1893,6 +1754,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,

ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
DRM_ERROR("cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
case PACKET3_CP_DMA:
@@ -1915,7 +1779,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
/* src address space is memory */
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA SRC\n");
return -EINVAL;
@@ -1945,7 +1809,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad CP DMA DST\n");
return -EINVAL;
@@ -1975,7 +1839,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* 0xffffffff/0x0 is flush all cache flag */
if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
radeon_get_ib_value(p, idx + 2) != 0) {
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SURFACE_SYNC\n");
return -EINVAL;
@@ -1991,7 +1855,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (pkt->count) {
uint64_t offset;

r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2012,7 +1876,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad EVENT_WRITE\n");
return -EINVAL;
@@ -2078,7 +1942,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2092,7 +1956,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2113,7 +1977,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset64;
/* vtx base */
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
@@ -2214,7 +2078,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
u64 offset;

r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
@@ -2258,7 +2122,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Updating memory at DST_ADDRESS. */
if (idx_value & 0x1) {
u64 offset;
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
@@ -2277,7 +2141,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* Reading data from SRC_ADDRESS. */
if (((idx_value >> 1) & 0x3) == 2) {
u64 offset;
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
@@ -2302,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
@@ -2331,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x1) {
u64 offset;
/* SRC is memory. */
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing src reloc)\n");
return -EINVAL;
@@ -2355,7 +2219,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if (idx_value & 0x2) {
u64 offset;
/* DST is memory. */
r = r600_cs_packet_next_reloc(p, &reloc);
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
@@ -2410,7 +2274,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = track;
}
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
r = radeon_cs_packet_parse(p, &pkt, p->idx);
if (r) {
kfree(p->track);
p->track = NULL;
@@ -2418,12 +2282,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
}
p->idx += pkt.count + 2;
switch (pkt.type) {
case PACKET_TYPE0:
case RADEON_PACKET_TYPE0:
r = r600_cs_parse_packet0(p, &pkt);
break;
case PACKET_TYPE2:
case RADEON_PACKET_TYPE2:
break;
case PACKET_TYPE3:
case RADEON_PACKET_TYPE3:
r = r600_packet3_check(p, &pkt);
break;
default:
@@ -2449,17 +2313,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
return 0;
}

static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
if (p->chunk_relocs_idx == -1) {
return 0;
}
p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
|
||||
if (p->relocs == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
#ifdef CONFIG_DRM_RADEON_UMS
|
||||
|
||||
/**
|
||||
* cs_parser_fini() - clean parser states
|
||||
@ -2485,6 +2339,18 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
|
||||
kfree(parser->chunks_array);
|
||||
}
|
||||
|
||||
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
|
||||
{
|
||||
if (p->chunk_relocs_idx == -1) {
|
||||
return 0;
|
||||
}
|
||||
p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
|
||||
if (p->relocs == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
|
||||
unsigned family, u32 *ib, int *l)
|
||||
{
|
||||
@ -2543,9 +2409,11 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
|
||||
|
||||
void r600_cs_legacy_init(void)
|
||||
{
|
||||
r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
|
||||
r600_nomm = 1;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* DMA
|
||||
*/
|
||||
|
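All of the checkers above now funnel through the shared radeon_cs_packet_next_reloc() helper instead of a per-generation function pointer. A minimal sketch of the resulting call-site pattern, using only names visible in the hunks above (this is an illustration, not an extra change in the series):

	struct radeon_cs_reloc *reloc;
	int r;

	/* r600_nomm is non-zero only on the legacy UMS path,
	 * set by r600_cs_legacy_init(); the KMS path passes 0 */
	r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
	if (r) {
		DRM_ERROR("bad packet (missing reloc)\n");
		return -EINVAL;
	}
	/* reloc->lobj.gpu_offset can now be patched into the IB */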
drivers/gpu/drm/radeon/r600d.h

@@ -182,6 +182,8 @@
 #define	CP_COHER_BASE				0x85F8
 #define	CP_DEBUG				0xC1FC
 #define	R_0086D8_CP_ME_CNTL			0x86D8
+#define		S_0086D8_CP_PFP_HALT(x)		(((x) & 1)<<26)
+#define		C_0086D8_CP_PFP_HALT(x)		((x) & 0xFBFFFFFF)
 #define		S_0086D8_CP_ME_HALT(x)		(((x) & 1)<<28)
 #define		C_0086D8_CP_ME_HALT(x)		((x) & 0xEFFFFFFF)
 #define	CP_ME_RAM_DATA				0xC160
@@ -1143,19 +1145,10 @@
 /*
  * PM4
  */
-#define	PACKET_TYPE0	0
-#define	PACKET_TYPE1	1
-#define	PACKET_TYPE2	2
-#define	PACKET_TYPE3	3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |			\
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |		\
			 (((reg) >> 2) & 0xFFFF) |		\
			 ((n) & 0x3FFF) << 16)
-#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |			\
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |		\
			 (((op) & 0xFF) << 8) |			\
			 ((n) & 0x3FFF) << 16)
 
@@ -1328,6 +1321,7 @@
 #define	G_008010_VC_BUSY(x)			(((x) >> 11) & 1)
 #define	G_008010_DB03_CLEAN(x)			(((x) >> 12) & 1)
 #define	G_008010_CB03_CLEAN(x)			(((x) >> 13) & 1)
+#define	G_008010_TA_BUSY(x)			(((x) >> 14) & 1)
 #define	G_008010_VGT_BUSY_NO_DMA(x)		(((x) >> 16) & 1)
 #define	G_008010_VGT_BUSY(x)			(((x) >> 17) & 1)
 #define	G_008010_TA03_BUSY(x)			(((x) >> 18) & 1)
@@ -1395,6 +1389,7 @@
 #define	G_000E50_MCDW_BUSY(x)			(((x) >> 13) & 1)
 #define	G_000E50_SEM_BUSY(x)			(((x) >> 14) & 1)
 #define	G_000E50_RLC_BUSY(x)			(((x) >> 15) & 1)
+#define	G_000E50_IH_BUSY(x)			(((x) >> 17) & 1)
 #define	G_000E50_BIF_BUSY(x)			(((x) >> 29) & 1)
 #define	R_000E60_SRBM_SOFT_RESET		0x0E60
 #define		S_000E60_SOFT_RESET_BIF(x)	(((x) & 1) << 1)
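The new R_0086D8_CP_ME_CNTL bit helpers exist so the reset paths can halt the PFP and ME microengines before the memory controller is torn down. A minimal sketch of how such masks are typically applied, assuming the usual WREG32() register accessor (the exact per-ASIC sequence lives in the "halt engines before disabling MC" patches):

	/* halt CP prefetch (PFP) and micro engine (ME) before MC teardown */
	WREG32(R_0086D8_CP_ME_CNTL,
	       S_0086D8_CP_PFP_HALT(1) | S_0086D8_CP_ME_HALT(1));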
drivers/gpu/drm/radeon/radeon.h

@@ -136,6 +136,15 @@ extern int radeon_lockup_timeout;
 #define RADEON_RESET_GFX			(1 << 0)
 #define RADEON_RESET_COMPUTE			(1 << 1)
 #define RADEON_RESET_DMA			(1 << 2)
+#define RADEON_RESET_CP				(1 << 3)
+#define RADEON_RESET_GRBM			(1 << 4)
+#define RADEON_RESET_DMA1			(1 << 5)
+#define RADEON_RESET_RLC			(1 << 6)
+#define RADEON_RESET_SEM			(1 << 7)
+#define RADEON_RESET_IH				(1 << 8)
+#define RADEON_RESET_VMC			(1 << 9)
+#define RADEON_RESET_MC				(1 << 10)
+#define RADEON_RESET_DISPLAY			(1 << 11)
 
 /*
  * Errata workarounds.
@@ -771,6 +780,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, struct radeon_vm *vm,
		  unsigned size);
 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
		       struct radeon_ib *const_ib);
 int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1179,7 +1189,9 @@ struct radeon_asic {
		void (*fini)(struct radeon_device *rdev);
 
		u32 pt_ring_index;
-		void (*set_page)(struct radeon_device *rdev, uint64_t pe,
+		void (*set_page)(struct radeon_device *rdev,
+				 struct radeon_ib *ib,
+				 uint64_t pe,
				 uint64_t addr, unsigned count,
				 uint32_t incr, uint32_t flags);
	} vm;
@@ -1757,6 +1769,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
 #define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
 #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
 
 /*
  * BIOS helpers.
@@ -1801,7 +1814,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1851,6 +1864,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 /* Common functions */
 /* AGP */
 extern int radeon_gpu_reset(struct radeon_device *rdev);
+extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
 extern void radeon_agp_disable(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1972,6 +1986,19 @@ static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
 static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
 #endif
 
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt,
+			   unsigned idx);
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt);
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+				struct radeon_cs_reloc **cs_reloc,
+				int nomm);
+int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
+			       uint32_t *vline_start_end,
+			       uint32_t *vline_status);
+
 #include "radeon_object.h"
 
 #endif
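The RADEON_RESET_* bits let each ASIC report per-block hang state as a single mask. A sketch of how a caller can consume such a mask (si_gpu_check_soft_reset() here stands in for whichever per-ASIC status routine builds it; it is file-local in si.c, so this is illustrative only):

	u32 reset_mask = si_gpu_check_soft_reset(rdev);

	/* the GFX ring only counts as hung if a GFX-related block is busy */
	if (reset_mask & (RADEON_RESET_GFX |
			  RADEON_RESET_COMPUTE |
			  RADEON_RESET_CP))
		radeon_gpu_reset(rdev);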
drivers/gpu/drm/radeon/radeon_asic.c

@@ -946,7 +946,7 @@ static struct radeon_asic r600_asic = {
			.cs_parse = &r600_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &r600_gpu_is_lockup,
+			.is_lockup = &r600_gfx_is_lockup,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
			.ib_execute = &r600_dma_ring_ib_execute,
@@ -1030,7 +1030,7 @@ static struct radeon_asic rs780_asic = {
			.cs_parse = &r600_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &r600_gpu_is_lockup,
+			.is_lockup = &r600_gfx_is_lockup,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
			.ib_execute = &r600_dma_ring_ib_execute,
@@ -1114,7 +1114,7 @@ static struct radeon_asic rv770_asic = {
			.cs_parse = &r600_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &r600_gpu_is_lockup,
+			.is_lockup = &r600_gfx_is_lockup,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
			.ib_execute = &r600_dma_ring_ib_execute,
@@ -1198,7 +1198,7 @@ static struct radeon_asic evergreen_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &evergreen_gfx_is_lockup,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
			.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1207,7 @@ static struct radeon_asic evergreen_asic = {
			.cs_parse = &evergreen_dma_cs_parse,
			.ring_test = &r600_dma_ring_test,
			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &r600_dma_is_lockup,
+			.is_lockup = &evergreen_dma_is_lockup,
		}
	},
	.irq = {
@@ -1282,7 +1282,7 @@ static struct radeon_asic sumo_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &evergreen_gfx_is_lockup,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
			.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1291,7 @@ static struct radeon_asic sumo_asic = {
			.cs_parse = &evergreen_dma_cs_parse,
			.ring_test = &r600_dma_ring_test,
			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &r600_dma_is_lockup,
+			.is_lockup = &evergreen_dma_is_lockup,
		}
	},
	.irq = {
@@ -1366,7 +1366,7 @@ static struct radeon_asic btc_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &evergreen_gfx_is_lockup,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
			.ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1375,7 @@ static struct radeon_asic btc_asic = {
			.cs_parse = &evergreen_dma_cs_parse,
			.ring_test = &r600_dma_ring_test,
			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &r600_dma_is_lockup,
+			.is_lockup = &evergreen_dma_is_lockup,
		}
	},
	.irq = {
@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
	.vm = {
		.init = &cayman_vm_init,
		.fini = &cayman_vm_fini,
-		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
		.set_page = &cayman_vm_set_page,
	},
	.ring = {
@@ -1457,7 +1457,7 @@ static struct radeon_asic cayman_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &cayman_gfx_is_lockup,
			.vm_flush = &cayman_vm_flush,
		},
		[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1468,7 @@ static struct radeon_asic cayman_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &cayman_gfx_is_lockup,
			.vm_flush = &cayman_vm_flush,
		},
		[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1479,7 @@ static struct radeon_asic cayman_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &cayman_gfx_is_lockup,
			.vm_flush = &cayman_vm_flush,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
	.vm = {
		.init = &cayman_vm_init,
		.fini = &cayman_vm_fini,
-		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
		.set_page = &cayman_vm_set_page,
	},
	.ring = {
@@ -1584,7 +1584,7 @@ static struct radeon_asic trinity_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &cayman_gfx_is_lockup,
			.vm_flush = &cayman_vm_flush,
		},
		[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1595,7 @@ static struct radeon_asic trinity_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &cayman_gfx_is_lockup,
			.vm_flush = &cayman_vm_flush,
		},
		[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1606,7 @@ static struct radeon_asic trinity_asic = {
			.cs_parse = &evergreen_cs_parse,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &evergreen_gpu_is_lockup,
+			.is_lockup = &cayman_gfx_is_lockup,
			.vm_flush = &cayman_vm_flush,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
	.vm = {
		.init = &si_vm_init,
		.fini = &si_vm_fini,
-		.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
+		.pt_ring_index = R600_RING_TYPE_DMA_INDEX,
		.set_page = &si_vm_set_page,
	},
	.ring = {
@@ -1711,7 +1711,7 @@ static struct radeon_asic si_asic = {
			.cs_parse = NULL,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &si_gpu_is_lockup,
+			.is_lockup = &si_gfx_is_lockup,
			.vm_flush = &si_vm_flush,
		},
		[CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1722,7 @@ static struct radeon_asic si_asic = {
			.cs_parse = NULL,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &si_gpu_is_lockup,
+			.is_lockup = &si_gfx_is_lockup,
			.vm_flush = &si_vm_flush,
		},
		[CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1733,7 @@ static struct radeon_asic si_asic = {
			.cs_parse = NULL,
			.ring_test = &r600_ring_test,
			.ib_test = &r600_ib_test,
-			.is_lockup = &si_gpu_is_lockup,
+			.is_lockup = &si_gfx_is_lockup,
			.vm_flush = &si_vm_flush,
		},
		[R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1744,7 @@ static struct radeon_asic si_asic = {
			.cs_parse = NULL,
			.ring_test = &r600_dma_ring_test,
			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &cayman_dma_is_lockup,
+			.is_lockup = &si_dma_is_lockup,
			.vm_flush = &si_dma_vm_flush,
		},
		[CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,7 +1755,7 @@ static struct radeon_asic si_asic = {
			.cs_parse = NULL,
			.ring_test = &r600_dma_ring_test,
			.ib_test = &r600_dma_ib_test,
-			.is_lockup = &cayman_dma_is_lockup,
+			.is_lockup = &si_dma_is_lockup,
			.vm_flush = &si_dma_vm_flush,
		}
	},
@@ -1944,9 +1944,13 @@ int radeon_asic_init(struct radeon_device *rdev)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
+	case CHIP_OLAND:
		rdev->asic = &si_asic;
		/* set num crtcs */
-		rdev->num_crtc = 6;
+		if (rdev->family == CHIP_OLAND)
+			rdev->num_crtc = 2;
+		else
+			rdev->num_crtc = 6;
		break;
	default:
		/* FIXME: not supported yet */
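With pt_ring_index switched to the DMA ring and set_page taking an IB, callers pick the ring from the asic table and emit page-table writes into an IB rather than directly onto a ring. A sketch of the dispatch using the macro from radeon.h (pe, addr, count and incr are stand-ins for the caller's page-table parameters, and ib must first be obtained with radeon_ib_get()):

	unsigned ridx = rdev->asic->vm.pt_ring_index;	/* the DMA ring now */
	struct radeon_ib ib;				/* filled by radeon_ib_get() */

	radeon_asic_vm_set_page(rdev, &ib, pe, addr, count, incr,
				RADEON_VM_PAGE_VALID);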
drivers/gpu/drm/radeon/radeon_asic.h

@@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
			      bool emit_wait);
 void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r600_asic_reset(struct radeon_device *rdev);
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
@@ -422,7 +422,8 @@ int evergreen_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
 int evergreen_suspend(struct radeon_device *rdev);
 int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int evergreen_asic_reset(struct radeon_device *rdev);
 void evergreen_bandwidth_update(struct radeon_device *rdev);
 void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -473,13 +474,16 @@ int cayman_vm_init(struct radeon_device *rdev);
 void cayman_vm_fini(struct radeon_device *rdev);
 void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
-void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void cayman_vm_set_page(struct radeon_device *rdev,
+			struct radeon_ib *ib,
+			uint64_t pe,
			uint64_t addr, unsigned count,
			uint32_t incr, uint32_t flags);
 int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
				struct radeon_ib *ib);
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
 
@@ -496,14 +500,17 @@ int si_init(struct radeon_device *rdev);
 void si_fini(struct radeon_device *rdev);
 int si_suspend(struct radeon_device *rdev);
 int si_resume(struct radeon_device *rdev);
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int si_asic_reset(struct radeon_device *rdev);
 void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int si_irq_set(struct radeon_device *rdev);
 int si_irq_process(struct radeon_device *rdev);
 int si_vm_init(struct radeon_device *rdev);
 void si_vm_fini(struct radeon_device *rdev);
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+		    struct radeon_ib *ib,
+		    uint64_t pe,
		    uint64_t addr, unsigned count,
		    uint32_t incr, uint32_t flags);
 void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
drivers/gpu/drm/radeon/radeon_cp.c

@@ -27,6 +27,8 @@
  * Authors:
  *    Kevin E. Martin <martin@valinux.com>
  *    Gareth Hughes <gareth@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
  */
 
 #include <linux/module.h>
drivers/gpu/drm/radeon/radeon_cs.c

@@ -29,9 +29,6 @@
 #include "radeon_reg.h"
 #include "radeon.h"
 
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
-			 struct radeon_cs_packet *pkt);
-
 static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 {
	struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
	return 0;
 }
 
-static void radeon_cs_sync_to(struct radeon_cs_parser *p,
-			      struct radeon_fence *fence)
-{
-	struct radeon_fence *other;
-
-	if (!fence)
-		return;
-
-	other = p->ib.sync_to[fence->ring];
-	p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
 static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
	int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
		if (!p->relocs[i].robj)
			continue;
 
-		radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+		radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
	}
 }
 
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].kdata = NULL;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
-
+		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
			return -EINVAL;
		}
 
-		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
-
		cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
		if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
		    (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -478,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
		goto out;
	}
	radeon_cs_sync_rings(parser);
-	radeon_cs_sync_to(parser, vm->fence);
-	radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+	radeon_ib_sync_to(&parser->ib, vm->fence);
+	radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
		rdev, vm, parser->ring));
 
	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
@@ -648,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
 }
+
+/**
+ * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
+ * @parser:	parser structure holding parsing context.
+ * @pkt:	where to store packet information
+ *
+ * Assume that chunk_ib_index is properly set. Will return -EINVAL
+ * if packet is bigger than remaining ib size. or if packets is unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt,
+			   unsigned idx)
+{
+	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_device *rdev = p->rdev;
+	uint32_t header;
+
+	if (idx >= ib_chunk->length_dw) {
+		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+			  idx, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	header = radeon_get_ib_value(p, idx);
+	pkt->idx = idx;
+	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+	pkt->one_reg_wr = 0;
+	switch (pkt->type) {
+	case RADEON_PACKET_TYPE0:
+		if (rdev->family < CHIP_R600) {
+			pkt->reg = R100_CP_PACKET0_GET_REG(header);
+			pkt->one_reg_wr =
+				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+		} else
+			pkt->reg = R600_CP_PACKET0_GET_REG(header);
+		break;
+	case RADEON_PACKET_TYPE3:
+		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+		break;
+	case RADEON_PACKET_TYPE2:
+		pkt->count = -1;
+		break;
+	default:
+		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+		return -EINVAL;
+	}
+	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p:		structure holding the parser context.
+ *
+ * Check if the next packet is NOP relocation packet3.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_packet p3reloc;
+	int r;
+
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return false;
+	if (p3reloc.type != RADEON_PACKET_TYPE3)
+		return false;
+	if (p3reloc.opcode != RADEON_PACKET3_NOP)
+		return false;
+	return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p:		structure holding the parser context.
+ * @pkt:	structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+			   struct radeon_cs_packet *pkt)
+{
+	volatile uint32_t *ib;
+	unsigned i;
+	unsigned idx;
+
+	ib = p->ib.ptr;
+	idx = pkt->idx;
+	for (i = 0; i <= (pkt->count + 1); i++, idx++)
+		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @parser:		parser structure holding parsing context.
+ * @data:		pointer to relocation data
+ * @offset_start:	starting offset
+ * @offset_mask:	offset mask (to align start offset on)
+ * @reloc:		reloc informations
+ *
+ * Check if next packet is relocation packet3, do bo validation and compute
+ * GPU offset using the provided start.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+				struct radeon_cs_reloc **cs_reloc,
+				int nomm)
+{
+	struct radeon_cs_chunk *relocs_chunk;
+	struct radeon_cs_packet p3reloc;
+	unsigned idx;
+	int r;
+
+	if (p->chunk_relocs_idx == -1) {
+		DRM_ERROR("No relocation chunk !\n");
+		return -EINVAL;
+	}
+	*cs_reloc = NULL;
+	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return r;
+	p->idx += p3reloc.count + 2;
+	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+	    p3reloc.opcode != RADEON_PACKET3_NOP) {
+		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+			  p3reloc.idx);
+		radeon_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+	if (idx >= relocs_chunk->length_dw) {
+		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+			  idx, relocs_chunk->length_dw);
+		radeon_cs_dump_packet(p, &p3reloc);
+		return -EINVAL;
+	}
+	/* FIXME: we assume reloc size is 4 dwords */
+	if (nomm) {
+		*cs_reloc = p->relocs;
+		(*cs_reloc)->lobj.gpu_offset =
+			(u64)relocs_chunk->kdata[idx + 3] << 32;
+		(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+	} else
+		*cs_reloc = p->relocs_ptr[(idx / 4)];
+	return 0;
+}
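radeon_cs_packet_parse() decodes a PM4 packet header with the shared macros that now live in radeon_reg.h. A small, self-contained illustration of the header layout those macros assume:

	uint32_t header = radeon_get_ib_value(p, idx);
	unsigned type  = RADEON_CP_PACKET_GET_TYPE(header);	/* bits 31:30 */
	unsigned count = RADEON_CP_PACKET_GET_COUNT(header);	/* bits 29:16 */

	if (type == RADEON_PACKET_TYPE3)
		DRM_INFO("PKT3 opcode 0x%02x, %u payload dwords\n",
			 RADEON_CP_PACKET3_GET_OPCODE(header), count + 1);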
drivers/gpu/drm/radeon/radeon_device.c

@@ -93,6 +93,7 @@ static const char radeon_family_name[][16] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
+	"OLAND",
	"LAST",
 };
drivers/gpu/drm/radeon/radeon_drv.c

@@ -123,15 +123,25 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
					int flags);
 struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf);
+extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
+				    unsigned long arg);
 
 #if defined(CONFIG_DEBUG_FS)
 int radeon_debugfs_init(struct drm_minor *minor);
 void radeon_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
 int radeon_no_wb;
-int radeon_modeset = -1;
+int radeon_modeset = 1;
 int radeon_dynclks = -1;
 int radeon_r4xx_atom = 0;
 int radeon_agpmode = 0;
@@ -199,6 +209,14 @@ module_param_named(msi, radeon_msi, int, 0444);
 MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (defaul 10000 = 10 seconds, 0 = disable)");
 module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
 
+static struct pci_device_id pciidlist[] = {
+	radeon_PCI_IDS
+};
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+#ifdef CONFIG_DRM_RADEON_UMS
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
	drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -227,14 +245,6 @@ static int radeon_resume(struct drm_device *dev)
	return 0;
 }
 
-static struct pci_device_id pciidlist[] = {
-	radeon_PCI_IDS
-};
-
-#if defined(CONFIG_DRM_RADEON_KMS)
-MODULE_DEVICE_TABLE(pci, pciidlist);
-#endif
-
 static const struct file_operations radeon_driver_old_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
@@ -284,6 +294,8 @@ static struct drm_driver driver_old = {
	.patchlevel = DRIVER_PATCHLEVEL,
 };
 
+#endif
+
 static struct drm_driver kms_driver;
 
 static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -411,10 +423,12 @@ static struct drm_driver kms_driver = {
 static struct drm_driver *driver;
 static struct pci_driver *pdriver;
 
+#ifdef CONFIG_DRM_RADEON_UMS
 static struct pci_driver radeon_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
 };
+#endif
 
 static struct pci_driver radeon_kms_pci_driver = {
	.name = DRIVER_NAME,
@@ -427,28 +441,6 @@ static struct pci_driver radeon_kms_pci_driver = {
 
 static int __init radeon_init(void)
 {
-	driver = &driver_old;
-	pdriver = &radeon_pci_driver;
-	driver->num_ioctls = radeon_max_ioctl;
-#ifdef CONFIG_VGA_CONSOLE
-	if (vgacon_text_force() && radeon_modeset == -1) {
-		DRM_INFO("VGACON disable radeon kernel modesetting.\n");
-		driver = &driver_old;
-		pdriver = &radeon_pci_driver;
-		driver->driver_features &= ~DRIVER_MODESET;
-		radeon_modeset = 0;
-	}
-#endif
-	/* if enabled by default */
-	if (radeon_modeset == -1) {
-#ifdef CONFIG_DRM_RADEON_KMS
-		DRM_INFO("radeon defaulting to kernel modesetting.\n");
-		radeon_modeset = 1;
-#else
-		DRM_INFO("radeon defaulting to userspace modesetting.\n");
-		radeon_modeset = 0;
-#endif
-	}
	if (radeon_modeset == 1) {
		DRM_INFO("radeon kernel modesetting enabled.\n");
		driver = &kms_driver;
@@ -456,9 +448,21 @@ static int __init radeon_init(void)
		driver->driver_features |= DRIVER_MODESET;
		driver->num_ioctls = radeon_max_kms_ioctl;
		radeon_register_atpx_handler();
+
+	} else {
+#ifdef CONFIG_DRM_RADEON_UMS
+		DRM_INFO("radeon userspace modesetting enabled.\n");
+		driver = &driver_old;
+		pdriver = &radeon_pci_driver;
+		driver->driver_features &= ~DRIVER_MODESET;
+		driver->num_ioctls = radeon_max_ioctl;
+#else
+		DRM_ERROR("No UMS support in radeon module!\n");
+		return -EINVAL;
+#endif
	}
-	/* if the vga console setting is enabled still
-	 * let modprobe override it */
+
+	/* let modprobe override vga console setting */
	return drm_pci_init(driver, pdriver);
 }
drivers/gpu/drm/radeon/radeon_drv.h

@@ -113,6 +113,9 @@
 #define DRIVER_MINOR		33
 #define DRIVER_PATCHLEVEL	0
 
+/* The rest of the file is DEPRECATED! */
+#ifdef CONFIG_DRM_RADEON_UMS
+
 enum radeon_cp_microcode_version {
	UCODE_R100,
	UCODE_R200,
@@ -418,8 +421,6 @@ extern int radeon_driver_open(struct drm_device *dev,
			      struct drm_file *file_priv);
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg);
-extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
-				    unsigned long arg);
 
 extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
 extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -462,15 +463,6 @@ extern void r600_blit_swap(struct drm_device *dev,
			   int sx, int sy, int dx, int dy,
			   int w, int h, int src_pitch, int dst_pitch, int cpp);
 
-/* atpx handler */
-#if defined(CONFIG_VGA_SWITCHEROO)
-void radeon_register_atpx_handler(void);
-void radeon_unregister_atpx_handler(void);
-#else
-static inline void radeon_register_atpx_handler(void) {}
-static inline void radeon_unregister_atpx_handler(void) {}
-#endif
-
 /* Flags for stats.boxes
  */
 #define RADEON_BOX_DMA_IDLE      0x1
@@ -2167,4 +2159,6 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
 } while (0)
 
 
+#endif				/* CONFIG_DRM_RADEON_UMS */
+
 #endif				/* __RADEON_DRV_H__ */
drivers/gpu/drm/radeon/radeon_family.h

@@ -91,6 +91,7 @@ enum radeon_family {
	CHIP_TAHITI,
	CHIP_PITCAIRN,
	CHIP_VERDE,
+	CHIP_OLAND,
	CHIP_LAST,
 };
drivers/gpu/drm/radeon/radeon_gart.c

@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
  */
 static int radeon_vm_update_pdes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
+				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end)
 {
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
		    ((last_pt + incr * count) != pt)) {
 
			if (count) {
-				radeon_asic_vm_set_page(rdev, last_pde,
+				radeon_asic_vm_set_page(rdev, ib, last_pde,
							last_pt, count, incr,
							RADEON_VM_PAGE_VALID);
			}
@@ -985,7 +986,7 @@ retry:
	}
 
	if (count) {
-		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+		radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
					incr, RADEON_VM_PAGE_VALID);
 
	}
@@ -1009,6 +1010,7 @@ retry:
  */
 static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
+				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
 {
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
		if ((last_pte + 8 * count) != pte) {
 
			if (count) {
-				radeon_asic_vm_set_page(rdev, last_pte,
+				radeon_asic_vm_set_page(rdev, ib, last_pte,
							last_dst, count,
							RADEON_GPU_PAGE_SIZE,
							flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
	}
 
	if (count) {
-		radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+		radeon_asic_vm_set_page(rdev, ib, last_pte,
+					last_dst, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
 }
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct ttm_mem_reg *mem)
 {
	unsigned ridx = rdev->asic->vm.pt_ring_index;
-	struct radeon_ring *ring = &rdev->ring[ridx];
-	struct radeon_semaphore *sem = NULL;
+	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, npdes, ndw;
	uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
		bo_va->valid = false;
	}
 
-	if (vm->fence && radeon_fence_signaled(vm->fence)) {
-		radeon_fence_unref(&vm->fence);
-	}
-
-	if (vm->fence && vm->fence->ring != ridx) {
-		r = radeon_semaphore_create(rdev, &sem);
-		if (r) {
-			return r;
-		}
-	}
-
	nptes = radeon_bo_ngpu_pages(bo);
 
	/* assume two extra pdes in case the mapping overlaps the borders */
	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
	/* estimate number of dw needed */
-	/* semaphore, fence and padding */
-	ndw = 32;
+	/* padding, etc. */
+	ndw = 64;
 
	if (RADEON_VM_BLOCK_SIZE > 11)
		/* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
	/* reserve space for pde addresses */
	ndw += npdes * 2;
 
-	r = radeon_ring_lock(rdev, ring, ndw);
+	/* update too big for an IB */
+	if (ndw > 0xfffff)
+		return -ENOMEM;
+
+	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
+	ib.length_dw = 0;
+
+	r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
	if (r) {
+		radeon_ib_free(rdev, &ib);
		return r;
	}
 
-	if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
-		radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
-		radeon_fence_note_sync(vm->fence, ridx);
-	}
-
-	r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
-	if (r) {
-		radeon_ring_unlock_undo(rdev, ring);
-		return r;
-	}
-
-	radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, bo_va->flags);
 
-	radeon_fence_unref(&vm->fence);
-	r = radeon_fence_emit(rdev, &vm->fence, ridx);
+	radeon_ib_sync_to(&ib, vm->fence);
+	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
-		radeon_ring_unlock_undo(rdev, ring);
+		radeon_ib_free(rdev, &ib);
		return r;
	}
-	radeon_ring_unlock_commit(rdev, ring);
-	radeon_semaphore_free(rdev, &sem, vm->fence);
+	radeon_fence_unref(&vm->fence);
+	vm->fence = radeon_fence_ref(ib.fence);
+	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);
 
	return 0;
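The rework above replaces the ring-lock/semaphore dance with a plain IB: allocate, fill, sync, schedule, free. A condensed sketch of that life cycle (error handling trimmed; every call appears in the hunks above):

	struct radeon_ib ib;
	int r;

	r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;
	/* ... emit SET_PAGE writes via radeon_asic_vm_set_page(..., &ib, ...) ... */
	radeon_ib_sync_to(&ib, vm->fence);	/* order after prior VM work */
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r == 0) {
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(ib.fence);	/* fence now tracks the IB */
	}
	radeon_ib_free(rdev, &ib);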
drivers/gpu/drm/radeon/radeon_irq.c

@@ -28,6 +28,8 @@
  * Authors:
  *    Keith Whitwell <keith@tungstengraphics.com>
  *    Michel Dänzer <michel@daenzer.net>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
  */
 
 #include <drm/drmP.h>
drivers/gpu/drm/radeon/radeon_mem.c

@@ -27,6 +27,8 @@
  *
  * Authors:
  *    Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
  */
 
 #include <drm/drmP.h>
drivers/gpu/drm/radeon/radeon_reg.h

@@ -3706,4 +3706,19 @@
 
 #define RV530_GB_PIPE_SELECT2           0x4124
 
+#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
+#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
+#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
+#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
+#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
+#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
+#define RADEON_PACKET_TYPE0 0
+#define RADEON_PACKET_TYPE1 1
+#define RADEON_PACKET_TYPE2 2
+#define RADEON_PACKET_TYPE3 3
+
+#define RADEON_PACKET3_NOP 0x10
+
+#define RADEON_VLINE_STAT (1 << 12)
+
 #endif
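These consolidated constants are what the PACKET0()/PACKET3() builders in the per-family headers now expand against. A one-line round trip showing that encode and decode agree (pure bit arithmetic, no driver state involved):

	/* type-3 header: opcode 0x10 (NOP), one payload dword (count field 0) */
	uint32_t hdr = (RADEON_PACKET_TYPE3 << 30) | (RADEON_PACKET3_NOP << 8);

	/* decodes back to the same fields */
	BUG_ON(RADEON_CP_PACKET_GET_TYPE(hdr) != RADEON_PACKET_TYPE3);
	BUG_ON(RADEON_CP_PACKET3_GET_OPCODE(hdr) != RADEON_PACKET3_NOP);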
drivers/gpu/drm/radeon/radeon_ring.c

@@ -108,6 +108,25 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
	radeon_fence_unref(&ib->fence);
 }
 
+/**
+ * radeon_ib_sync_to - sync to fence before executing the IB
+ *
+ * @ib: IB object to add fence to
+ * @fence: fence to sync to
+ *
+ * Sync to the fence before executing the IB
+ */
+void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
+{
+	struct radeon_fence *other;
+
+	if (!fence)
+		return;
+
+	other = ib->sync_to[fence->ring];
+	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
+}
+
 /**
  * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
  *
drivers/gpu/drm/radeon/radeon_state.c

@@ -25,6 +25,8 @@
  * Authors:
  *    Gareth Hughes <gareth@valinux.com>
  *    Kevin E. Martin <martin@valinux.com>
+ *
+ * ------------------------ This file is DEPRECATED! -------------------------
  */
 
 #include <drm/drmP.h>
drivers/gpu/drm/radeon/rv515d.h

@@ -205,17 +205,6 @@
	     REG_SET(PACKET3_IT_OPCODE, (op)) |		\
	     REG_SET(PACKET3_COUNT, (n)))
 
-#define	PACKET_TYPE0	0
-#define	PACKET_TYPE1	1
-#define	PACKET_TYPE2	2
-#define	PACKET_TYPE3	3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
-#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-
 /* Registers */
 #define R_0000F0_RBBM_SOFT_RESET                     0x0000F0
 #define   S_0000F0_SOFT_RESET_CP(x)                  (((x) & 0x1) << 0)
|
@ -38,6 +38,7 @@
|
||||
#define SI_CE_UCODE_SIZE 2144
|
||||
#define SI_RLC_UCODE_SIZE 2048
|
||||
#define SI_MC_UCODE_SIZE 7769
|
||||
#define OLAND_MC_UCODE_SIZE 7863
|
||||
|
||||
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
|
||||
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
|
||||
@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
|
||||
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
|
||||
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
|
||||
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
|
||||
MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
|
||||
MODULE_FIRMWARE("radeon/OLAND_me.bin");
|
||||
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
|
||||
MODULE_FIRMWARE("radeon/OLAND_mc.bin");
|
||||
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
|
||||
|
||||
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
|
||||
extern void r600_ih_ring_fini(struct radeon_device *rdev);
|
||||
@ -61,6 +67,8 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
|
||||
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
|
||||
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
|
||||
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
|
||||
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
|
||||
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
|
||||
|
||||
/* get temperature in millidegrees */
|
||||
int si_get_temp(struct radeon_device *rdev)
|
||||
@ -200,6 +208,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
|
||||
{0x0000009f, 0x00a37400}
|
||||
};
|
||||
|
||||
static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
|
||||
{0x0000006f, 0x03044000},
|
||||
{0x00000070, 0x0480c018},
|
||||
{0x00000071, 0x00000040},
|
||||
{0x00000072, 0x01000000},
|
||||
{0x00000074, 0x000000ff},
|
||||
{0x00000075, 0x00143400},
|
||||
{0x00000076, 0x08ec0800},
|
||||
{0x00000077, 0x040000cc},
|
||||
{0x00000079, 0x00000000},
|
||||
{0x0000007a, 0x21000409},
|
||||
{0x0000007c, 0x00000000},
|
||||
{0x0000007d, 0xe8000000},
|
||||
{0x0000007e, 0x044408a8},
|
||||
{0x0000007f, 0x00000003},
|
||||
{0x00000080, 0x00000000},
|
||||
{0x00000081, 0x01000000},
|
||||
{0x00000082, 0x02000000},
|
||||
{0x00000083, 0x00000000},
|
||||
{0x00000084, 0xe3f3e4f4},
|
||||
{0x00000085, 0x00052024},
|
||||
{0x00000087, 0x00000000},
|
||||
{0x00000088, 0x66036603},
|
||||
{0x00000089, 0x01000000},
|
||||
{0x0000008b, 0x1c0a0000},
|
||||
{0x0000008c, 0xff010000},
|
||||
{0x0000008e, 0xffffefff},
|
||||
{0x0000008f, 0xfff3efff},
|
||||
{0x00000090, 0xfff3efbf},
|
||||
{0x00000094, 0x00101101},
|
||||
{0x00000095, 0x00000fff},
|
||||
{0x00000096, 0x00116fff},
|
||||
{0x00000097, 0x60010000},
|
||||
{0x00000098, 0x10010000},
|
||||
{0x00000099, 0x00006000},
|
||||
{0x0000009a, 0x00001000},
|
||||
{0x0000009f, 0x00a17730}
|
||||
};
|
||||
|
||||
/* ucode loading */
|
||||
static int si_mc_load_microcode(struct radeon_device *rdev)
|
||||
{
|
||||
@ -228,6 +275,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
|
||||
ucode_size = SI_MC_UCODE_SIZE;
|
||||
regs_size = TAHITI_IO_MC_REGS_SIZE;
|
||||
break;
|
||||
case CHIP_OLAND:
|
||||
io_mc_regs = (u32 *)&oland_io_mc_regs;
|
||||
ucode_size = OLAND_MC_UCODE_SIZE;
|
||||
regs_size = TAHITI_IO_MC_REGS_SIZE;
|
||||
break;
|
||||
}
|
||||
|
||||
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
|
||||
@ -322,6 +374,15 @@ static int si_init_microcode(struct radeon_device *rdev)
|
||||
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
|
||||
mc_req_size = SI_MC_UCODE_SIZE * 4;
|
||||
break;
|
||||
case CHIP_OLAND:
|
||||
chip_name = "OLAND";
|
||||
rlc_chip_name = "OLAND";
|
||||
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
|
||||
me_req_size = SI_PM4_UCODE_SIZE * 4;
|
||||
ce_req_size = SI_CE_UCODE_SIZE * 4;
|
||||
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
|
||||
mc_req_size = OLAND_MC_UCODE_SIZE * 4;
|
||||
break;
|
||||
default: BUG();
|
||||
}
|
||||
|
||||
@ -1125,7 +1186,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
|
||||
}
|
||||
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
|
||||
}
|
||||
} else if (rdev->family == CHIP_VERDE) {
|
||||
} else if ((rdev->family == CHIP_VERDE) ||
|
||||
(rdev->family == CHIP_OLAND)) {
|
||||
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
|
||||
switch (reg_offset) {
|
||||
case 0: /* non-AA compressed depth or any compressed stencil */
|
||||
@ -1564,6 +1626,23 @@ static void si_gpu_init(struct radeon_device *rdev)
|
||||
rdev->config.si.max_gs_threads = 32;
|
||||
rdev->config.si.max_hw_contexts = 8;
|
||||
|
||||
rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
|
||||
rdev->config.si.sc_prim_fifo_size_backend = 0x40;
|
||||
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
|
||||
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
|
||||
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
|
||||
break;
|
||||
case CHIP_OLAND:
|
||||
rdev->config.si.max_shader_engines = 1;
|
||||
rdev->config.si.max_tile_pipes = 4;
|
||||
rdev->config.si.max_cu_per_sh = 6;
|
||||
rdev->config.si.max_sh_per_se = 1;
|
||||
rdev->config.si.max_backends_per_se = 2;
|
||||
rdev->config.si.max_texture_channel_caches = 4;
|
||||
rdev->config.si.max_gprs = 256;
|
||||
rdev->config.si.max_gs_threads = 16;
|
||||
rdev->config.si.max_hw_contexts = 8;
|
||||
|
||||
rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
|
||||
rdev->config.si.sc_prim_fifo_size_backend = 0x40;
|
||||
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
|
||||
@ -2106,18 +2185,241 @@ static int si_cp_resume(struct radeon_device *rdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
|
||||
static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
|
||||
{
|
||||
u32 srbm_status;
|
||||
u32 grbm_status, grbm_status2;
|
||||
u32 grbm_status_se0, grbm_status_se1;
|
||||
u32 reset_mask = 0;
|
||||
u32 tmp;
|
||||
|
||||
srbm_status = RREG32(SRBM_STATUS);
|
||||
grbm_status = RREG32(GRBM_STATUS);
|
||||
grbm_status2 = RREG32(GRBM_STATUS2);
|
||||
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
|
||||
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
|
||||
if (!(grbm_status & GUI_ACTIVE)) {
|
||||
/* GRBM_STATUS */
|
||||
tmp = RREG32(GRBM_STATUS);
|
||||
if (tmp & (PA_BUSY | SC_BUSY |
|
||||
BCI_BUSY | SX_BUSY |
|
||||
TA_BUSY | VGT_BUSY |
|
||||
DB_BUSY | CB_BUSY |
|
||||
GDS_BUSY | SPI_BUSY |
|
||||
IA_BUSY | IA_BUSY_NO_DMA))
|
||||
reset_mask |= RADEON_RESET_GFX;
|
||||
|
||||
if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
|
||||
CP_BUSY | CP_COHERENCY_BUSY))
|
||||
reset_mask |= RADEON_RESET_CP;
|
||||
|
||||
if (tmp & GRBM_EE_BUSY)
|
||||
reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
|
||||
|
||||
/* GRBM_STATUS2 */
|
||||
tmp = RREG32(GRBM_STATUS2);
|
||||
if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
|
||||
reset_mask |= RADEON_RESET_RLC;
|
||||
|
||||
/* DMA_STATUS_REG 0 */
|
||||
tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
|
||||
if (!(tmp & DMA_IDLE))
|
||||
reset_mask |= RADEON_RESET_DMA;
|
||||
|
||||
/* DMA_STATUS_REG 1 */
|
||||
tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
|
||||
if (!(tmp & DMA_IDLE))
|
||||
reset_mask |= RADEON_RESET_DMA1;
|
||||
|
||||
/* SRBM_STATUS2 */
|
||||
tmp = RREG32(SRBM_STATUS2);
|
||||
if (tmp & DMA_BUSY)
|
||||
reset_mask |= RADEON_RESET_DMA;
|
||||
|
||||
if (tmp & DMA1_BUSY)
|
||||
reset_mask |= RADEON_RESET_DMA1;
|
||||
|
||||
/* SRBM_STATUS */
|
||||
tmp = RREG32(SRBM_STATUS);
|
||||
|
||||
if (tmp & IH_BUSY)
|
||||
reset_mask |= RADEON_RESET_IH;
|
||||
|
||||
if (tmp & SEM_BUSY)
|
||||
reset_mask |= RADEON_RESET_SEM;
|
||||
|
||||
if (tmp & GRBM_RQ_PENDING)
|
||||
reset_mask |= RADEON_RESET_GRBM;
|
||||
|
||||
if (tmp & VMC_BUSY)
|
||||
reset_mask |= RADEON_RESET_VMC;
|
||||
|
||||
if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
|
||||
MCC_BUSY | MCD_BUSY))
|
||||
reset_mask |= RADEON_RESET_MC;
|
||||
|
||||
if (evergreen_is_display_hung(rdev))
|
||||
reset_mask |= RADEON_RESET_DISPLAY;
|
||||
|
||||
/* VM_L2_STATUS */
|
||||
tmp = RREG32(VM_L2_STATUS);
|
||||
if (tmp & L2_BUSY)
|
||||
reset_mask |= RADEON_RESET_VMC;
|
||||
|
||||
return reset_mask;
|
||||
}
|
||||
|
||||
static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
|
||||
{
|
||||
struct evergreen_mc_save save;
|
||||
u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
|
	u32 tmp;

	if (reset_mask == 0)
		return;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	evergreen_print_gpu_status_regs(rdev);
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	if (reset_mask & RADEON_RESET_DMA) {
		/* dma0 */
		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
	}
	if (reset_mask & RADEON_RESET_DMA1) {
		/* dma1 */
		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
		tmp &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
	}

	udelay(50);

	evergreen_mc_stop(rdev, &save);
	if (evergreen_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
		grbm_soft_reset = SOFT_RESET_CB |
			SOFT_RESET_DB |
			SOFT_RESET_GDS |
			SOFT_RESET_PA |
			SOFT_RESET_SC |
			SOFT_RESET_BCI |
			SOFT_RESET_SPI |
			SOFT_RESET_SX |
			SOFT_RESET_TC |
			SOFT_RESET_TA |
			SOFT_RESET_VGT |
			SOFT_RESET_IA;
	}

	if (reset_mask & RADEON_RESET_CP) {
		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;

		srbm_soft_reset |= SOFT_RESET_GRBM;
	}

	if (reset_mask & RADEON_RESET_DMA)
		srbm_soft_reset |= SOFT_RESET_DMA;

	if (reset_mask & RADEON_RESET_DMA1)
		srbm_soft_reset |= SOFT_RESET_DMA1;

	if (reset_mask & RADEON_RESET_DISPLAY)
		srbm_soft_reset |= SOFT_RESET_DC;

	if (reset_mask & RADEON_RESET_RLC)
		grbm_soft_reset |= SOFT_RESET_RLC;

	if (reset_mask & RADEON_RESET_SEM)
		srbm_soft_reset |= SOFT_RESET_SEM;

	if (reset_mask & RADEON_RESET_IH)
		srbm_soft_reset |= SOFT_RESET_IH;

	if (reset_mask & RADEON_RESET_GRBM)
		srbm_soft_reset |= SOFT_RESET_GRBM;

	if (reset_mask & RADEON_RESET_VMC)
		srbm_soft_reset |= SOFT_RESET_VMC;

	if (reset_mask & RADEON_RESET_MC)
		srbm_soft_reset |= SOFT_RESET_MC;

	if (grbm_soft_reset) {
		tmp = RREG32(GRBM_SOFT_RESET);
		tmp |= grbm_soft_reset;
		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~grbm_soft_reset;
		WREG32(GRBM_SOFT_RESET, tmp);
		tmp = RREG32(GRBM_SOFT_RESET);
	}

	if (srbm_soft_reset) {
		tmp = RREG32(SRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(SRBM_SOFT_RESET, tmp);
		tmp = RREG32(SRBM_SOFT_RESET);
	}

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	udelay(50);

	evergreen_print_gpu_status_regs(rdev);
}

int si_asic_reset(struct radeon_device *rdev)
{
	u32 reset_mask;

	reset_mask = si_gpu_check_soft_reset(rdev);

	if (reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, true);

	si_gpu_soft_reset(rdev, reset_mask);

	reset_mask = si_gpu_check_soft_reset(rdev);

	if (!reset_mask)
		r600_set_bios_scratch_engine_hung(rdev, false);

	return 0;
}
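
The shape of the new reset path is worth spelling out: si_asic_reset() derives a mask from the status registers, flags the engines as hung in the BIOS scratch register, resets only the implicated blocks, then re-checks whether the hang cleared. Below is a minimal, self-contained sketch of that pattern with the hardware access stubbed out; check_soft_reset(), soft_reset() and set_engine_hung() are hypothetical stand-ins for si_gpu_check_soft_reset(), si_gpu_soft_reset() and r600_set_bios_scratch_engine_hung(), and the mask bits are illustrative rather than the real RADEON_RESET_* values.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative mask bits, mirroring the role of the RADEON_RESET_* flags. */
#define RESET_GFX  (1u << 0)
#define RESET_DMA  (1u << 2)

static uint32_t hung_blocks = RESET_GFX | RESET_DMA;	/* fake hardware state */

static uint32_t check_soft_reset(void)	{ return hung_blocks; }
static void soft_reset(uint32_t mask)	{ hung_blocks &= ~mask; }
static void set_engine_hung(bool hung)	{ printf("scratch hung=%d\n", hung); }

static int asic_reset(void)
{
	uint32_t mask = check_soft_reset();

	if (mask)			/* something is stuck: tell the BIOS */
		set_engine_hung(true);

	soft_reset(mask);		/* reset only the implicated blocks */

	mask = check_soft_reset();	/* did the reset actually help? */
	if (!mask)
		set_engine_hung(false);

	return 0;
}

int main(void)
{
	asic_reset();
	printf("remaining mask: 0x%08x\n", hung_blocks);
	return 0;
}

Note that, as in the kernel code, the function reports success either way; the caller decides what to do if the re-check still shows busy blocks.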

/**
 * si_gfx_is_lockup - Check if the GFX engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the GFX engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = si_gpu_check_soft_reset(rdev);

	if (!(reset_mask & (RADEON_RESET_GFX |
			    RADEON_RESET_COMPUTE |
			    RADEON_RESET_CP))) {
		radeon_ring_lockup_update(ring);
		return false;
	}
@@ -2126,134 +2428,32 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
	return radeon_ring_test_lockup(rdev, ring);
}

static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
/**
 * si_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 grbm_reset = 0;
	u32 reset_mask = si_gpu_check_soft_reset(rdev);
	u32 mask;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return;
	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
		mask = RADEON_RESET_DMA;
	else
		mask = RADEON_RESET_DMA1;

	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));

	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_BCI |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);

	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	udelay(50);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);

	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
		RREG32(GRBM_STATUS));
	dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
		RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
		RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
		RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
		RREG32(SRBM_STATUS));
}

static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
{
	u32 tmp;

	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
		return;

	dev_info(rdev->dev, "  DMA_STATUS_REG = 0x%08X\n",
		RREG32(DMA_STATUS_REG));

	/* dma0 */
	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);

	/* dma1 */
	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
	tmp &= ~DMA_RB_ENABLE;
	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);

	/* Reset dma */
	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
	RREG32(SRBM_SOFT_RESET);
	udelay(50);
	WREG32(SRBM_SOFT_RESET, 0);

	dev_info(rdev->dev, "  DMA_STATUS_REG = 0x%08X\n",
		RREG32(DMA_STATUS_REG));
}

static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
{
	struct evergreen_mc_save save;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);

	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
		reset_mask &= ~RADEON_RESET_DMA;

	if (reset_mask == 0)
		return 0;

	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);

	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	if (!(reset_mask & mask)) {
		radeon_ring_lockup_update(ring);
		return false;
	}

	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
		si_gpu_soft_reset_gfx(rdev);

	if (reset_mask & RADEON_RESET_DMA)
		si_gpu_soft_reset_dma(rdev);

	/* Wait a little for things to settle down */
	udelay(50);

	evergreen_mc_resume(rdev, &save);
	return 0;
}

int si_asic_reset(struct radeon_device *rdev)
{
	return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
					RADEON_RESET_COMPUTE |
					RADEON_RESET_DMA));
	/* force ring activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
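
For the per-ring lockup checks above, the interesting change is that they no longer poke at GRBM_STATUS directly: they ask si_gpu_check_soft_reset() for the mask and test only the bit belonging to their own engine, with si_dma_is_lockup() picking RADEON_RESET_DMA or RADEON_RESET_DMA1 by ring index. A rough standalone sketch of that selection logic, with illustrative mask values and a stubbed status check:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Illustrative stand-ins for RADEON_RESET_DMA and RADEON_RESET_DMA1. */
#define RESET_DMA   (1u << 2)
#define RESET_DMA1  (1u << 5)

enum { RING_DMA0 = 0, RING_DMA1 = 1 };

/* Stand-in for si_gpu_check_soft_reset(): pretend only dma1 is stuck. */
static uint32_t check_soft_reset(void) { return RESET_DMA1; }

/* Mirrors the shape of si_dma_is_lockup(): map the ring to its mask bit
 * and only report a lockup when that specific engine is implicated. */
static bool dma_is_lockup(int ring_idx)
{
	uint32_t reset_mask = check_soft_reset();
	uint32_t mask = (ring_idx == RING_DMA0) ? RESET_DMA : RESET_DMA1;

	if (!(reset_mask & mask))
		return false;	/* the kernel also refreshes its lockup timer here */

	return true;		/* the kernel re-tests the ring before concluding */
}

int main(void)
{
	printf("dma0 locked up: %d\n", dma_is_lockup(RING_DMA0));
	printf("dma1 locked up: %d\n", dma_is_lockup(RING_DMA1));
	return 0;
}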

/* MC */
@@ -2855,19 +3055,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)

	do {
		pkt.idx = idx;
		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case PACKET_TYPE0:
		case RADEON_PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case PACKET_TYPE2:
		case RADEON_PACKET_TYPE2:
			idx += 1;
			break;
		case PACKET_TYPE3:
			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
		case RADEON_PACKET_TYPE3:
			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			if (ib->is_const_ib)
				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
			else {
@@ -2920,19 +3120,21 @@ void si_vm_fini(struct radeon_device *rdev)
 * si_vm_set_page - update the page tables using the CP
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using the CP (cayman-si).
 * Update the page tables using the CP (SI).
 */
void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
void si_vm_set_page(struct radeon_device *rdev,
		    struct radeon_ib *ib,
		    uint64_t pe,
		    uint64_t addr, unsigned count,
		    uint32_t incr, uint32_t flags)
{
	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
	uint64_t value;
	unsigned ndw;
@@ -2943,11 +3145,11 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
			if (ndw > 0x3FFE)
				ndw = 0x3FFE;

			radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
			radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
						 WRITE_DATA_DST_SEL(1)));
			radeon_ring_write(ring, pe);
			radeon_ring_write(ring, upper_32_bits(pe));
			ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
			ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
						    WRITE_DATA_DST_SEL(1));
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
			for (; ndw > 2; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
@@ -2959,8 +3161,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
				}
				addr += incr;
				value |= r600_flags;
				radeon_ring_write(ring, value);
				radeon_ring_write(ring, upper_32_bits(value));
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
@@ -2972,9 +3174,9 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
				ndw = 0xFFFFE;

			/* for non-physically contiguous pages (system) */
			radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
			radeon_ring_write(ring, pe);
			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
			ib->ptr[ib->length_dw++] = pe;
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
				if (flags & RADEON_VM_PAGE_SYSTEM) {
					value = radeon_vm_map_gart(rdev, addr);
@@ -2986,8 +3188,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
				}
				addr += incr;
				value |= r600_flags;
				radeon_ring_write(ring, value);
				radeon_ring_write(ring, upper_32_bits(value));
				ib->ptr[ib->length_dw++] = value;
				ib->ptr[ib->length_dw++] = upper_32_bits(value);
			}
		}
	} else {
@@ -3001,20 +3203,22 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
			else
				value = 0;
			/* for physically contiguous pages (vram) */
			radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
			radeon_ring_write(ring, pe); /* dst addr */
			radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
			radeon_ring_write(ring, r600_flags); /* mask */
			radeon_ring_write(ring, 0);
			radeon_ring_write(ring, value); /* value */
			radeon_ring_write(ring, upper_32_bits(value));
			radeon_ring_write(ring, incr); /* increment size */
			radeon_ring_write(ring, 0);
			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
			ib->ptr[ib->length_dw++] = pe; /* dst addr */
			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
			ib->ptr[ib->length_dw++] = r600_flags; /* mask */
			ib->ptr[ib->length_dw++] = 0;
			ib->ptr[ib->length_dw++] = value; /* value */
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
			ib->ptr[ib->length_dw++] = incr; /* increment size */
			ib->ptr[ib->length_dw++] = 0;
			pe += ndw * 4;
			addr += (ndw / 2) * incr;
			count -= ndw / 2;
		}
	}
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
	}
}

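The substantive change in the si_vm_set_page() hunks is the destination of the packets: every radeon_ring_write(ring, ...) becomes ib->ptr[ib->length_dw++] = ..., so the page-table update is staged in an indirect buffer instead of being squeezed onto a ring, and the DMA path pads the IB to an 8-dword boundary with NOPs. A self-contained sketch of that IB-filling pattern, using a hypothetical fake_ib struct and an illustrative PAGE_VALID flag in place of the real radeon_ib and cayman_vm_page_flags() values:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the page-table access flags. */
#define PAGE_VALID  (1u << 0)

struct fake_ib {
	uint32_t ptr[64];
	unsigned length_dw;
};

/* Mirrors the contiguous-VRAM branch: emit (value, upper 32 bits) dword
 * pairs, advancing the target address by 'incr' bytes per entry. */
static void set_pages(struct fake_ib *ib, uint64_t addr,
		      unsigned count, uint32_t incr, uint32_t flags)
{
	while (count--) {
		uint64_t value = addr | flags;

		ib->ptr[ib->length_dw++] = (uint32_t)value;
		ib->ptr[ib->length_dw++] = (uint32_t)(value >> 32);
		addr += incr;
	}
	/* like the DMA path above, pad the IB to an 8-dword boundary */
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = 0;	/* NOP stand-in */
}

int main(void)
{
	struct fake_ib ib = { .length_dw = 0 };

	set_pages(&ib, 0x100000, 3, 4096, PAGE_VALID);
	for (unsigned i = 0; i < ib.length_dw; i++)
		printf("dw[%u] = 0x%08x\n", i, ib.ptr[i]);
	return 0;
}

Because the IB is sized up front rather than bounded by ring space, a large update no longer overflows a small ring such as DMA, which is the overflow fix described in the merge summary.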
@@ -61,6 +61,14 @@
#define DMIF_ADDR_CONFIG				0xBD4

#define SRBM_STATUS					0xE50
#define		GRBM_RQ_PENDING				(1 << 5)
#define		VMC_BUSY				(1 << 8)
#define		MCB_BUSY				(1 << 9)
#define		MCB_NON_DISPLAY_BUSY			(1 << 10)
#define		MCC_BUSY				(1 << 11)
#define		MCD_BUSY				(1 << 12)
#define		SEM_BUSY				(1 << 14)
#define		IH_BUSY					(1 << 17)

#define SRBM_SOFT_RESET					0x0E60
#define		SOFT_RESET_BIF				(1 << 1)
@@ -81,6 +89,10 @@
#define CC_SYS_RB_BACKEND_DISABLE			0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE			0xe84

#define SRBM_STATUS2					0x0EC4
#define		DMA_BUSY				(1 << 5)
#define		DMA1_BUSY				(1 << 6)

#define VM_L2_CNTL					0x1400
#define		ENABLE_L2_CACHE				(1 << 0)
#define		ENABLE_L2_FRAGMENT_PROCESSING		(1 << 1)

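These new busy bits are exactly what the check functions consume: si_gpu_check_soft_reset() reads SRBM_STATUS/SRBM_STATUS2 and translates busy indications into RADEON_RESET_* bits. A standalone sketch of that decode, using the bit positions defined above but illustrative stand-ins for the RADEON_RESET_* values:

#include <stdio.h>
#include <stdint.h>

/* Bit positions from the SRBM_STATUS/SRBM_STATUS2 defines above. */
#define VMC_BUSY   (1u << 8)
#define SEM_BUSY   (1u << 14)
#define IH_BUSY    (1u << 17)
#define DMA_BUSY   (1u << 5)	/* SRBM_STATUS2 */
#define DMA1_BUSY  (1u << 6)	/* SRBM_STATUS2 */

/* Illustrative stand-ins for the RADEON_RESET_* mask bits. */
#define RESET_DMA   (1u << 2)
#define RESET_DMA1  (1u << 5)
#define RESET_SEM   (1u << 7)
#define RESET_IH    (1u << 8)
#define RESET_VMC   (1u << 9)

/* Mirrors the shape of si_gpu_check_soft_reset(): translate busy bits
 * read from the status registers into a mask of blocks to reset. */
static uint32_t check_soft_reset(uint32_t srbm_status, uint32_t srbm_status2)
{
	uint32_t reset_mask = 0;

	if (srbm_status2 & DMA_BUSY)
		reset_mask |= RESET_DMA;
	if (srbm_status2 & DMA1_BUSY)
		reset_mask |= RESET_DMA1;
	if (srbm_status & VMC_BUSY)
		reset_mask |= RESET_VMC;
	if (srbm_status & SEM_BUSY)
		reset_mask |= RESET_SEM;
	if (srbm_status & IH_BUSY)
		reset_mask |= RESET_IH;

	return reset_mask;
}

int main(void)
{
	/* pretend the semaphore block and dma1 report busy */
	printf("reset mask: 0x%08x\n", check_soft_reset(SEM_BUSY, DMA1_BUSY));
	return 0;
}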
@@ -783,16 +795,7 @@
/*
 * PM4
 */
#define	PACKET_TYPE0	0
#define	PACKET_TYPE1	1
#define	PACKET_TYPE2	2
#define	PACKET_TYPE3	3

#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |				\
#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |			\
			 (((reg) >> 2) & 0xFFFF) |			\
			 ((n) & 0x3FFF) << 16)
#define CP_PACKET2			0x80000000
@@ -801,7 +804,7 @@

#define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))

#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |				\
#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |			\
			 (((op) & 0xFF) << 8) |				\
			 ((n) & 0x3FFF) << 16)

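The PM4 hunk is a pure rename: the per-family PACKET_TYPE*/CP_PACKET_GET_* copies go away in favour of shared RADEON_PACKET_* names, while the header layout stays the same (type in bits 31:30, count in 29:16, type-3 opcode in 15:8). A standalone round-trip of that encoding; the constants are redefined locally for the sketch, and RADEON_PACKET_TYPE3 is assumed to keep the value 3 that PACKET_TYPE3 had:

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

/* Local copy for the sketch, assumed equal to the old PACKET_TYPE3. */
#define RADEON_PACKET_TYPE3	3

#define PACKET3(op, n)	(((uint32_t)RADEON_PACKET_TYPE3 << 30) |	\
			 (((op) & 0xFF) << 8) |				\
			 ((n) & 0x3FFF) << 16)

/* The matching decode helpers, mirroring the header above. */
#define CP_PACKET_GET_TYPE(h)     (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h)    (((h) >> 16) & 0x3FFF)
#define CP_PACKET3_GET_OPCODE(h)  (((h) >> 8) & 0xFF)

int main(void)
{
	uint32_t hdr = PACKET3(0x37, 4);	/* 0x37 is PACKET3_WRITE_DATA on SI */

	assert(CP_PACKET_GET_TYPE(hdr) == RADEON_PACKET_TYPE3);
	assert(CP_PACKET3_GET_OPCODE(hdr) == 0x37);
	assert(CP_PACKET_GET_COUNT(hdr) == 4);
	printf("header = 0x%08x\n", hdr);
	return 0;
}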
@@ -139,6 +139,19 @@
	{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
	{0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \