Merge branch 'drm-next-3.17' of git://people.freedesktop.org/~agd5f/linux into drm-next
- Additional Hawaii fixes
- Support for using the display scaler on non-fixed mode displays
- Support for new firmware format that makes it easier to update
- Enable dpm by default on additional asics
- GPUVM improvements
- Support for uncached and write combined gtt buffers
- Allow allocation of BOs larger than visible vram
- Various other small fixes and improvements

* 'drm-next-3.17' of git://people.freedesktop.org/~agd5f/linux: (57 commits)
  drm/radeon: Prevent hdmi deep color if max_tmds_clock is undefined.
  drm/radeon: Use pflip irqs for pageflip completion if possible. (v2)
  drm/radeon: tweak ACCEL_WORKING2 query for the new firmware for hawaii
  drm/radeon: use packet3 for nop on hawaii with new firmware
  drm/radeon: tweak ACCEL_WORKING2 query for hawaii
  drm/radeon: use packet2 for nop on hawaii with old firmware
  drm/radeon: update IB size estimation for VM
  drm/radeon: split PT setup in more functions
  drm/radeon: add VM GART copy optimization to NI as well
  drm/radeon: take a BO reference on VM cleanup
  drm/radeon: add radeon_bo_ref function
  drm/radeon: remove taking mclk_lock from radeon_bo_unref
  drm/radeon: adjust default radeon_vm_block_size v2
  drm/radeon: try to enable VM flushing once more
  drm/radeon: use an intervall tree to manage the VMA v2
  drm/radeon: remove radeon_bo_clear_va
  drm/radeon: invalidate moved BOs in the VM (v2)
  drm/radeon: re-enable dpm by default on BTC
  drm/radeon: re-enable dpm by default on cayman
  drm/radeon: Only flush HDP cache from idle ioctl if BO is in VRAM
  ...
commit f7257a224e
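Most of the loader hunks below hang off the new rdev->new_fw flag from the "new firmware format" bullet: instead of hard-coded per-ASIC start addresses and sizes, each firmware image now begins with a versioned header describing where the ucode payload lives. What follows is a minimal sketch of that pattern, assuming the common_firmware_header layout that the smc/sdma headers embed (see radeon_ucode.h); the fw_payload() helper is hypothetical, shown only to make the offset arithmetic explicit:

	/* Hedged sketch, not the driver's actual helper: every new-format image
	 * starts with this common header (layout per radeon_ucode.h); loaders
	 * locate the payload via ucode_array_offset_bytes instead of per-ASIC
	 * constants. All fields are little-endian in the image. */
	struct common_firmware_header {
		uint32_t size_bytes;		/* size of the entire image */
		uint32_t header_size_bytes;
		uint16_t header_version_major;
		uint16_t header_version_minor;
		uint16_t ip_version_major;
		uint16_t ip_version_minor;
		uint32_t ucode_version;
		uint32_t ucode_size_bytes;	/* size of the ucode payload */
		uint32_t ucode_array_offset_bytes; /* payload offset from image start */
		uint32_t crc32;
	};

	/* fw_payload() is a hypothetical illustration of the rdev->new_fw paths
	 * in the ci_smc.c and cik_sdma.c hunks below. */
	static const u8 *fw_payload(const struct firmware *fw, u32 *size)
	{
		const struct common_firmware_header *hdr =
			(const struct common_firmware_header *)fw->data;

		*size = le32_to_cpu(hdr->ucode_size_bytes);
		return fw->data + le32_to_cpu(hdr->ucode_array_offset_bytes);
	}

When rdev->new_fw is clear, the loaders fall back to the legacy CHIP_*-keyed constants, which is why each loading function below grows an if/else around its old body.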
@@ -114,6 +114,7 @@ config DRM_RADEON
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
+	select INTERVAL_TREE
 	help
 	  Choose this option if you have an ATI Radeon graphics card. There
 	  are both PCI and AGP versions. You don't need to choose this to

@@ -80,7 +80,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
 	r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
 	rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
 	trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
-	ci_dpm.o dce6_afmt.o radeon_vm.o
+	ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o

 # add async DMA block
 radeon-y += \

@@ -331,12 +331,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;

-	/* get the native mode for LVDS */
-	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+	/* get the native mode for scaling */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
 		radeon_panel_mode_fixup(encoder, adjusted_mode);
-
-	/* get the native mode for TV */
-	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+	} else if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
 		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
 		if (tv_dac) {
 			if (tv_dac->tv_std == TV_STD_NTSC ||
@@ -346,6 +344,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 			else
 				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
 		}
+	} else if (radeon_encoder->rmx_type != RMX_OFF) {
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
 	}

 	if (ASIC_IS_DCE3(rdev) &&
@@ -716,7 +716,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		if (radeon_connector->use_digital &&
 		    (radeon_connector->audio == RADEON_AUDIO_ENABLE))
 			return ATOM_ENCODER_MODE_HDMI;
-		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
 			 (radeon_connector->audio == RADEON_AUDIO_AUTO))
 			return ATOM_ENCODER_MODE_HDMI;
 		else if (radeon_connector->use_digital)
@@ -735,7 +735,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		if (radeon_audio != 0) {
 			if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
 				return ATOM_ENCODER_MODE_HDMI;
-			else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+			else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
 				 (radeon_connector->audio == RADEON_AUDIO_AUTO))
 				return ATOM_ENCODER_MODE_HDMI;
 			else
@@ -755,7 +755,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 	} else if (radeon_audio != 0) {
 		if (radeon_connector->audio == RADEON_AUDIO_ENABLE)
 			return ATOM_ENCODER_MODE_HDMI;
-		else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+		else if (drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
 			 (radeon_connector->audio == RADEON_AUDIO_AUTO))
 			return ATOM_ENCODER_MODE_HDMI;
 		else

@@ -940,7 +940,18 @@ static void ci_get_leakage_voltages(struct radeon_device *rdev)
 	pi->vddc_leakage.count = 0;
 	pi->vddci_leakage.count = 0;

-	if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
+				continue;
+			if (vddc != 0 && vddc != virtual_voltage_id) {
+				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+				pi->vddc_leakage.count++;
+			}
+		}
+	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
 		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
 			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
 			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,

@@ -213,24 +213,37 @@ int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 	if (!rdev->smc_fw)
 		return -EINVAL;

-	switch (rdev->family) {
-	case CHIP_BONAIRE:
-		ucode_start_address = BONAIRE_SMC_UCODE_START;
-		ucode_size = BONAIRE_SMC_UCODE_SIZE;
-		break;
-	case CHIP_HAWAII:
-		ucode_start_address = HAWAII_SMC_UCODE_START;
-		ucode_size = HAWAII_SMC_UCODE_SIZE;
-		break;
-	default:
-		DRM_ERROR("unknown asic in smc ucode loader\n");
-		BUG();
+	if (rdev->new_fw) {
+		const struct smc_firmware_header_v1_0 *hdr =
+			(const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+		radeon_ucode_print_smc_hdr(&hdr->header);
+
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		src = (const u8 *)
+			(rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	} else {
+		switch (rdev->family) {
+		case CHIP_BONAIRE:
+			ucode_start_address = BONAIRE_SMC_UCODE_START;
+			ucode_size = BONAIRE_SMC_UCODE_SIZE;
+			break;
+		case CHIP_HAWAII:
+			ucode_start_address = HAWAII_SMC_UCODE_START;
+			ucode_size = HAWAII_SMC_UCODE_SIZE;
+			break;
+		default:
+			DRM_ERROR("unknown asic in smc ucode loader\n");
+			BUG();
+		}
+
+		src = (const u8 *)rdev->smc_fw->data;
 	}

 	if (ucode_size & 3)
 		return -EINVAL;

-	src = (const u8 *)rdev->smc_fw->data;
 	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);

[File diff suppressed because it is too large]
@@ -24,6 +24,7 @@
 #include <linux/firmware.h>
 #include <drm/drmP.h>
 #include "radeon.h"
+#include "radeon_ucode.h"
 #include "radeon_asic.h"
 #include "radeon_trace.h"
 #include "cikd.h"
@@ -118,6 +119,7 @@ void cik_sdma_set_wptr(struct radeon_device *rdev,
 		reg = SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET;

 	WREG32(reg, (ring->wptr << 2) & 0x3fffc);
+	(void)RREG32(reg);
 }

 /**
@@ -419,7 +421,6 @@ static int cik_sdma_rlc_resume(struct radeon_device *rdev)
  */
 static int cik_sdma_load_microcode(struct radeon_device *rdev)
 {
-	const __be32 *fw_data;
 	int i;

 	if (!rdev->sdma_fw)
@@ -428,19 +429,48 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
 	/* halt the MEs */
 	cik_sdma_enable(rdev, false);

-	/* sdma0 */
-	fw_data = (const __be32 *)rdev->sdma_fw->data;
-	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
-	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
-		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
-	WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+	if (rdev->new_fw) {
+		const struct sdma_firmware_header_v1_0 *hdr =
+			(const struct sdma_firmware_header_v1_0 *)rdev->sdma_fw->data;
+		const __le32 *fw_data;
+		u32 fw_size;

-	/* sdma1 */
-	fw_data = (const __be32 *)rdev->sdma_fw->data;
-	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
-	for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
-		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
-	WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+		radeon_ucode_print_sdma_hdr(&hdr->header);
+
+		/* sdma0 */
+		fw_data = (const __le32 *)
+			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+		/* sdma1 */
+		fw_data = (const __le32 *)
+			(rdev->sdma_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+		for (i = 0; i < fw_size; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, le32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+	} else {
+		const __be32 *fw_data;
+
+		/* sdma0 */
+		fw_data = (const __be32 *)rdev->sdma_fw->data;
+		WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+		/* sdma1 */
+		fw_data = (const __be32 *)rdev->sdma_fw->data;
+		WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+		for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+			WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+		WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+	}

 	WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
 	WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
@@ -719,7 +749,93 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }

 /**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using sDMA (CIK).
+ */
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+			    struct radeon_ib *ib,
+			    uint64_t pe, uint64_t src,
+			    unsigned count)
+{
+	while (count) {
+		unsigned bytes = count * 8;
+		if (bytes > 0x1FFFF8)
+			bytes = 0x1FFFF8;
+
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = bytes;
+		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(src);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+
+		pe += bytes;
+		src += bytes;
+		count -= bytes / 8;
+	}
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+			SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = ndw;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
+			ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		}
+	}
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
@@ -731,82 +847,51 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  *
  * Update the page tables using sDMA (CIK).
  */
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-			  struct radeon_ib *ib,
-			  uint64_t pe,
-			  uint64_t addr, unsigned count,
-			  uint32_t incr, uint32_t flags)
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+			   struct radeon_ib *ib,
+			   uint64_t pe,
+			   uint64_t addr, unsigned count,
+			   uint32_t incr, uint32_t flags)
 {
 	uint64_t value;
 	unsigned ndw;

-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if (flags == R600_PTE_GART) {
-		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-		while (count) {
-			unsigned bytes = count * 8;
-			if (bytes > 0x1FFFF8)
-				bytes = 0x1FFFF8;
-
-			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-			ib->ptr[ib->length_dw++] = bytes;
-			ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
-			ib->ptr[ib->length_dw++] = lower_32_bits(src);
-			ib->ptr[ib->length_dw++] = upper_32_bits(src);
-			ib->ptr[ib->length_dw++] = lower_32_bits(pe);
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-
-			pe += bytes;
-			src += bytes;
-			count -= bytes / 8;
-		}
-	} else if (flags & R600_PTE_SYSTEM) {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			/* for non-physically contiguous pages (system) */
-			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = ndw;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				value = radeon_vm_map_gart(rdev, addr);
-				value &= 0xFFFFFFFFFFFFF000ULL;
-				addr += incr;
-				value |= flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		while (count) {
-			ndw = count;
-			if (ndw > 0x7FFFF)
-				ndw = 0x7FFFF;
-
-			if (flags & R600_PTE_VALID)
-				value = addr;
-			else
-				value = 0;
-			/* for physically contiguous pages (vram) */
-			ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
-			ib->ptr[ib->length_dw++] = pe; /* dst addr */
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe);
-			ib->ptr[ib->length_dw++] = flags; /* mask */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = value; /* value */
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			ib->ptr[ib->length_dw++] = incr; /* increment size */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = ndw; /* number of entries */
-			pe += ndw * 8;
-			addr += ndw * incr;
-			count -= ndw;
-		}
+	while (count) {
+		ndw = count;
+		if (ndw > 0x7FFFF)
+			ndw = 0x7FFFF;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
+		pe += ndw * 8;
+		addr += ndw * incr;
+		count -= ndw;
 	}
 }

+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}

@@ -136,13 +136,13 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
 			tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
 				AUDIO_LIPSYNC(connector->audio_latency[1]);
 		else
-			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
 	} else {
 		if (connector->latency_present[0])
 			tmp = VIDEO_LIPSYNC(connector->video_latency[0]) |
 				AUDIO_LIPSYNC(connector->audio_latency[0]);
 		else
-			tmp = VIDEO_LIPSYNC(255) | AUDIO_LIPSYNC(255);
+			tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
 	}
 	WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
 }
@@ -164,8 +164,10 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	offset = dig->afmt->pin->offset;

 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}

 	if (!radeon_connector) {
@@ -173,7 +175,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 		return;
 	}

-	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+	sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
 	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
@@ -225,8 +227,10 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
 	offset = dig->afmt->pin->offset;

 	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
-		if (connector->encoder == encoder)
+		if (connector->encoder == encoder) {
 			radeon_connector = to_radeon_connector(connector);
+			break;
+		}
 	}

 	if (!radeon_connector) {
@@ -234,7 +238,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
 		return;
 	}

-	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+	sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
 	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;

@@ -2424,7 +2424,6 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
-	radeon_gart_restore(rdev);
 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
 				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -2677,7 +2676,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
 		if (save->crtc_enabled[i]) {
 			if (ASIC_IS_DCE6(rdev)) {
 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
-				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+				tmp &= ~EVERGREEN_CRTC_BLANK_DATA_EN;
 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
 				WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
 				WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
@@ -4023,7 +4022,8 @@ int sumo_rlc_init(struct radeon_device *rdev)
 	/* save restore block */
 	if (rdev->rlc.save_restore_obj == NULL) {
 		r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+				     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+				     &rdev->rlc.save_restore_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
 			return r;
@@ -4101,7 +4101,8 @@ int sumo_rlc_init(struct radeon_device *rdev)

 	if (rdev->rlc.clear_state_obj == NULL) {
 		r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+				     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+				     &rdev->rlc.clear_state_obj);
 		if (r) {
 			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
 			sumo_rlc_fini(rdev);
@@ -4175,8 +4176,10 @@ int sumo_rlc_init(struct radeon_device *rdev)

 	if (rdev->rlc.cp_table_size) {
 		if (rdev->rlc.cp_table_obj == NULL) {
-			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
-					     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+			r = radeon_bo_create(rdev, rdev->rlc.cp_table_size,
+					     PAGE_SIZE, true,
+					     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
+					     &rdev->rlc.cp_table_obj);
 			if (r) {
 				dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
 				sumo_rlc_fini(rdev);
@@ -4961,7 +4964,8 @@ restart_ih:
 		case 16: /* D5 page flip */
 		case 18: /* D6 page flip */
 			DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
-			radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
 			break;
 		case 42: /* HPD hotplug */
 			switch (src_data) {

@@ -117,7 +117,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 		return;
 	}

-	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+	sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
 	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
@@ -172,7 +172,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 		return;
 	}

-	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+	sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
 	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;

@@ -1229,7 +1229,6 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
-	radeon_gart_restore(rdev);
 	/* Setup TLB control */
 	WREG32(MC_VM_MX_L1_TLB_CNTL,
 	       (0xA << 7) |

@@ -307,7 +307,43 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 }

 /**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
  *
  * @rdev: radeon_device pointer
  * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @src: src addr where to copy from
+ * @count: number of page entries to update
+ *
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+			      struct radeon_ib *ib,
+			      uint64_t pe, uint64_t src,
+			      unsigned count)
+{
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
+		ib->ptr[ib->length_dw++] = lower_32_bits(src);
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
+
+		pe += ndw * 4;
+		src += ndw * 4;
+		count -= ndw / 2;
+	}
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
@@ -315,71 +351,103 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
  * @flags: hw access flags
  *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
  */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-			    struct radeon_ib *ib,
-			    uint64_t pe,
-			    uint64_t addr, unsigned count,
-			    uint32_t incr, uint32_t flags)
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+			       struct radeon_ib *ib,
+			       uint64_t pe,
+			       uint64_t addr, unsigned count,
+			       uint32_t incr, uint32_t flags)
 {
 	uint64_t value;
 	unsigned ndw;

-	trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-	if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			/* for non-physically contiguous pages (system) */
-			ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
-			ib->ptr[ib->length_dw++] = pe;
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			for (; ndw > 0; ndw -= 2, --count, pe += 8) {
-				if (flags & R600_PTE_SYSTEM) {
-					value = radeon_vm_map_gart(rdev, addr);
-					value &= 0xFFFFFFFFFFFFF000ULL;
-				} else if (flags & R600_PTE_VALID) {
-					value = addr;
-				} else {
-					value = 0;
-				}
-				addr += incr;
-				value |= flags;
-				ib->ptr[ib->length_dw++] = value;
-				ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			}
-		}
-	} else {
-		while (count) {
-			ndw = count * 2;
-			if (ndw > 0xFFFFE)
-				ndw = 0xFFFFE;
-
-			if (flags & R600_PTE_VALID)
-				value = addr;
-			else
-				value = 0;
-			/* for physically contiguous pages (vram) */
-			ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
-			ib->ptr[ib->length_dw++] = pe; /* dst addr */
-			ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
-			ib->ptr[ib->length_dw++] = flags; /* mask */
-			ib->ptr[ib->length_dw++] = 0;
-			ib->ptr[ib->length_dw++] = value; /* value */
-			ib->ptr[ib->length_dw++] = upper_32_bits(value);
-			ib->ptr[ib->length_dw++] = incr; /* increment size */
-			ib->ptr[ib->length_dw++] = 0;
-			pe += ndw * 4;
-			addr += (ndw / 2) * incr;
-			count -= ndw / 2;
-		}
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		/* for non-physically contiguous pages (system) */
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE,
+						      0, 0, ndw);
+		ib->ptr[ib->length_dw++] = pe;
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+			if (flags & R600_PTE_SYSTEM) {
+				value = radeon_vm_map_gart(rdev, addr);
+				value &= 0xFFFFFFFFFFFFF000ULL;
+			} else if (flags & R600_PTE_VALID) {
+				value = addr;
+			} else {
+				value = 0;
+			}
+			addr += incr;
+			value |= flags;
+			ib->ptr[ib->length_dw++] = value;
+			ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		}
+	}
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+			     struct radeon_ib *ib,
+			     uint64_t pe,
+			     uint64_t addr, unsigned count,
+			     uint32_t incr, uint32_t flags)
+{
+	uint64_t value;
+	unsigned ndw;
+
+	while (count) {
+		ndw = count * 2;
+		if (ndw > 0xFFFFE)
+			ndw = 0xFFFFE;
+
+		if (flags & R600_PTE_VALID)
+			value = addr;
+		else
+			value = 0;
+
+		/* for physically contiguous pages (vram) */
+		ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+		ib->ptr[ib->length_dw++] = pe; /* dst addr */
+		ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+		ib->ptr[ib->length_dw++] = flags; /* mask */
+		ib->ptr[ib->length_dw++] = 0;
+		ib->ptr[ib->length_dw++] = value; /* value */
+		ib->ptr[ib->length_dw++] = upper_32_bits(value);
+		ib->ptr[ib->length_dw++] = incr; /* increment size */
+		ib->ptr[ib->length_dw++] = 0;
+
+		pe += ndw * 4;
+		addr += (ndw / 2) * incr;
+		count -= ndw / 2;
 	}
 }

+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
+	while (ib->length_dw & 0x7)
+		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+}

@@ -652,7 +652,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
 {
 	uint32_t tmp;

-	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
 	WREG32(RADEON_AIC_CNTL, tmp);
@@ -683,7 +682,7 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
 }

 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr)
+			    uint64_t addr, uint32_t flags)
 {
 	u32 *gtt = rdev->gart.ptr;
 	gtt[i] = cpu_to_le32(lower_32_bits(addr));
@@ -838,11 +837,7 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-				RADEON_HDP_READ_BUFFER_INVALIDATE);
-	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+	r100_ring_hdp_flush(rdev, ring);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
 	radeon_ring_write(ring, fence->seq);
@@ -1061,6 +1056,20 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
 	(void)RREG32(RADEON_CP_RB_WPTR);
 }

+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -4065,39 +4074,6 @@ int r100_init(struct radeon_device *rdev)
 	return 0;
 }

-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
-		      bool always_indirect)
-{
-	if (reg < rdev->rmmio_size && !always_indirect)
-		return readl(((void __iomem *)rdev->rmmio) + reg);
-	else {
-		unsigned long flags;
-		uint32_t ret;
-
-		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
-		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
-		return ret;
-	}
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
-		  bool always_indirect)
-{
-	if (reg < rdev->rmmio_size && !always_indirect)
-		writel(v, ((void __iomem *)rdev->rmmio) + reg);
-	else {
-		unsigned long flags;
-
-		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
-		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-	}
-}
-
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
 {
 	if (reg < rdev->rio_mem_size)

@@ -69,17 +69,23 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 	mb();
 }

+#define R300_PTE_UNSNOOPED (1 << 0)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)

 void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-			      uint64_t addr)
+			      uint64_t addr, uint32_t flags)
 {
 	void __iomem *ptr = rdev->gart.ptr;

 	addr = (lower_32_bits(addr) >> 8) |
-	       ((upper_32_bits(addr) & 0xff) << 24) |
-	       R300_PTE_WRITEABLE | R300_PTE_READABLE;
+	       ((upper_32_bits(addr) & 0xff) << 24);
+	if (flags & RADEON_GART_PAGE_READ)
+		addr |= R300_PTE_READABLE;
+	if (flags & RADEON_GART_PAGE_WRITE)
+		addr |= R300_PTE_WRITEABLE;
+	if (!(flags & RADEON_GART_PAGE_SNOOP))
+		addr |= R300_PTE_UNSNOOPED;
 	/* on x86 we want this to be CPU endian, on powerpc
 	 * on powerpc without HW swappers, it'll get swapped on way
 	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
@@ -120,7 +126,6 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
-	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);

@@ -968,7 +968,6 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
 	r = radeon_gart_table_vram_pin(rdev);
 	if (r)
 		return r;
-	radeon_gart_restore(rdev);

 	/* Setup L2 cache */
 	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -1339,7 +1338,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
 	if (rdev->vram_scratch.robj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     NULL, &rdev->vram_scratch.robj);
+				     0, NULL, &rdev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
@@ -3227,7 +3226,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
 	if (rdev->ih.ring_obj == NULL) {
 		r = radeon_bo_create(rdev, rdev->ih.ring_size,
 				     PAGE_SIZE, true,
-				     RADEON_GEM_DOMAIN_GTT,
+				     RADEON_GEM_DOMAIN_GTT, 0,
 				     NULL, &rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
@@ -3924,11 +3923,13 @@ restart_ih:
 			break;
 		case 9: /* D1 pflip */
 			DRM_DEBUG("IH: D1 flip\n");
-			radeon_crtc_handle_flip(rdev, 0);
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, 0);
 			break;
 		case 11: /* D2 pflip */
 			DRM_DEBUG("IH: D2 flip\n");
-			radeon_crtc_handle_flip(rdev, 1);
+			if (radeon_use_pflipirq > 0)
+				radeon_crtc_handle_flip(rdev, 1);
 			break;
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
@@ -4089,16 +4090,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
 }

 /**
- * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * r600_mmio_hdp_flush - flush Host Data Path cache via MMIO
  * rdev: radeon device structure
- * bo: buffer object struct which userspace is waiting for idle
  *
- * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
- * through ring buffer, this leads to corruption in rendering, see
- * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
- * directly perform HDP flush by writing register through MMIO.
+ * Some R6XX/R7XX don't seem to take into account HDP flushes performed
+ * through the ring buffer. This leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 . To avoid this, we
+ * directly perform the HDP flush by writing the register through MMIO.
  */
-void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+void r600_mmio_hdp_flush(struct radeon_device *rdev)
 {
 	/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
 	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL.

@@ -64,6 +64,7 @@
 #include <linux/wait.h>
 #include <linux/list.h>
 #include <linux/kref.h>
+#include <linux/interval_tree.h>

 #include <ttm/ttm_bo_api.h>
 #include <ttm/ttm_bo_driver.h>
@@ -103,6 +104,7 @@ extern int radeon_hard_reset;
 extern int radeon_vm_size;
 extern int radeon_vm_block_size;
 extern int radeon_deep_color;
+extern int radeon_use_pflipirq;

 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -304,6 +306,9 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
 							 u16 *vddc, u16 *vddci,
 							 u16 virtual_voltage_id,
 							 u16 vbios_voltage_id);
+int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
+				u16 virtual_voltage_id,
+				u16 *voltage);
 int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
 				      u8 voltage_type,
 				      u16 nominal_voltage,
@@ -317,6 +322,9 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
 				  struct atom_voltage_table *voltage_table);
 bool radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
 				 u8 voltage_type, u8 voltage_mode);
+int radeon_atom_get_svi2_info(struct radeon_device *rdev,
+			      u8 voltage_type,
+			      u8 *svd_gpio_id, u8 *svc_gpio_id);
 void radeon_atom_update_memory_dll(struct radeon_device *rdev,
 				   u32 mem_clock);
 void radeon_atom_set_ac_timing(struct radeon_device *rdev,
@@ -441,14 +449,12 @@ struct radeon_mman {
 struct radeon_bo_va {
 	/* protected by bo being reserved */
 	struct list_head		bo_list;
-	uint64_t			soffset;
-	uint64_t			eoffset;
 	uint32_t			flags;
-	bool				valid;
+	uint64_t			addr;
 	unsigned			ref_count;

 	/* protected by vm mutex */
-	struct list_head		vm_list;
+	struct interval_tree_node	it;
 	struct list_head		vm_status;

 	/* constant after initialization */
@@ -465,6 +471,7 @@ struct radeon_bo {
 	struct ttm_placement		placement;
 	struct ttm_buffer_object	tbo;
 	struct ttm_bo_kmap_obj		kmap;
+	u32				flags;
 	unsigned			pin_count;
 	void				*kptr;
 	u32				tiling_flags;
@@ -543,9 +550,9 @@ struct radeon_gem {

 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
-int radeon_gem_object_create(struct radeon_device *rdev, int size,
+int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
 			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
+			     u32 flags, bool kernel,
 			     struct drm_gem_object **obj);

 int radeon_mode_dumb_create(struct drm_file *file_priv,
@@ -590,6 +597,12 @@ struct radeon_mc;
 #define RADEON_GPU_PAGE_SHIFT 12
 #define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)

+#define RADEON_GART_PAGE_DUMMY	0
+#define RADEON_GART_PAGE_VALID	(1 << 0)
+#define RADEON_GART_PAGE_READ	(1 << 1)
+#define RADEON_GART_PAGE_WRITE	(1 << 2)
+#define RADEON_GART_PAGE_SNOOP	(1 << 3)
+
 struct radeon_gart {
 	dma_addr_t			table_addr;
 	struct radeon_bo		*robj;
@@ -614,8 +627,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 			int pages);
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 		     int pages, struct page **pagelist,
-		     dma_addr_t *dma_addr);
-void radeon_gart_restore(struct radeon_device *rdev);
+		     dma_addr_t *dma_addr, uint32_t flags);


 /*
@@ -855,9 +867,9 @@ struct radeon_mec {
 #define R600_PTE_FRAG_64KB	(4 << 7)
 #define R600_PTE_FRAG_256KB	(6 << 7)

-/* flags used for GART page table entries on R600+ */
-#define R600_PTE_GART	( R600_PTE_VALID | R600_PTE_SYSTEM | R600_PTE_SNOOPED \
-			| R600_PTE_READABLE | R600_PTE_WRITEABLE)
+/* flags needed to be set so we can copy directly from the GART table */
+#define R600_PTE_GART_MASK	( R600_PTE_READABLE | R600_PTE_WRITEABLE | \
+				  R600_PTE_SYSTEM | R600_PTE_VALID )

 struct radeon_vm_pt {
 	struct radeon_bo		*bo;
@@ -865,9 +877,12 @@ struct radeon_vm_pt {
 };

 struct radeon_vm {
-	struct list_head	va;
+	struct rb_root		va;
 	unsigned		id;

+	/* BOs moved, but not yet updated in the PT */
+	struct list_head	invalidated;
+
 	/* BOs freed, but not yet updated in the PT */
 	struct list_head	freed;

@@ -1740,6 +1755,7 @@ struct radeon_asic_ring {
 	/* command emmit functions */
 	void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
 	void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+	void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
 	bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
 			       struct radeon_semaphore *semaphore, bool emit_wait);
 	void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -1763,13 +1779,8 @@ struct radeon_asic {
 	int (*suspend)(struct radeon_device *rdev);
 	void (*vga_set_state)(struct radeon_device *rdev, bool state);
 	int (*asic_reset)(struct radeon_device *rdev);
-	/* ioctl hw specific callback. Some hw might want to perform special
-	 * operation on specific ioctl. For instance on wait idle some hw
-	 * might want to perform and HDP flush through MMIO as it seems that
-	 * some R6XX/R7XX hw doesn't take HDP flush into account if programmed
-	 * through ring.
-	 */
-	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
+	/* Flush the HDP cache via MMIO */
+	void (*mmio_hdp_flush)(struct radeon_device *rdev);
 	/* check if 3D engine is idle */
 	bool (*gui_idle)(struct radeon_device *rdev);
 	/* wait for mc_idle */
@@ -1782,16 +1793,26 @@ struct radeon_asic {
 	struct {
 		void (*tlb_flush)(struct radeon_device *rdev);
 		void (*set_page)(struct radeon_device *rdev, unsigned i,
-				 uint64_t addr);
+				 uint64_t addr, uint32_t flags);
 	} gart;
 	struct {
 		int (*init)(struct radeon_device *rdev);
 		void (*fini)(struct radeon_device *rdev);
-		void (*set_page)(struct radeon_device *rdev,
-				 struct radeon_ib *ib,
-				 uint64_t pe,
-				 uint64_t addr, unsigned count,
-				 uint32_t incr, uint32_t flags);
+		void (*copy_pages)(struct radeon_device *rdev,
+				   struct radeon_ib *ib,
+				   uint64_t pe, uint64_t src,
+				   unsigned count);
+		void (*write_pages)(struct radeon_device *rdev,
+				    struct radeon_ib *ib,
+				    uint64_t pe,
+				    uint64_t addr, unsigned count,
+				    uint32_t incr, uint32_t flags);
+		void (*set_pages)(struct radeon_device *rdev,
+				  struct radeon_ib *ib,
+				  uint64_t pe,
+				  uint64_t addr, unsigned count,
+				  uint32_t incr, uint32_t flags);
+		void (*pad_ib)(struct radeon_ib *ib);
 	} vm;
 	/* ring specific callbacks */
 	struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2299,10 +2320,12 @@ struct radeon_device {
 	const struct firmware *mc_fw;	/* NI MC firmware */
 	const struct firmware *ce_fw;	/* SI CE firmware */
 	const struct firmware *mec_fw;	/* CIK MEC firmware */
+	const struct firmware *mec2_fw;	/* KV MEC2 firmware */
 	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
 	const struct firmware *smc_fw;	/* SMC firmware */
 	const struct firmware *uvd_fw;	/* UVD firmware */
 	const struct firmware *vce_fw;	/* VCE firmware */
+	bool new_fw;
 	struct r600_vram_scratch vram_scratch;
 	int msi_enabled; /* msi enabled */
 	struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2342,6 +2365,11 @@ struct radeon_device {

 	struct dev_pm_domain vga_pm_domain;
 	bool have_disp_power_ref;
+	u32 px_quirk_flags;
+
+	/* tracking pinned memory */
+	u64 vram_pin_size;
+	u64 gart_pin_size;
 };

 bool radeon_is_px(struct drm_device *dev);
@@ -2352,10 +2380,42 @@ int radeon_device_init(struct radeon_device *rdev,
 void radeon_device_fini(struct radeon_device *rdev);
 int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
-		      bool always_indirect);
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
-		  bool always_indirect);
+#define RADEON_MIN_MMIO_SIZE 0x10000
+
+static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
+				    bool always_indirect)
+{
+	/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */
+	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+		return readl(((void __iomem *)rdev->rmmio) + reg);
+	else {
+		unsigned long flags;
+		uint32_t ret;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+
+		return ret;
+	}
+}
+
+static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
+				bool always_indirect)
+{
+	if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect)
+		writel(v, ((void __iomem *)rdev->rmmio) + reg);
+	else {
+		unsigned long flags;
+
+		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+	}
+}
+
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
 void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);

@@ -2709,10 +2769,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
+#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
 #define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
 #define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
 #define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
@@ -2840,6 +2903,8 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 				    struct radeon_vm *vm);
 int radeon_vm_clear_freed(struct radeon_device *rdev,
 			  struct radeon_vm *vm);
+int radeon_vm_clear_invalids(struct radeon_device *rdev,
+			     struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
 			struct radeon_bo_va *bo_va,
 			struct ttm_mem_reg *mem);

@@ -185,6 +185,7 @@ static struct radeon_asic_ring r100_gfx_ring = {
 	.get_rptr = &r100_gfx_get_rptr,
 	.get_wptr = &r100_gfx_get_wptr,
 	.set_wptr = &r100_gfx_set_wptr,
+	.hdp_flush = &r100_ring_hdp_flush,
 };

 static struct radeon_asic r100_asic = {
@@ -194,7 +195,7 @@ static struct radeon_asic r100_asic = {
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &r100_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &r100_mc_wait_for_idle,
 	.gart = {
@@ -260,7 +261,7 @@ static struct radeon_asic r200_asic = {
 	.resume = &r100_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &r100_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &r100_mc_wait_for_idle,
 	.gart = {
@@ -331,6 +332,7 @@ static struct radeon_asic_ring r300_gfx_ring = {
 	.get_rptr = &r100_gfx_get_rptr,
 	.get_wptr = &r100_gfx_get_wptr,
 	.set_wptr = &r100_gfx_set_wptr,
+	.hdp_flush = &r100_ring_hdp_flush,
 };

 static struct radeon_asic r300_asic = {
@@ -340,7 +342,7 @@ static struct radeon_asic r300_asic = {
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &r300_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
@@ -406,7 +408,7 @@ static struct radeon_asic r300_asic_pcie = {
 	.resume = &r300_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &r300_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
@@ -472,7 +474,7 @@ static struct radeon_asic r420_asic = {
 	.resume = &r420_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &r300_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &r300_mc_wait_for_idle,
 	.gart = {
@@ -538,7 +540,7 @@ static struct radeon_asic rs400_asic = {
 	.resume = &rs400_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &r300_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &rs400_mc_wait_for_idle,
 	.gart = {
@@ -604,7 +606,7 @@ static struct radeon_asic rs600_asic = {
 	.resume = &rs600_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &rs600_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &rs600_mc_wait_for_idle,
 	.gart = {
@@ -672,7 +674,7 @@ static struct radeon_asic rs690_asic = {
 	.resume = &rs690_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &rs600_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &rs690_mc_wait_for_idle,
 	.gart = {
@@ -740,7 +742,7 @@ static struct radeon_asic rv515_asic = {
 	.resume = &rv515_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &rs600_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &rv515_mc_wait_for_idle,
 	.gart = {
@@ -806,7 +808,7 @@ static struct radeon_asic r520_asic = {
 	.resume = &r520_resume,
 	.vga_set_state = &r100_vga_set_state,
 	.asic_reset = &rs600_asic_reset,
-	.ioctl_wait_idle = NULL,
+	.mmio_hdp_flush = NULL,
 	.gui_idle = &r100_gui_idle,
 	.mc_wait_for_idle = &r520_mc_wait_for_idle,
 	.gart = {
@@ -898,7 +900,7 @@ static struct radeon_asic r600_asic = {
 	.resume = &r600_resume,
 	.vga_set_state = &r600_vga_set_state,
 	.asic_reset = &r600_asic_reset,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &r600_mc_wait_for_idle,
 	.get_xclk = &r600_get_xclk,
@@ -970,7 +972,7 @@ static struct radeon_asic rv6xx_asic = {
 	.resume = &r600_resume,
 	.vga_set_state = &r600_vga_set_state,
 	.asic_reset = &r600_asic_reset,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &r600_mc_wait_for_idle,
 	.get_xclk = &r600_get_xclk,
@@ -1060,7 +1062,7 @@ static struct radeon_asic rs780_asic = {
 	.resume = &r600_resume,
 	.vga_set_state = &r600_vga_set_state,
 	.asic_reset = &r600_asic_reset,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &r600_mc_wait_for_idle,
 	.get_xclk = &r600_get_xclk,
@@ -1163,7 +1165,7 @@ static struct radeon_asic rv770_asic = {
 	.resume = &rv770_resume,
 	.asic_reset = &r600_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &r600_mc_wait_for_idle,
 	.get_xclk = &rv770_get_xclk,
@@ -1281,7 +1283,7 @@ static struct radeon_asic evergreen_asic = {
 	.resume = &evergreen_resume,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
 	.get_xclk = &rv770_get_xclk,
@@ -1373,7 +1375,7 @@ static struct radeon_asic sumo_asic = {
 	.resume = &evergreen_resume,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
 	.get_xclk = &r600_get_xclk,
@@ -1464,7 +1466,7 @@ static struct radeon_asic btc_asic = {
 	.resume = &evergreen_resume,
 	.asic_reset = &evergreen_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
 	.get_xclk = &rv770_get_xclk,
@@ -1599,7 +1601,7 @@ static struct radeon_asic cayman_asic = {
 	.resume = &cayman_resume,
 	.asic_reset = &cayman_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
 	.get_xclk = &rv770_get_xclk,
@@ -1611,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
 	.vm = {
 		.init = &cayman_vm_init,
 		.fini = &cayman_vm_fini,
-		.set_page = &cayman_dma_vm_set_page,
+		.copy_pages = &cayman_dma_vm_copy_pages,
+		.write_pages = &cayman_dma_vm_write_pages,
+		.set_pages = &cayman_dma_vm_set_pages,
+		.pad_ib = &cayman_dma_vm_pad_ib,
 	},
 	.ring = {
 		[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1699,7 +1704,7 @@ static struct radeon_asic trinity_asic = {
 	.resume = &cayman_resume,
 	.asic_reset = &cayman_asic_reset,
 	.vga_set_state = &r600_vga_set_state,
-	.ioctl_wait_idle = r600_ioctl_wait_idle,
+	.mmio_hdp_flush = r600_mmio_hdp_flush,
 	.gui_idle = &r600_gui_idle,
 	.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
 	.get_xclk = &r600_get_xclk,
@@ -1711,7 +1716,10 @@
|
||||
.vm = {
|
||||
.init = &cayman_vm_init,
|
||||
.fini = &cayman_vm_fini,
|
||||
.set_page = &cayman_dma_vm_set_page,
|
||||
.copy_pages = &cayman_dma_vm_copy_pages,
|
||||
.write_pages = &cayman_dma_vm_write_pages,
|
||||
.set_pages = &cayman_dma_vm_set_pages,
|
||||
.pad_ib = &cayman_dma_vm_pad_ib,
|
||||
},
|
||||
.ring = {
|
||||
[RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
|
||||
@ -1829,7 +1837,7 @@ static struct radeon_asic si_asic = {
|
||||
.resume = &si_resume,
|
||||
.asic_reset = &si_asic_reset,
|
||||
.vga_set_state = &r600_vga_set_state,
|
||||
.ioctl_wait_idle = r600_ioctl_wait_idle,
|
||||
.mmio_hdp_flush = r600_mmio_hdp_flush,
|
||||
.gui_idle = &r600_gui_idle,
|
||||
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
|
||||
.get_xclk = &si_get_xclk,
|
||||
@ -1841,7 +1849,10 @@ static struct radeon_asic si_asic = {
|
||||
.vm = {
|
||||
.init = &si_vm_init,
|
||||
.fini = &si_vm_fini,
|
||||
.set_page = &si_dma_vm_set_page,
|
||||
.copy_pages = &si_dma_vm_copy_pages,
|
||||
.write_pages = &si_dma_vm_write_pages,
|
||||
.set_pages = &si_dma_vm_set_pages,
|
||||
.pad_ib = &cayman_dma_vm_pad_ib,
|
||||
},
|
||||
.ring = {
|
||||
[RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
|
||||
@ -1987,7 +1998,7 @@ static struct radeon_asic ci_asic = {
|
||||
.resume = &cik_resume,
|
||||
.asic_reset = &cik_asic_reset,
|
||||
.vga_set_state = &r600_vga_set_state,
|
||||
.ioctl_wait_idle = NULL,
|
||||
.mmio_hdp_flush = &r600_mmio_hdp_flush,
|
||||
.gui_idle = &r600_gui_idle,
|
||||
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
|
||||
.get_xclk = &cik_get_xclk,
|
||||
@ -1999,7 +2010,10 @@ static struct radeon_asic ci_asic = {
|
||||
.vm = {
|
||||
.init = &cik_vm_init,
|
||||
.fini = &cik_vm_fini,
|
||||
.set_page = &cik_sdma_vm_set_page,
|
||||
.copy_pages = &cik_sdma_vm_copy_pages,
|
||||
.write_pages = &cik_sdma_vm_write_pages,
|
||||
.set_pages = &cik_sdma_vm_set_pages,
|
||||
.pad_ib = &cik_sdma_vm_pad_ib,
|
||||
},
|
||||
.ring = {
|
||||
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
|
||||
@ -2091,7 +2105,7 @@ static struct radeon_asic kv_asic = {
|
||||
.resume = &cik_resume,
|
||||
.asic_reset = &cik_asic_reset,
|
||||
.vga_set_state = &r600_vga_set_state,
|
||||
.ioctl_wait_idle = NULL,
|
||||
.mmio_hdp_flush = &r600_mmio_hdp_flush,
|
||||
.gui_idle = &r600_gui_idle,
|
||||
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
|
||||
.get_xclk = &cik_get_xclk,
|
||||
@ -2103,7 +2117,10 @@ static struct radeon_asic kv_asic = {
|
||||
.vm = {
|
||||
.init = &cik_vm_init,
|
||||
.fini = &cik_vm_fini,
|
||||
.set_page = &cik_sdma_vm_set_page,
|
||||
.copy_pages = &cik_sdma_vm_copy_pages,
|
||||
.write_pages = &cik_sdma_vm_write_pages,
|
||||
.set_pages = &cik_sdma_vm_set_pages,
|
||||
.pad_ib = &cik_sdma_vm_pad_ib,
|
||||
},
|
||||
.ring = {
|
||||
[RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
|
||||
@ -2457,7 +2474,7 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
rdev->cg_flags =
|
||||
RADEON_CG_SUPPORT_GFX_MGCG |
|
||||
RADEON_CG_SUPPORT_GFX_MGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGCG |
|
||||
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
|
||||
RADEON_CG_SUPPORT_GFX_CGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS_LS |
|
||||
@ -2476,7 +2493,7 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
rdev->cg_flags =
|
||||
RADEON_CG_SUPPORT_GFX_MGCG |
|
||||
RADEON_CG_SUPPORT_GFX_MGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGCG |
|
||||
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
|
||||
RADEON_CG_SUPPORT_GFX_CGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS |
|
||||
RADEON_CG_SUPPORT_GFX_CP_LS |
|
||||
@ -2502,7 +2519,7 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
rdev->cg_flags =
|
||||
RADEON_CG_SUPPORT_GFX_MGCG |
|
||||
RADEON_CG_SUPPORT_GFX_MGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGCG |
|
||||
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
|
||||
RADEON_CG_SUPPORT_GFX_CGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS_LS |
|
||||
@ -2530,7 +2547,7 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
rdev->cg_flags =
|
||||
RADEON_CG_SUPPORT_GFX_MGCG |
|
||||
RADEON_CG_SUPPORT_GFX_MGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGCG |
|
||||
/*RADEON_CG_SUPPORT_GFX_CGCG |*/
|
||||
RADEON_CG_SUPPORT_GFX_CGLS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS |
|
||||
RADEON_CG_SUPPORT_GFX_CGTS_LS |
|
||||
|
@ -68,7 +68,7 @@ int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr);
uint64_t addr, uint32_t flags);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@ -148,7 +148,8 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void r100_gfx_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);

void r100_ring_hdp_flush(struct radeon_device *rdev,
struct radeon_ring *ring);
/*
* r200,rv250,rs300,rv280
*/
@ -173,7 +174,7 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr);
uint64_t addr, uint32_t flags);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@ -209,7 +210,7 @@ extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr);
uint64_t addr, uint32_t flags);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@ -233,7 +234,7 @@ void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr);
uint64_t addr, uint32_t flags);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
@ -351,7 +352,7 @@ void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
extern void r600_mmio_hdp_flush(struct radeon_device *rdev);
extern bool r600_gui_idle(struct radeon_device *rdev);
extern void r600_pm_misc(struct radeon_device *rdev);
extern void r600_pm_init_profile(struct radeon_device *rdev);
@ -606,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
void cayman_dma_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);

void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void cayman_dma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cayman_dma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cayman_dma_vm_pad_ib(struct radeon_ib *ib);

void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);

@ -693,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
void si_dma_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);

void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void si_dma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_dma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);

void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@ -771,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
void cik_sdma_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);

void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
void cik_sdma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cik_sdma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void cik_sdma_vm_pad_ib(struct radeon_ib *ib);

void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,

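Every GART set_page hook above now takes a uint32_t flags word; this is the plumbing that later lets GTT buffers be mapped cached, uncached or write-combined. A minimal sketch of what a backend might do with it; the EXAMPLE_PTE_* bit names are placeholders, only the RADEON_GART_PAGE_* flag names come from this series:

static void example_gart_set_page(struct radeon_device *rdev, unsigned i,
                                  uint64_t addr, uint32_t flags)
{
    uint64_t *gtt = rdev->gart.ptr;    /* CPU mapping of the GART table */
    uint64_t entry = addr & ~0xFFFull; /* page-aligned address */

    if (flags & RADEON_GART_PAGE_VALID)
        entry |= EXAMPLE_PTE_VALID;
    if (flags & RADEON_GART_PAGE_READ)
        entry |= EXAMPLE_PTE_READABLE;
    if (flags & RADEON_GART_PAGE_WRITE)
        entry |= EXAMPLE_PTE_WRITEABLE;
    if (flags & RADEON_GART_PAGE_SNOOP)
        entry |= EXAMPLE_PTE_SNOOPED;  /* coherent with CPU caches */

    gtt[i] = entry; /* real backends use MMIO-safe stores here */
}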
@ -1963,7 +1963,7 @@ static const char *thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
"lm64",
"lm63", /* lm64 */
"f75375",
"asc7xxx",
};
@ -1974,7 +1974,7 @@ static const char *pp_lib_thermal_controller_names[] = {
"adm1032",
"adm1030",
"max6649",
"lm64",
"lm63", /* lm64 */
"f75375",
"RV6xx",
"RV770",
@ -3236,6 +3236,41 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *r
return 0;
}

union get_voltage_info {
struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
};

int radeon_atom_get_voltage_evv(struct radeon_device *rdev,
u16 virtual_voltage_id,
u16 *voltage)
{
int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
u32 entry_id;
u32 count = rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
union get_voltage_info args;

for (entry_id = 0; entry_id < count; entry_id++) {
if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
virtual_voltage_id)
break;
}

if (entry_id >= count)
return -EINVAL;

args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);

atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);

return 0;
}

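A hedged usage sketch for the new helper; the caller shape is inferred from the signature above, and the dpm code that actually consumes EVV voltages lives elsewhere in this series:

u16 vddc;

if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) == 0)
    DRM_DEBUG("EVV voltage for virtual id 0x%04x: %u\n",
              virtual_voltage_id, vddc);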
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
@ -3397,6 +3432,50 @@ radeon_atom_is_voltage_gpio(struct radeon_device *rdev,
return false;
}

int radeon_atom_get_svi2_info(struct radeon_device *rdev,
u8 voltage_type,
u8 *svd_gpio_id, u8 *svc_gpio_id)
{
int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
u8 frev, crev;
u16 data_offset, size;
union voltage_object_info *voltage_info;
union voltage_object *voltage_object = NULL;

if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
&frev, &crev, &data_offset)) {
voltage_info = (union voltage_object_info *)
(rdev->mode_info.atom_context->bios + data_offset);

switch (frev) {
case 3:
switch (crev) {
case 1:
voltage_object = (union voltage_object *)
atom_lookup_voltage_object_v3(&voltage_info->v3,
voltage_type,
VOLTAGE_OBJ_SVID2);
if (voltage_object) {
*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
} else {
return -EINVAL;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}
break;
default:
DRM_ERROR("unknown voltage object table\n");
return -EINVAL;
}

}
return 0;
}

int radeon_atom_get_max_voltage(struct radeon_device *rdev,
u8 voltage_type, u16 *max_voltage)
{

@ -97,7 +97,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
int time;

n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
if (r) {
goto out_cleanup;
}
@ -109,7 +109,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
if (r) {
goto out_cleanup;
}
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
if (r) {
goto out_cleanup;
}

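Both call sites above gain a new argument, the 0 between the domain and the NULL. That is the buffer-object creation flags word introduced by this series for the uncached/write-combined GTT work; 0 keeps the old cached default. A sketch of the updated signature as inferred from these call sites, not copied from radeon.h:

int radeon_bo_create(struct radeon_device *rdev, unsigned long size,
                     int byte_align, bool kernel, u32 domain,
                     u32 flags, struct sg_table *sg,
                     struct radeon_bo **bo_ptr);

/* flags = 0 preserves the previous (cached) GTT behaviour: */
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);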
@ -107,7 +107,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@ -115,7 +115,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@ -124,7 +124,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
drm_detect_hdmi_monitor(radeon_connector->edid)) {
drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@ -148,7 +148,7 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
break;
}

if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* hdmi deep color only implemented on DCE4+ */
if ((bpc > 8) && !ASIC_IS_DCE4(rdev)) {
DRM_DEBUG("%s: HDMI deep color %d bpc unsupported. Using 8 bpc.\n",
@ -197,10 +197,19 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
connector->name, bpc);
}
}
else if (bpc > 8) {
/* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
connector->name);
bpc = 8;
}
}

if ((radeon_deep_color == 0) && (bpc > 8))
if ((radeon_deep_color == 0) && (bpc > 8)) {
DRM_DEBUG("%s: Deep color disabled. Set radeon module param deep_color=1 to enable.\n",
connector->name);
bpc = 8;
}

DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
connector->name, connector->display_info.bpc, bpc);
@ -262,6 +271,94 @@ static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector,
return NULL;
}

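The new else-branch falls back to 8 bpc whenever the sink's EDID lacks max_tmds_clock, because deep color scales the TMDS rate with the bit depth and the limit cannot be validated without it. The arithmetic being guarded, as a small sketch (mine, not from the patch):

/* A 297000 kHz 4K mode at 12 bpc needs 297000 * 12 / 8 = 445500 kHz,
 * which must stay within the sink's advertised max_tmds_clock. */
static int required_tmds_clock_khz(int mode_clock_khz, int bpc)
{
    return mode_clock_khz * bpc / 8;
}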
struct edid *radeon_connector_edid(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_property_blob *edid_blob = connector->edid_blob_ptr;

if (radeon_connector->edid) {
return radeon_connector->edid;
} else if (edid_blob) {
struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
if (edid)
radeon_connector->edid = edid;
}
return radeon_connector->edid;
}

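The helper returns the cached EDID, rehydrating it from the connector's EDID property blob when needed (which also covers EDIDs overridden from userspace), so call sites never touch the DDC bus. This is the pattern used throughout this patch; as far as I can tell the DRM helper treats a NULL EDID as "not HDMI", so no separate check is needed:

struct edid *edid = radeon_connector_edid(connector);

if (drm_detect_hdmi_monitor(edid)) {
    /* HDMI sink: apply HDMI clock limits, deep color rules, etc. */
}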
static void radeon_connector_get_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);

if (radeon_connector->edid)
return;

/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);

if ((radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) &&
radeon_connector->ddc_bus->has_aux) {
radeon_connector->edid = drm_get_edid(connector,
&radeon_connector->ddc_bus->aux.ddc);
} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;

if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
radeon_connector->ddc_bus->has_aux)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->aux.ddc);
else if (radeon_connector->ddc_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
} else if (radeon_connector->ddc_bus) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
}

if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
} else {
/* some servers provide a hardcoded edid in rom for KVMs */
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
}
}

static void radeon_connector_free_edid(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);

if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
}

static int radeon_ddc_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;

if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(connector, radeon_connector->edid);
ret = drm_add_edid_modes(connector, radeon_connector->edid);
drm_edid_to_eld(connector, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(connector, NULL);
return 0;
}

static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
@ -271,6 +368,27 @@ static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *conn
return NULL;
}

static void radeon_get_native_mode(struct drm_connector *connector)
{
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
struct radeon_encoder *radeon_encoder;

if (encoder == NULL)
return;

radeon_encoder = to_radeon_encoder(encoder);

if (!list_empty(&connector->probed_modes)) {
struct drm_display_mode *preferred_mode =
list_first_entry(&connector->probed_modes,
struct drm_display_mode, head);

radeon_encoder->native_mode = *preferred_mode;
} else {
radeon_encoder->native_mode.clock = 0;
}
}

/*
* radeon_connector_analog_encoder_conflict_solve
* - search for other connectors sharing this encoder
@ -571,6 +689,35 @@ static int radeon_connector_set_property(struct drm_connector *connector, struct
radeon_property_change_mode(&radeon_encoder->base);
}

if (property == dev->mode_config.scaling_mode_property) {
enum radeon_rmx_type rmx_type;

if (connector->encoder)
radeon_encoder = to_radeon_encoder(connector->encoder);
else {
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector));
}

switch (val) {
default:
case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
}
if (radeon_encoder->rmx_type == rmx_type)
return 0;

if ((rmx_type != DRM_MODE_SCALE_NONE) &&
(radeon_encoder->native_mode.clock == 0))
return 0;

radeon_encoder->rmx_type = rmx_type;

radeon_property_change_mode(&radeon_encoder->base);
}

return 0;
}

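With this handler in place, the scaler on non-fixed-mode displays can be driven through the standard DRM connector property. A hedged userspace sketch using libdrm; the calls are standard libdrm API, "scaling mode" is the name drm_mode_create_scaling_mode_property() gives the property, and the connector id is assumed to be known:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_fullscreen_scaling(int fd, uint32_t connector_id)
{
    drmModeObjectPropertiesPtr props =
        drmModeObjectGetProperties(fd, connector_id,
                                   DRM_MODE_OBJECT_CONNECTOR);
    int ret = -1;
    uint32_t i;

    if (!props)
        return -1;
    for (i = 0; i < props->count_props; i++) {
        drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

        if (!prop)
            continue;
        if (!strcmp(prop->name, "scaling mode"))
            ret = drmModeObjectSetProperty(fd, connector_id,
                                           DRM_MODE_OBJECT_CONNECTOR,
                                           prop->prop_id,
                                           DRM_MODE_SCALE_FULLSCREEN);
        drmModeFreeProperty(prop);
    }
    drmModeFreeObjectProperties(props);
    return ret;
}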
@ -611,22 +758,20 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,

static int radeon_lvds_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *encoder;
int ret = 0;
struct drm_display_mode *mode;

if (radeon_connector->ddc_bus) {
ret = radeon_ddc_get_modes(radeon_connector);
if (ret > 0) {
encoder = radeon_best_single_encoder(connector);
if (encoder) {
radeon_fixup_lvds_native_mode(encoder, connector);
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
return ret;
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
if (ret > 0) {
encoder = radeon_best_single_encoder(connector);
if (encoder) {
radeon_fixup_lvds_native_mode(encoder, connector);
/* add scaled modes */
radeon_add_common_modes(encoder, connector);
}
return ret;
}

encoder = radeon_best_single_encoder(connector);
@ -701,16 +846,9 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
}

/* check for edid as well */
radeon_connector_get_edid(connector);
if (radeon_connector->edid)
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
if (radeon_connector->edid)
ret = connector_status_connected;
}
}
/* check acpi lid status ??? */

radeon_connector_update_scratch_regs(connector, ret);
@ -723,8 +861,7 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);

if (radeon_connector->edid)
kfree(radeon_connector->edid);
radeon_connector_free_edid(connector);
kfree(radeon_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@ -783,10 +920,12 @@ static const struct drm_connector_funcs radeon_lvds_connector_funcs = {

static int radeon_vga_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;

ret = radeon_ddc_get_modes(radeon_connector);
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);

radeon_get_native_mode(connector);

return ret;
}
@ -829,28 +968,26 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_connector_free_edid(connector);
radeon_connector_get_edid(connector);

if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
ret = connector_status_connected;
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
radeon_connector->use_digital =
!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);

/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (radeon_connector->use_digital && radeon_connector->shared_ddc) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
} else
} else {
ret = connector_status_connected;
}
}
} else {

@ -985,15 +1122,6 @@ static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.set_property = radeon_connector_set_property,
};

static int radeon_dvi_get_modes(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret;

ret = radeon_ddc_get_modes(radeon_connector);
return ret;
}

static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@ -1051,18 +1179,16 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
dret = radeon_ddc_probe(radeon_connector, false);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
radeon_connector_free_edid(connector);
radeon_connector_get_edid(connector);

if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
connector->name);
/* rs690 seems to have a problem with connectors not existing and always
* return a block of 0's. If we see this just stop polling on this output */
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) &&
radeon_connector->base.null_edid_counter) {
ret = connector_status_disconnected;
DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n",
connector->name);
@ -1072,18 +1198,18 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
broken_edid = true; /* defer use_digital to later */
}
} else {
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
radeon_connector->use_digital =
!!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);

/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!radeon_connector->use_digital) && radeon_connector->shared_ddc) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
} else
} else {
ret = connector_status_connected;

}
/* This gets complicated. We have boards with VGA + HDMI with a
* shared DDC line and we have boards with DVI-D + HDMI with a shared
* DDC line. The latter is more complex because with DVI<->HDMI adapters
@ -1103,8 +1229,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
/* hpd is our only option in this case */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
radeon_connector_free_edid(connector);
ret = connector_status_disconnected;
}
}
@ -1265,7 +1390,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@ -1284,7 +1409,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
}

static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
.get_modes = radeon_vga_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
@ -1313,7 +1438,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
ret = radeon_ddc_get_modes(radeon_connector);
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
@ -1324,7 +1450,8 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
ret = radeon_ddc_get_modes(radeon_connector);
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);
}

if (ret > 0) {
@ -1357,7 +1484,10 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
ret = radeon_ddc_get_modes(radeon_connector);
radeon_connector_get_edid(connector);
ret = radeon_ddc_get_modes(connector);

radeon_get_native_mode(connector);
}

return ret;
@ -1391,7 +1521,7 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
return ENCODER_OBJECT_ID_NONE;
}

bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
@ -1448,10 +1578,7 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
goto out;
}

if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
radeon_connector_free_edid(connector);

if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
@ -1557,7 +1684,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
(radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return radeon_dp_mode_valid_helper(connector, mode);
} else {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
/* HDMI 1.3+ supports max clock of 340 Mhz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@ -1717,6 +1844,9 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@ -1737,6 +1867,10 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);

drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);

drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
@ -1787,6 +1921,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
if (ASIC_IS_AVIVO(rdev))
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
@ -1805,6 +1943,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.load_detect_property,
1);
if (ASIC_IS_AVIVO(rdev))
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->interlace_allowed = true;
@ -1838,17 +1980,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
if (ASIC_IS_AVIVO(rdev)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
}
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_object_attach_property(&radeon_connector->base.base,
@ -1888,17 +2031,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
if (ASIC_IS_AVIVO(rdev)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
}
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
@ -1935,18 +2079,18 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.underscan_vborder_property,
0);
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);
drm_object_attach_property(&radeon_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_NONE);
}
if (ASIC_IS_DCE2(rdev) && (radeon_audio != 0)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.audio_property,
RADEON_AUDIO_AUTO);
}
if (ASIC_IS_AVIVO(rdev)) {
drm_object_attach_property(&radeon_connector->base.base,
rdev->mode_info.dither_property,
RADEON_FMT_DITHER_DISABLE);

}
connector->interlace_allowed = true;
/* in theory with a DP to VGA converter... */
connector->doublescan_allowed = false;

@ -500,7 +500,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
if (r)
return r;
}
return 0;

return radeon_vm_clear_invalids(rdev, vm);
}

static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,

@ -103,6 +103,31 @@ static const char radeon_family_name[][16] = {
"LAST",
};

#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)

struct radeon_px_quirk {
u32 chip_vendor;
u32 chip_device;
u32 subsys_vendor;
u32 subsys_device;
u32 px_quirk_flags;
};

static struct radeon_px_quirk radeon_px_quirk_list[] = {
/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
* https://bugzilla.kernel.org/show_bug.cgi?id=74551
*/
{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
};

bool radeon_is_px(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@ -112,6 +137,26 @@ bool radeon_is_px(struct drm_device *dev)
return false;
}

static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
{
struct radeon_px_quirk *p = radeon_px_quirk_list;

/* Apply PX quirks */
while (p && p->chip_device != 0) {
if (rdev->pdev->vendor == p->chip_vendor &&
rdev->pdev->device == p->chip_device &&
rdev->pdev->subsystem_vendor == p->subsys_vendor &&
rdev->pdev->subsystem_device == p->subsys_device) {
rdev->px_quirk_flags = p->px_quirk_flags;
break;
}
++p;
}

if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
rdev->flags &= ~RADEON_IS_PX;
}

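This table replaces the hardcoded MacBook Pro check deleted further down; adding a new workaround now means appending one line before the zero terminator. A hypothetical entry (the IDs are invented purely for illustration):

/* hypothetical board that needs PX disabled */
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x1234, RADEON_PX_QUIRK_DISABLE_PX },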
/**
* radeon_program_register_sequence - program an array of registers.
*
@ -385,7 +430,8 @@ int radeon_wb_init(struct radeon_device *rdev)

if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
RADEON_GEM_DOMAIN_GTT, 0, NULL,
&rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
@ -1077,7 +1123,19 @@ static void radeon_check_arguments(struct radeon_device *rdev)
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
if (radeon_vm_block_size == -1) {

/* Total bits covered by PD + PTs */
unsigned bits = ilog2(radeon_vm_size) + 18;

/* Make sure the PD is 4K in size up to 8GB address space.
Above that split equal between PD and PTs */
if (radeon_vm_size <= 8)
radeon_vm_block_size = bits - 9;
else
radeon_vm_block_size = (bits + 3) / 2;

} else if (radeon_vm_block_size < 9) {
dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
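A worked example of the new default, as a comment (my arithmetic, not part of the patch):

/* With the new default radeon_vm_size = 8 (GB) there are
 * 8 GiB / 4 KiB = 2^21 GPU pages, so bits = ilog2(8) + 18 = 21.
 * vm_size <= 8, so radeon_vm_block_size = 21 - 9 = 12: a 512-entry
 * (4 KiB) page directory whose PDEs each cover 2^12 pages.
 * For vm_size = 64, bits = 24 and the split is (24 + 3) / 2 = 13
 * bits of page table, leaving 11 bits of page directory. */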
@ -1091,25 +1149,6 @@ static void radeon_check_arguments(struct radeon_device *rdev)
}
}

/**
* radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
* needed for waking up.
*
* @pdev: pci dev pointer
*/
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{

/* 6600m in a macbook pro */
if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
pdev->subsystem_device == 0x00e2) {
printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
return true;
}

return false;
}

/**
* radeon_switcheroo_set_state - set switcheroo state
*
@ -1122,6 +1161,7 @@ static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct radeon_device *rdev = dev->dev_private;

if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
return;
@ -1133,7 +1173,7 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
dev->pdev->d3_delay = 20;

radeon_resume_kms(dev, true, true);
@ -1337,6 +1377,9 @@ int radeon_device_init(struct radeon_device *rdev,
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");

if (rdev->flags & RADEON_IS_PX)
radeon_device_handle_px_quirks(rdev);

/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */

@ -293,6 +293,18 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
if (radeon_crtc == NULL)
return;

/* Skip the pageflip completion check below (based on polling) on
* asics which reliably support hw pageflip completion irqs. pflip
* irqs are a reliable and race-free method of handling pageflip
* completion detection. A use_pflipirq module parameter < 2 allows
* to override this in case of asics with faulty pflip irqs.
* A module parameter of 0 would only use this polling based path,
* a parameter of 1 would use pflip irq only as a backup to this
* path, as in Linux 3.16.
*/
if ((radeon_use_pflipirq == 2) && ASIC_IS_DCE4(rdev))
return;

spin_lock_irqsave(&rdev->ddev->event_lock, flags);
if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
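The comment above defines the three policies behind the new use_pflipirq parameter introduced later in this series. For reference, a hedged example of overriding the default using standard module-parameter syntax in a modprobe.d file (the parameter name comes from the radeon_drv.c hunk below):

# fall back to pflip irqs only as a backup to polling, as in Linux 3.16
options radeon use_pflipirq=1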
@ -823,64 +835,6 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
return ret;
}

int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
int ret = 0;

/* don't leak the edid if we already fetched it in detect() */
if (radeon_connector->edid)
goto got_edid;

/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);

if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
ENCODER_OBJECT_ID_NONE) {
if (radeon_connector->ddc_bus->has_aux)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->aux.ddc);
} else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;

if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
radeon_connector->ddc_bus->has_aux)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->aux.ddc);
else if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
} else {
if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
}

if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
} else
/* some servers provide a hardcoded edid in rom for KVMs */
radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
}
if (radeon_connector->edid) {
got_edid:
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
return ret;
}
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL);
return 0;
}

/* avivo */

/**
@ -1749,7 +1703,7 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
(!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
drm_detect_hdmi_monitor(radeon_connector->edid) &&
drm_detect_hdmi_monitor(radeon_connector_edid(connector)) &&
is_hdtv_mode(mode)))) {
if (radeon_encoder->underscan_hborder != 0)
radeon_crtc->h_border = radeon_encoder->underscan_hborder;

@ -82,9 +82,11 @@
* 2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
* CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
* 2.39.0 - Add INFO query for number of active CUs
* 2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
* CS to GPU
*/
#define KMS_DRIVER_MAJOR 2
#define KMS_DRIVER_MINOR 39
#define KMS_DRIVER_MINOR 40
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
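Userspace can gate use of the new GTT caching flags on this interface bump. A hedged sketch using standard libdrm calls; the 2.40 threshold follows from the version comment above:

#include <xf86drm.h>

static int radeon_supports_gtt_wc(int fd)
{
    drmVersionPtr v = drmGetVersion(fd);
    int ok;

    if (!v)
        return 0;
    ok = v->version_major > 2 ||
         (v->version_major == 2 && v->version_minor >= 40);
    drmFreeVersion(v);
    return ok;
}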
@ -173,9 +175,10 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
int radeon_vm_size = 8;
int radeon_vm_block_size = -1;
int radeon_deep_color = 0;
int radeon_use_pflipirq = 2;

MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@ -246,12 +249,15 @@ module_param_named(hard_reset, radeon_hard_reset, int, 0444);
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);

MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);

MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, radeon_deep_color, int, 0444);

MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);

static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};

@ -343,7 +343,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else
@ -365,7 +365,7 @@ bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector_edid(connector))) {
if (pixel_clock > 340000)
return true;
else

@ -127,8 +127,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, true,
&gobj);
0, true, &gobj);
if (ret) {
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);

@ -128,7 +128,7 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->gart.robj);
0, NULL, &rdev->gart.robj);
if (r) {
return r;
}
@ -243,7 +243,8 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_base);
radeon_gart_set_page(rdev, t, page_base,
RADEON_GART_PAGE_DUMMY);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
@ -261,13 +262,15 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
* @flags: RADEON_GART_PAGE_* flags
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr)
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint32_t flags)
{
unsigned t;
unsigned p;
@ -287,7 +290,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
radeon_gart_set_page(rdev, t, page_base, flags);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
@ -297,33 +300,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
return 0;
}

/**
* radeon_gart_restore - bind all pages in the gart page table
*
* @rdev: radeon_device pointer
*
* Binds all pages in the gart page table (all asics).
* Used to rebuild the gart table on device startup or resume.
*/
void radeon_gart_restore(struct radeon_device *rdev)
{
int i, j, t;
u64 page_base;

if (!rdev->gart.ptr) {
return;
}
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
radeon_gart_tlb_flush(rdev);
}

/**
* radeon_gart_init - init the driver info for managing the gart
*
@ -40,9 +40,9 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
}
}

int radeon_gem_object_create(struct radeon_device *rdev, int size,
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
int alignment, int initial_domain,
bool discardable, bool kernel,
u32 flags, bool kernel,
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
@ -55,23 +55,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
alignment = PAGE_SIZE;
}

/* maximun bo size is the minimun btw visible vram and gtt size */
max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
/* Maximum bo size is the unpinned gtt size since we use the gtt to
* handle vram to system pool migrations.
*/
max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
if (size > max_size) {
printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
__func__, __LINE__, size >> 20, max_size >> 20);
DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
size >> 20, max_size >> 20);
return -ENOMEM;
}

retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
flags, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
@ -208,18 +211,15 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
unsigned i;

man = &rdev->mman.bdev.man[TTM_PL_VRAM];

args->vram_size = rdev->mc.real_vram_size;
args->vram_visible = (u64)man->size << PAGE_SHIFT;
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
for(i = 0; i < RADEON_NUM_RINGS; ++i)
args->gart_size -= rdev->ring[i].ring_size;
args->vram_visible -= rdev->vram_pin_size;
args->gart_size = rdev->mc.gtt_size;
args->gart_size -= rdev->gart_pin_size;

return 0;
}

@ -252,8 +252,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
args->initial_domain, false,
false, &gobj);
args->initial_domain, args->flags,
false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@ -358,16 +358,18 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct radeon_bo *robj;
int r;
uint32_t cur_placement = 0;

gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(rdev, robj);
r = radeon_bo_wait(robj, &cur_placement, false);
/* Flush HDP cache via MMIO if necessary */
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
@ -461,11 +463,6 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}
if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
dev_err(&dev->pdev->dev, "only supported snooped mapping for now\n");
args->operation = RADEON_VA_RESULT_ERROR;
return -EINVAL;
}

switch (args->operation) {
case RADEON_VA_MAP:
@ -499,9 +496,9 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,

switch (args->operation) {
case RADEON_VA_MAP:
if (bo_va->soffset) {
if (bo_va->it.start) {
args->operation = RADEON_VA_RESULT_VA_EXIST;
args->offset = bo_va->soffset;
args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
goto out;
}
r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@ -572,9 +569,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
args->size = ALIGN(args->size, PAGE_SIZE);

r = radeon_gem_object_create(rdev, args->size, 0,
RADEON_GEM_DOMAIN_VRAM,
false, ttm_bo_type_device,
&gobj);
RADEON_GEM_DOMAIN_VRAM, 0,
false, &gobj);
if (r)
return -ENOMEM;
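
Editor's sketch (not part of the commit): with the new flags argument plumbed through radeon_gem_object_create(), userspace can ask for write-combined or uncached GTT buffers at creation time. A minimal caller could look like the following; the struct and flag names are assumed from include/uapi/drm/radeon_drm.h (struct drm_radeon_gem_create has carried a flags field for a while; only the GTT_WC/UC bits are new, hence the KMS minor bump to 2.40 above):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Hedged sketch: create a write-combined GTT buffer object. */
static int create_wc_gtt_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_radeon_gem_create args;

	memset(&args, 0, sizeof(args));
	args.size = size;
	args.alignment = 4096;
	args.initial_domain = RADEON_GEM_DOMAIN_GTT;
	args.flags = RADEON_GEM_GTT_WC;	/* new in KMS 2.40 */

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args) != 0)
		return -1;

	*handle = args.handle;
	return 0;
}

Callers should gate the new bits on the reported DRM minor version; older kernels never honored them.
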
drivers/gpu/drm/radeon/radeon_ib.c (new file, 319 lines)
@ -0,0 +1,319 @@
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
* Christian König
*/
#include <drm/drmP.h>
#include "radeon.h"

/*
* IB
* IBs (Indirect Buffers) and areas of GPU accessible memory where
* commands are stored. You can put a pointer to the IB in the
* command ring and the hw will fetch the commands from the IB
* and execute them. Generally userspace acceleration drivers
* produce command buffers which are send to the kernel and
* put in IBs for execution by the requested ring.
*/
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
* radeon_ib_get - request an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ring: ring index the IB is associated with
* @ib: IB object returned
* @size: requested IB size
*
* Request an IB (all asics). IBs are allocated using the
* suballocator.
* Returns 0 on success, error on failure.
*/
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
int r;

r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}

r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
return r;
}

ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->vm = vm;
if (vm) {
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;

return 0;
}

/**
* radeon_ib_free - free an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ib: IB object to free
*
* Free an IB (all asics).
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}

/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
*
* On SI, there are two parallel engines fed from the primary ring,
* the CE (Constant Engine) and the DE (Drawing Engine). Since
* resource descriptors have moved to memory, the CE allows you to
* prime the caches while the DE is updating register state so that
* the resource descriptors will be already in cache when the draw is
* processed. To accomplish this, the userspace driver submits two
* IBs, one for the CE and one for the DE. If there is a CE IB (called
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
int r = 0;

if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}

/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}

/* grab a vm id if necessary */
if (ib->vm) {
struct radeon_fence *vm_id_fence;
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
}

/* sync with other rings */
r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}

if (ib->vm)
radeon_vm_flush(rdev, ib->vm, ib->ring);

if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}

if (ib->vm)
radeon_vm_fence(rdev, ib->vm, ib->fence);

radeon_ring_unlock_commit(rdev, ring);
return 0;
}

/**
* radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Initialize the suballocator to manage a pool of memory
* for use as IBs (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ib_pool_init(struct radeon_device *rdev)
{
int r;

if (rdev->ib_pool_ready) {
return 0;
}

if (rdev->family >= CHIP_BONAIRE) {
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_GTT_WC);
} else {
/* Before CIK, it's better to stick to cacheable GTT due
* to the command stream checking
*/
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT, 0);
}
if (r) {
return r;
}

r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
if (r) {
return r;
}

rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
return 0;
}

/**
* radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Tear down the suballocator managing the pool of memory
* for use as IBs (all asics).
*/
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}

/**
* radeon_ib_ring_tests - test IBs on the rings
*
* @rdev: radeon_device pointer
*
* Test an IB (Indirect Buffer) on each ring.
* If the test fails, disable the ring.
* Returns 0 on success, error if the primary GFX ring
* IB test fails.
*/
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
int r;

for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_ring *ring = &rdev->ring[i];

if (!ring->ready)
continue;

r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
rdev->needs_reset = false;

if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
rdev->accel_working = false;
return r;

} else {
/* still not good, but we can live with it */
DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
}
}
}
return 0;
}

/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;

radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

return 0;

}

static struct drm_info_list radeon_debugfs_sa_list[] = {
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
}
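
Editor's sketch (not part of the commit): the entry points collected in this new file pair up as get → fill → schedule → wait → free. A caller shaped like the driver's own IB ring tests might look like this; error handling is trimmed and PACKET2(0) is just a harmless filler packet:

/* Hedged sketch of the IB lifecycle; not taken from this diff. */
static int example_submit_nop(struct radeon_device *rdev)
{
	struct radeon_ib ib;
	int r;

	/* 64 dwords from the suballocator, no VM attached */
	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 64);
	if (r)
		return r;

	ib.ptr[0] = PACKET2(0);
	ib.length_dw = 1;

	r = radeon_ib_schedule(rdev, &ib, NULL);	/* no CONST_IB */
	if (!r)
		r = radeon_fence_wait(ib.fence, false);

	radeon_ib_free(rdev, &ib);
	return r;
}
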
@ -254,7 +254,18 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
break;
case RADEON_INFO_ACCEL_WORKING2:
*value = rdev->accel_working;
if (rdev->family == CHIP_HAWAII) {
if (rdev->accel_working) {
if (rdev->new_fw)
*value = 3;
else
*value = 2;
} else {
*value = 0;
}
} else {
*value = rdev->accel_working;
}
break;
case RADEON_INFO_TILING_CONFIG:
if (rdev->family >= CHIP_BONAIRE)
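
Editor's sketch (not part of the commit): userspace distinguishes the Hawaii firmware generations through the tweaked query above; 0 means no acceleration, 2 old firmware, 3 the new unified format. Assuming the uapi names from include/uapi/drm/radeon_drm.h, a Mesa-style probe could read it like this:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Hedged sketch: query ACCEL_WORKING2 and return the raw value. */
static int hawaii_accel_state(int fd)
{
	struct drm_radeon_info info;
	uint32_t value = 0;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_ACCEL_WORKING2;
	info.value = (uint64_t)(uintptr_t)&value;

	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) != 0)
		return -1;
	return (int)value;	/* 0, 2 or 3 on Hawaii */
}
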
@ -685,10 +685,11 @@ extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,

extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);

extern struct edid *radeon_connector_edid(struct drm_connector *connector);

extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
@ -738,7 +739,6 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);

extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
@ -46,16 +46,6 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/

static void radeon_bo_clear_va(struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va, *tmp;

list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
/* remove from all vm address space */
radeon_vm_bo_rmv(bo->rdev, bo_va);
}
}

static void radeon_update_memory_usage(struct radeon_bo *bo,
unsigned mem_type, int sign)
{
@ -90,7 +80,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
radeon_bo_clear_va(bo);
WARN_ON(!list_empty(&bo->va));
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@ -114,15 +104,23 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
if (domain & RADEON_GEM_DOMAIN_GTT) {
if (rbo->rdev->flags & RADEON_IS_AGP) {
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
if (rbo->flags & RADEON_GEM_GTT_UC) {
rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
(rbo->rdev->flags & RADEON_IS_AGP)) {
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_TT;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
}
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
if (rbo->rdev->flags & RADEON_IS_AGP) {
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
if (rbo->flags & RADEON_GEM_GTT_UC) {
rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
rbo->rdev->flags & RADEON_IS_AGP) {
rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_SYSTEM;
} else {
rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
}
@ -146,7 +144,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)

int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct sg_table *sg, struct radeon_bo **bo_ptr)
u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
@ -183,6 +181,12 @@ int radeon_bo_create(struct radeon_device *rdev,
bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
RADEON_GEM_DOMAIN_GTT |
RADEON_GEM_DOMAIN_CPU);

bo->flags = flags;
/* PCI GART is always snooped */
if (!(rdev->flags & RADEON_IS_PCIE))
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
@ -232,6 +236,15 @@ void radeon_bo_kunmap(struct radeon_bo *bo)
ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
if (bo == NULL)
return NULL;

ttm_bo_reference(&bo->tbo);
return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
struct ttm_buffer_object *tbo;
@ -241,9 +254,7 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
down_read(&rdev->pm.mclk_lock);
ttm_bo_unref(&tbo);
up_read(&rdev->pm.mclk_lock);
if (tbo == NULL)
*bo = NULL;
}
@ -292,9 +303,13 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
if (domain == RADEON_GEM_DOMAIN_VRAM)
bo->rdev->vram_pin_size += radeon_bo_size(bo);
else
bo->rdev->gart_pin_size += radeon_bo_size(bo);
} else {
dev_err(bo->rdev->dev, "%p pin failed\n", bo);
}
return r;
}

@ -317,8 +332,14 @@ int radeon_bo_unpin(struct radeon_bo *bo)
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
if (likely(r == 0)) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
else
bo->rdev->gart_pin_size -= radeon_bo_size(bo);
} else {
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
}
return r;
}
@ -124,11 +124,12 @@ extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,

extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain,
bool kernel, u32 domain, u32 flags,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
@ -170,7 +171,8 @@ static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)

extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 align, u32 domain);
unsigned size, u32 align, u32 domain,
u32 flags);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@ -1303,10 +1303,6 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_RS780:
case CHIP_RS880:
case CHIP_RV770:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
case CHIP_CAYMAN:
/* DPM requires the RLC, RV770+ dGPU requires SMC */
if (!rdev->rlc_fw)
rdev->pm.pm_method = PM_METHOD_PROFILE;
@ -1330,6 +1326,10 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
case CHIP_CAYMAN:
case CHIP_ARUBA:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@ -1400,9 +1400,7 @@ static void radeon_pm_fini_old(struct radeon_device *rdev)
}

radeon_hwmon_fini(rdev);

if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
kfree(rdev->pm.power_state);
}

static void radeon_pm_fini_dpm(struct radeon_device *rdev)
@ -1421,9 +1419,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
radeon_dpm_fini(rdev);

radeon_hwmon_fini(rdev);

if (rdev->pm.power_state)
kfree(rdev->pm.power_state);
kfree(rdev->pm.power_state);
}

void radeon_pm_fini(struct radeon_device *rdev)
@ -65,7 +65,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
int ret;

ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
RADEON_GEM_DOMAIN_GTT, sg, &bo);
RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
if (ret)
return ERR_PTR(ret);
@ -26,258 +26,8 @@
* Jerome Glisse
* Christian König
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

/*
* IB
* IBs (Indirect Buffers) and areas of GPU accessible memory where
* commands are stored. You can put a pointer to the IB in the
* command ring and the hw will fetch the commands from the IB
* and execute them. Generally userspace acceleration drivers
* produce command buffers which are send to the kernel and
* put in IBs for execution by the requested ring.
*/
static int radeon_debugfs_sa_init(struct radeon_device *rdev);

/**
* radeon_ib_get - request an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ring: ring index the IB is associated with
* @ib: IB object returned
* @size: requested IB size
*
* Request an IB (all asics). IBs are allocated using the
* suballocator.
* Returns 0 on success, error on failure.
*/
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
int r;

r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}

r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
return r;
}

ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->vm = vm;
if (vm) {
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
ib->is_const_ib = false;

return 0;
}

/**
* radeon_ib_free - free an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ib: IB object to free
*
* Free an IB (all asics).
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}

/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
*
* On SI, there are two parallel engines fed from the primary ring,
* the CE (Constant Engine) and the DE (Drawing Engine). Since
* resource descriptors have moved to memory, the CE allows you to
* prime the caches while the DE is updating register state so that
* the resource descriptors will be already in cache when the draw is
* processed. To accomplish this, the userspace driver submits two
* IBs, one for the CE and one for the DE. If there is a CE IB (called
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
int r = 0;

if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}

/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}

/* grab a vm id if necessary */
if (ib->vm) {
struct radeon_fence *vm_id_fence;
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
radeon_semaphore_sync_to(ib->semaphore, vm_id_fence);
}

/* sync with other rings */
r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}

if (ib->vm)
radeon_vm_flush(rdev, ib->vm, ib->ring);

if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}

if (ib->vm)
radeon_vm_fence(rdev, ib->vm, ib->fence);

radeon_ring_unlock_commit(rdev, ring);
return 0;
}

/**
* radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Initialize the suballocator to manage a pool of memory
* for use as IBs (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ib_pool_init(struct radeon_device *rdev)
{
int r;

if (rdev->ib_pool_ready) {
return 0;
}
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT);
if (r) {
return r;
}

r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
if (r) {
return r;
}

rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
return 0;
}

/**
* radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Tear down the suballocator managing the pool of memory
* for use as IBs (all asics).
*/
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}

/**
* radeon_ib_ring_tests - test IBs on the rings
*
* @rdev: radeon_device pointer
*
* Test an IB (Indirect Buffer) on each ring.
* If the test fails, disable the ring.
* Returns 0 on success, error if the primary GFX ring
* IB test fails.
*/
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
int r;

for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_ring *ring = &rdev->ring[i];

if (!ring->ready)
continue;

r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
rdev->needs_reset = false;

if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
rdev->accel_working = false;
return r;

} else {
/* still not good, but we can live with it */
DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
}
}
}
return 0;
}

/*
* Rings
@ -433,11 +183,21 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
/* If we are emitting the HDP flush via the ring buffer, we need to
* do it before padding.
*/
if (rdev->asic->ring[ring->idx]->hdp_flush)
rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);
/* We pad to match fetch size */
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
mb();
/* If we are emitting the HDP flush via MMIO, we need to do it after
* all CPU writes to VRAM finished.
*/
if (rdev->asic->mmio_hdp_flush)
rdev->asic->mmio_hdp_flush(rdev);
radeon_ring_set_wptr(rdev, ring);
}

@ -641,6 +401,8 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
(rdev->flags & RADEON_IS_PCIE) ?
RADEON_GEM_GTT_WC : 0,
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
@ -791,22 +553,6 @@ static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_vce2", radeon_debugfs_ring_info, 0, &si_vce2_index},
};

static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;

radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);

return 0;

}

static struct drm_info_list radeon_debugfs_sa_list[] = {
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};

#endif

static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
@ -828,12 +574,3 @@ static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ri
#endif
return 0;
}

static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
}
@ -49,7 +49,7 @@ static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 align, u32 domain)
unsigned size, u32 align, u32 domain, u32 flags)
{
int i, r;

@ -65,7 +65,7 @@ int radeon_sa_bo_manager_init(struct radeon_device *rdev,
}

r = radeon_bo_create(rdev, size, align, true,
domain, NULL, &sa_manager->bo);
domain, flags, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@ -56,13 +56,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
n -= rdev->ring[i].ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
n -= rdev->ih.ring_size;
n = rdev->mc.gtt_size - rdev->gart_pin_size;
n /= size;

gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
@ -73,7 +67,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
}

r = radeon_bo_create(rdev, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &vram_obj);
0, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@ -93,7 +87,7 @@ static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
struct radeon_fence *fence = NULL;

r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, NULL, gtt_obj + i);
RADEON_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
@ -72,8 +72,8 @@ TRACE_EVENT(radeon_vm_bo_update,
),

TP_fast_assign(
__entry->soffset = bo_va->soffset;
__entry->eoffset = bo_va->eoffset;
__entry->soffset = bo_va->it.start;
__entry->eoffset = bo_va->it.last + 1;
__entry->flags = bo_va->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
@ -104,6 +104,24 @@ TRACE_EVENT(radeon_vm_set_page,
__entry->flags, __entry->count)
);

TRACE_EVENT(radeon_vm_flush,
TP_PROTO(uint64_t pd_addr, unsigned ring, unsigned id),
TP_ARGS(pd_addr, ring, id),
TP_STRUCT__entry(
__field(u64, pd_addr)
__field(u32, ring)
__field(u32, id)
),

TP_fast_assign(
__entry->pd_addr = pd_addr;
__entry->ring = ring;
__entry->id = id;
),
TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
__entry->pd_addr, __entry->ring, __entry->id)
);

DECLARE_EVENT_CLASS(radeon_fence_request,

TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
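
Editor's note (not part of the diff): radeon_vm_flush joins the existing radeon trace system, so once the series is applied it should be observable through tracefs in the usual way, e.g. echo 1 > /sys/kernel/tracing/events/radeon/radeon_vm_flush/enable (path assumed from the standard tracefs layout); each hit prints the page-directory address, ring and VM id per the TP_printk above.
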
@ -521,6 +521,8 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
struct ttm_mem_reg *bo_mem)
{
struct radeon_ttm_tt *gtt = (void*)ttm;
uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
RADEON_GART_PAGE_WRITE;
int r;

gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
@ -528,8 +530,10 @@ static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
r = radeon_gart_bind(gtt->rdev, gtt->offset,
ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
if (ttm->caching_state == tt_cached)
flags |= RADEON_GART_PAGE_SNOOP;
r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
@ -726,7 +730,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM, 0,
NULL, &rdev->stollen_vga_memory);
if (r) {
return r;
drivers/gpu/drm/radeon/radeon_ucode.c (new file, 167 lines)
@ -0,0 +1,167 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_ucode.h"

static void radeon_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
DRM_DEBUG("ucode_array_offset_bytes: %u\n",
le32_to_cpu(hdr->ucode_array_offset_bytes));
DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}

void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

DRM_DEBUG("MC\n");
radeon_ucode_print_common_hdr(hdr);

if (version_major == 1) {
const struct mc_firmware_header_v1_0 *mc_hdr =
container_of(hdr, struct mc_firmware_header_v1_0, header);

DRM_DEBUG("io_debug_size_bytes: %u\n",
le32_to_cpu(mc_hdr->io_debug_size_bytes));
DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
} else {
DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
}
}

void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

DRM_DEBUG("SMC\n");
radeon_ucode_print_common_hdr(hdr);

if (version_major == 1) {
const struct smc_firmware_header_v1_0 *smc_hdr =
container_of(hdr, struct smc_firmware_header_v1_0, header);

DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
} else {
DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
}
}

void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

DRM_DEBUG("GFX\n");
radeon_ucode_print_common_hdr(hdr);

if (version_major == 1) {
const struct gfx_firmware_header_v1_0 *gfx_hdr =
container_of(hdr, struct gfx_firmware_header_v1_0, header);

DRM_DEBUG("ucode_feature_version: %u\n",
le32_to_cpu(gfx_hdr->ucode_feature_version));
DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
} else {
DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
}
}

void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

DRM_DEBUG("RLC\n");
radeon_ucode_print_common_hdr(hdr);

if (version_major == 1) {
const struct rlc_firmware_header_v1_0 *rlc_hdr =
container_of(hdr, struct rlc_firmware_header_v1_0, header);

DRM_DEBUG("ucode_feature_version: %u\n",
le32_to_cpu(rlc_hdr->ucode_feature_version));
DRM_DEBUG("save_and_restore_offset: %u\n",
le32_to_cpu(rlc_hdr->save_and_restore_offset));
DRM_DEBUG("clear_state_descriptor_offset: %u\n",
le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
DRM_DEBUG("avail_scratch_ram_locations: %u\n",
le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
DRM_DEBUG("master_pkt_description_offset: %u\n",
le32_to_cpu(rlc_hdr->master_pkt_description_offset));
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
}
}

void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

DRM_DEBUG("SDMA\n");
radeon_ucode_print_common_hdr(hdr);

if (version_major == 1) {
const struct sdma_firmware_header_v1_0 *sdma_hdr =
container_of(hdr, struct sdma_firmware_header_v1_0, header);

DRM_DEBUG("ucode_feature_version: %u\n",
le32_to_cpu(sdma_hdr->ucode_feature_version));
DRM_DEBUG("ucode_change_version: %u\n",
le32_to_cpu(sdma_hdr->ucode_change_version));
DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
} else {
DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
version_major, version_minor);
}
}

int radeon_ucode_validate(const struct firmware *fw)
{
const struct common_firmware_header *hdr =
(const struct common_firmware_header *)fw->data;

if (fw->size == le32_to_cpu(hdr->size_bytes))
return 0;

return -EINVAL;
}
@ -153,4 +153,75 @@
#define HAWAII_SMC_UCODE_START 0x20000
#define HAWAII_SMC_UCODE_SIZE 0x1FDEC

struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
uint16_t header_version_major; /* header version */
uint16_t header_version_minor; /* header version */
uint16_t ip_version_major; /* IP version */
uint16_t ip_version_minor; /* IP version */
uint32_t ucode_version;
uint32_t ucode_size_bytes; /* size of ucode in bytes */
uint32_t ucode_array_offset_bytes; /* payload offset from the start of the header */
uint32_t crc32; /* crc32 checksum of the payload */
};

/* version_major=1, version_minor=0 */
struct mc_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t io_debug_size_bytes; /* size of debug array in dwords */
uint32_t io_debug_array_offset_bytes; /* payload offset from the start of the header */
};

/* version_major=1, version_minor=0 */
struct smc_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_start_addr;
};

/* version_major=1, version_minor=0 */
struct gfx_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_feature_version;
uint32_t jt_offset; /* jt location */
uint32_t jt_size; /* size of jt */
};

/* version_major=1, version_minor=0 */
struct rlc_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_feature_version;
uint32_t save_and_restore_offset;
uint32_t clear_state_descriptor_offset;
uint32_t avail_scratch_ram_locations;
uint32_t master_pkt_description_offset;
};

/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
struct common_firmware_header header;
uint32_t ucode_feature_version;
uint32_t ucode_change_version;
uint32_t jt_offset; /* jt location */
uint32_t jt_size; /* size of jt */
};

/* header is fixed size */
union radeon_firmware_header {
struct common_firmware_header common;
struct mc_firmware_header_v1_0 mc;
struct smc_firmware_header_v1_0 smc;
struct gfx_firmware_header_v1_0 gfx;
struct rlc_firmware_header_v1_0 rlc;
struct sdma_firmware_header_v1_0 sdma;
uint8_t raw[0x100];
};

void radeon_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void radeon_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
void radeon_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
void radeon_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void radeon_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
int radeon_ucode_validate(const struct firmware *fw);

#endif
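
Editor's sketch (not part of the commit): a fetch path for the new unified firmware format would typically request the blob, validate its size against the header, and fall back to the legacy raw layout on mismatch. The firmware file name below is a placeholder, and rdev->new_fw is assumed from the Hawaii patches earlier in this series:

/* Hedged sketch: probe a new-format firmware image. */
static int example_load_ucode(struct radeon_device *rdev)
{
	const struct common_firmware_header *hdr;
	int err;

	err = request_firmware(&rdev->pfp_fw, "radeon/hawaii_pfp.bin", rdev->dev);
	if (err)
		return err;

	err = radeon_ucode_validate(rdev->pfp_fw);
	if (err) {
		/* size_bytes mismatch: treat it as the legacy raw format */
		rdev->new_fw = false;
		return 0;
	}

	rdev->new_fw = true;
	hdr = (const struct common_firmware_header *)rdev->pfp_fw->data;
	radeon_ucode_print_gfx_hdr(hdr);
	return 0;
}
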
@ -117,7 +117,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->uvd.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@ -674,7 +674,7 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
int r, i;

r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;

@ -720,7 +720,7 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
int r, i;

r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (r)
return r;
@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &rdev->vce.vcpu_bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
@ -238,8 +238,8 @@ void radeon_vm_flush(struct radeon_device *rdev,
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush all the time */
if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
trace_radeon_vm_flush(pd_addr, ring, vm->id);
vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
}
@ -325,23 +325,57 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
}
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->soffset = 0;
bo_va->eoffset = 0;
bo_va->it.start = 0;
bo_va->it.last = 0;
bo_va->flags = 0;
bo_va->valid = false;
bo_va->addr = 0;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
INIT_LIST_HEAD(&bo_va->vm_status);

mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);

return bo_va;
}

/**
* radeon_vm_set_pages - helper to call the right asic function
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: hw access flags
*
* Traces the parameters and calls the right asic functions
* to setup the page table using the DMA.
*/
static void radeon_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
trace_radeon_vm_set_page(pe, addr, count, incr, flags);

if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
radeon_asic_vm_write_pages(rdev, ib, pe, addr,
count, incr, flags);

} else {
radeon_asic_vm_set_pages(rdev, ib, pe, addr,
count, incr, flags);
}
}

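The GART branch works because the PTEs for system pages already live in the GART table: each 4 KiB page owns one 8-byte entry, so the source address for a copy is simply table_addr + (addr >> 12) * 8. A tiny self-contained check of that indexing, with invented values:

/* Toy model of the GART PTE indexing used above (values are made up). */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t table_addr = 0x100000;    /* GART table base address */
        uint64_t addr = 5 * 4096;          /* sixth 4 KiB system page */
        uint64_t src = table_addr + (addr >> 12) * 8;

        assert(src == table_addr + 5 * 8); /* one 8-byte PTE per page */
        return 0;
}
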
/**
* radeon_vm_clear_bo - initially clear the page dir/table
*
@ -376,14 +410,15 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
addr = radeon_bo_gpu_offset(bo);
entries = radeon_bo_size(bo) / 8;

r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
NULL, entries * 2 + 64);
r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
if (r)
goto error;

ib.length_dw = 0;

radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > 64);

r = radeon_ib_schedule(rdev, &ib, NULL);
if (r)
@ -419,11 +454,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
struct radeon_bo_va *tmp;
struct list_head *head;
unsigned last_pfn, pt_idx;
uint64_t eoffset;
int r;

if (soffset) {
@ -445,51 +478,49 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}

mutex_lock(&vm->mutex);
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
if (bo_va == tmp) {
/* skip over currently modified bo */
continue;
if (bo_va->it.start || bo_va->it.last) {
if (bo_va->addr) {
/* add a clone of the bo_va to clear the old address */
struct radeon_bo_va *tmp;
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
tmp->it.start = bo_va->it.start;
tmp->it.last = bo_va->it.last;
tmp->vm = vm;
tmp->addr = bo_va->addr;
tmp->bo = radeon_bo_ref(bo_va->bo);
list_add(&tmp->vm_status, &vm->freed);
}

if (soffset >= last_offset && eoffset <= tmp->soffset) {
/* bo can be added before this one */
break;
}
if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
interval_tree_remove(&bo_va->it, &vm->va);
bo_va->it.start = 0;
bo_va->it.last = 0;
}

soffset /= RADEON_GPU_PAGE_SIZE;
eoffset /= RADEON_GPU_PAGE_SIZE;
if (soffset || eoffset) {
struct interval_tree_node *it;
it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
if (it) {
struct radeon_bo_va *tmp;
tmp = container_of(it, struct radeon_bo_va, it);
/* bo and tmp overlap, invalid offset */
dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
soffset, tmp->bo, tmp->it.start, tmp->it.last);
mutex_unlock(&vm->mutex);
return -EINVAL;
}
last_offset = tmp->eoffset;
head = &tmp->vm_list;
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
interval_tree_insert(&bo_va->it, &vm->va);
}

if (bo_va->soffset) {
/* add a clone of the bo_va to clear the old address */
tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (!tmp) {
mutex_unlock(&vm->mutex);
return -ENOMEM;
}
tmp->soffset = bo_va->soffset;
tmp->eoffset = bo_va->eoffset;
tmp->vm = vm;
list_add(&tmp->vm_status, &vm->freed);
}

bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
bo_va->valid = false;
list_move(&bo_va->vm_list, head);
bo_va->addr = 0;

soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
soffset >>= radeon_vm_block_size;
eoffset >>= radeon_vm_block_size;

BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

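The interval tree stores closed ranges [it.start, it.last] in units of GPU pages, which is why the lookup above queries eoffset - 1 as the upper bound. A minimal standalone model of the overlap test it performs (not the kernel's rbtree-backed implementation):

/* Closed intervals [start, last], matching the bo_va->it fields above. */
#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t start, last; };

static bool ranges_overlap(struct range a, struct range b)
{
        return a.start <= b.last && b.start <= a.last;
}
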
@ -510,7 +541,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,

r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
if (r)
return r;

@ -611,7 +642,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
ndw = 64;

/* assume the worst case */
ndw += vm->max_pde_used * 16;
ndw += vm->max_pde_used * 6;

/* update too big for an IB */
if (ndw > 0xfffff)
@ -640,9 +671,9 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
((last_pt + incr * count) != pt)) {

if (count) {
radeon_asic_vm_set_page(rdev, &ib, last_pde,
last_pt, count, incr,
R600_PTE_VALID);
radeon_vm_set_pages(rdev, &ib, last_pde,
last_pt, count, incr,
R600_PTE_VALID);
}

count = 1;
@ -654,12 +685,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
}

if (count)
radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
incr, R600_PTE_VALID);
radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
incr, R600_PTE_VALID);

if (ib.length_dw != 0) {
radeon_asic_vm_pad_ib(rdev, &ib);
radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
WARN_ON(ib.length_dw > ndw);
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_ib_free(rdev, &ib);
@ -725,30 +758,30 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
(frag_start >= frag_end)) {

count = (pe_end - pe_start) / 8;
radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
RADEON_GPU_PAGE_SIZE, flags);
radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
RADEON_GPU_PAGE_SIZE, flags);
return;
}

/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
RADEON_GPU_PAGE_SIZE, flags);
radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
RADEON_GPU_PAGE_SIZE, flags);
addr += RADEON_GPU_PAGE_SIZE * count;
}

/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
RADEON_GPU_PAGE_SIZE, flags | frag_flags);
radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
RADEON_GPU_PAGE_SIZE, flags | frag_flags);

/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += RADEON_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
RADEON_GPU_PAGE_SIZE, flags);
radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}

@ -777,9 +810,6 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
unsigned count = 0;
uint64_t addr;

start = start / RADEON_GPU_PAGE_SIZE;
end = end / RADEON_GPU_PAGE_SIZE;

/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> radeon_vm_block_size;
@ -842,55 +872,73 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
{
struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
unsigned nptes, ndw;
unsigned nptes, ncmds, ndw;
uint64_t addr;
uint32_t flags;
int r;

if (!bo_va->soffset) {
if (!bo_va->it.start) {
dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
bo_va->bo, vm);
return -EINVAL;
}

if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
return 0;
list_del_init(&bo_va->vm_status);

bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
bo_va->flags |= RADEON_VM_PAGE_SNOOPED;

} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
bo_va->valid = false;
}

if (addr == bo_va->addr)
return 0;
bo_va->addr = addr;

trace_radeon_vm_bo_update(bo_va);

nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
nptes = bo_va->it.last - bo_va->it.start + 1;

/* reserve space for one command every (1 << BLOCK_SIZE) entries
or 2k dwords (whatever is smaller) */
ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;

/* padding, etc. */
ndw = 64;

if (radeon_vm_block_size > 11)
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 4;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
ndw += (nptes >> radeon_vm_block_size) * 4;
flags = radeon_vm_page_flags(bo_va->flags);
if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
/* only copy commands needed */
ndw += ncmds * 7;

/* reserve space for pte addresses */
ndw += nptes * 2;
} else if (flags & R600_PTE_SYSTEM) {
/* header for write data commands */
ndw += ncmds * 4;

/* body of write data command */
ndw += nptes * 2;

} else {
/* set page commands needed */
ndw += ncmds * 10;

/* two extra commands for begin/end of fragment */
ndw += 2 * 10;
}

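As a quick sanity check of this accounting (with assumed example values): for radeon_vm_block_size = 9 and a 1 MiB mapping, nptes = 256 and ncmds = (256 >> 9) + 1 = 1, so the VRAM path reserves ndw = 64 + 1 * 10 + 2 * 10 = 94 dwords, comfortably inside the 0xfffff IB limit checked next.
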
/* update too big for an IB */
if (ndw > 0xfffff)
@ -901,8 +949,12 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return r;
ib.length_dw = 0;

radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
addr, radeon_vm_page_flags(bo_va->flags));
radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
bo_va->it.last + 1, addr,
radeon_vm_page_flags(bo_va->flags));

radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > ndw);

radeon_semaphore_sync_to(ib.semaphore, vm->fence);
r = radeon_ib_schedule(rdev, &ib, NULL);
@ -936,8 +988,8 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
int r;

list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
list_del(&bo_va->vm_status);
r = radeon_vm_bo_update(rdev, bo_va, NULL);
radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
if (r)
return r;
@ -946,6 +998,31 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,

}

/**
* radeon_vm_clear_invalids - clear invalidated BOs in the PT
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Make sure all invalidated BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;

list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
r = radeon_vm_bo_update(rdev, bo_va, NULL);
if (r)
return r;
}
return 0;
}

/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
@ -964,10 +1041,11 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
list_del(&bo_va->bo_list);

mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
interval_tree_remove(&bo_va->it, &vm->va);
list_del(&bo_va->vm_status);

if (bo_va->soffset) {
bo_va->bo = NULL;
if (bo_va->addr) {
bo_va->bo = radeon_bo_ref(bo_va->bo);
list_add(&bo_va->vm_status, &vm->freed);
} else {
kfree(bo_va);
@ -991,7 +1069,12 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;

list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
if (bo_va->addr) {
mutex_lock(&bo_va->vm->mutex);
list_del(&bo_va->vm_status);
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
mutex_unlock(&bo_va->vm->mutex);
}
}
}

@ -1016,7 +1099,8 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
vm->va = RB_ROOT;
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);

pd_size = radeon_vm_directory_size(rdev);
@ -1031,7 +1115,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}

r = radeon_bo_create(rdev, pd_size, align, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
RADEON_GEM_DOMAIN_VRAM, 0, NULL,
&vm->page_directory);
if (r)
return r;
@ -1060,11 +1144,11 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int i, r;

if (!list_empty(&vm->va)) {
if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
list_del_init(&bo_va->vm_list);
rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
interval_tree_remove(&bo_va->it, &vm->va);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
@ -1072,8 +1156,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
radeon_bo_unref(&bo_va->bo);
kfree(bo_va);
}

for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
@ -109,7 +109,6 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;

radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@ -209,17 +208,24 @@ void rs400_gart_fini(struct radeon_device *rdev)
radeon_gart_table_ram_free(rdev);
}

#define RS400_PTE_UNSNOOPED (1 << 0)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)

void rs400_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;

entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
((upper_32_bits(addr) & 0xff) << 4);
if (flags & RADEON_GART_PAGE_READ)
entry |= RS400_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
entry = cpu_to_le32(entry);
gtt[i] = entry;
}
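Putting the bits together (example address invented): a readable, writeable, snooped page at 0x12345000 yields entry = 0x12345000 | RS400_PTE_WRITEABLE | RS400_PTE_READABLE = 0x1234500c, with bit 0 left clear so the access remains snooped.
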
@ -555,7 +555,6 @@ static int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
@ -626,15 +625,21 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}

void rs600_gart_set_page(struct radeon_device *rdev, unsigned i, uint64_t addr)
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
uint64_t addr, uint32_t flags)
{
void __iomem *ptr = (void *)rdev->gart.ptr;

addr = addr & 0xFFFFFFFFFFFFF000ULL;
if (addr == rdev->dummy_page.addr)
addr |= R600_PTE_SYSTEM | R600_PTE_SNOOPED;
else
addr |= R600_PTE_GART;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
addr |= R600_PTE_VALID;
if (flags & RADEON_GART_PAGE_READ)
addr |= R600_PTE_READABLE;
if (flags & RADEON_GART_PAGE_WRITE)
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
writeq(addr, ptr + (i * 8));
}

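The same composition, pulled out as a standalone sketch for the common case of a valid, snooped, read/write system page; it reuses the driver's R600_PTE_* macros and is illustrative only, not a function from the driver:

/* Sketch: build an R600-family GART PTE, mirroring the flag handling above. */
static uint64_t example_r600_gart_pte(uint64_t addr)
{
        uint64_t pte = addr & 0xFFFFFFFFFFFFF000ULL; /* page-aligned base */

        pte |= R600_PTE_SYSTEM | R600_PTE_VALID |
               R600_PTE_READABLE | R600_PTE_WRITEABLE |
               R600_PTE_SNOOPED;
        return pte;
}
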
@ -900,7 +900,6 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |

@ -42,6 +42,14 @@ MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc2.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/TAHITI_smc.bin");

MODULE_FIRMWARE("radeon/tahiti_pfp.bin");
MODULE_FIRMWARE("radeon/tahiti_me.bin");
MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");

MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
@ -49,6 +57,14 @@ MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc2.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_smc.bin");

MODULE_FIRMWARE("radeon/pitcairn_pfp.bin");
MODULE_FIRMWARE("radeon/pitcairn_me.bin");
MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");

MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
@ -56,6 +72,14 @@ MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_mc2.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_smc.bin");

MODULE_FIRMWARE("radeon/verde_pfp.bin");
MODULE_FIRMWARE("radeon/verde_me.bin");
MODULE_FIRMWARE("radeon/verde_ce.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/verde_rlc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");

MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
MODULE_FIRMWARE("radeon/OLAND_me.bin");
MODULE_FIRMWARE("radeon/OLAND_ce.bin");
@ -63,6 +87,14 @@ MODULE_FIRMWARE("radeon/OLAND_mc.bin");
MODULE_FIRMWARE("radeon/OLAND_mc2.bin");
MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
MODULE_FIRMWARE("radeon/OLAND_smc.bin");

MODULE_FIRMWARE("radeon/oland_pfp.bin");
MODULE_FIRMWARE("radeon/oland_me.bin");
MODULE_FIRMWARE("radeon/oland_ce.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/oland_rlc.bin");
MODULE_FIRMWARE("radeon/oland_smc.bin");

MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
MODULE_FIRMWARE("radeon/HAINAN_me.bin");
MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
@ -71,6 +103,13 @@ MODULE_FIRMWARE("radeon/HAINAN_mc2.bin");
MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
MODULE_FIRMWARE("radeon/HAINAN_smc.bin");

MODULE_FIRMWARE("radeon/hainan_pfp.bin");
MODULE_FIRMWARE("radeon/hainan_me.bin");
MODULE_FIRMWARE("radeon/hainan_ce.bin");
MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");

static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
@ -1470,38 +1509,54 @@ static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
/* ucode loading */
int si_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
const __be32 *fw_data = NULL;
const __le32 *new_fw_data = NULL;
u32 running, blackout = 0;
u32 *io_mc_regs;
u32 *io_mc_regs = NULL;
const __le32 *new_io_mc_regs = NULL;
int i, regs_size, ucode_size;

if (!rdev->mc_fw)
return -EINVAL;

ucode_size = rdev->mc_fw->size / 4;
if (rdev->new_fw) {
const struct mc_firmware_header_v1_0 *hdr =
(const struct mc_firmware_header_v1_0 *)rdev->mc_fw->data;

switch (rdev->family) {
case CHIP_TAHITI:
io_mc_regs = (u32 *)&tahiti_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_PITCAIRN:
io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_VERDE:
default:
io_mc_regs = (u32 *)&verde_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_OLAND:
io_mc_regs = (u32 *)&oland_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_HAINAN:
io_mc_regs = (u32 *)&hainan_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
radeon_ucode_print_mc_hdr(&hdr->header);
regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
new_io_mc_regs = (const __le32 *)
(rdev->mc_fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
new_fw_data = (const __le32 *)
(rdev->mc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
} else {
ucode_size = rdev->mc_fw->size / 4;

switch (rdev->family) {
case CHIP_TAHITI:
io_mc_regs = (u32 *)&tahiti_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_PITCAIRN:
io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_VERDE:
default:
io_mc_regs = (u32 *)&verde_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_OLAND:
io_mc_regs = (u32 *)&oland_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_HAINAN:
io_mc_regs = (u32 *)&hainan_io_mc_regs;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
}
fw_data = (const __be32 *)rdev->mc_fw->data;
}

running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@ -1518,13 +1573,21 @@ int si_mc_load_microcode(struct radeon_device *rdev)

/* load mc io regs */
for (i = 0; i < regs_size; i++) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
if (rdev->new_fw) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
WREG32(MC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
} else {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
}
/* load the MC ucode */
fw_data = (const __be32 *)rdev->mc_fw->data;
for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
for (i = 0; i < ucode_size; i++) {
if (rdev->new_fw)
WREG32(MC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));
else
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
}

/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
@ -1553,18 +1616,19 @@ int si_mc_load_microcode(struct radeon_device *rdev)
static int si_init_microcode(struct radeon_device *rdev)
{
const char *chip_name;
const char *rlc_chip_name;
const char *new_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
size_t smc_req_size, mc2_req_size;
char fw_name[30];
int err;
int new_fw = 0;

DRM_DEBUG("\n");

switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
rlc_chip_name = "TAHITI";
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@ -1575,7 +1639,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
rlc_chip_name = "PITCAIRN";
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@ -1586,7 +1650,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
rlc_chip_name = "VERDE";
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@ -1597,7 +1661,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
rlc_chip_name = "OLAND";
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@ -1607,7 +1671,7 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
rlc_chip_name = "HAINAN";
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
@ -1618,86 +1682,178 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}

DRM_INFO("Loading %s Microcode\n", chip_name);
DRM_INFO("Loading %s Microcode\n", new_chip_name);

snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->pfp_fw->size, fw_name);
err = -EINVAL;
goto out;
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->me_fw->size, fw_name);
err = -EINVAL;
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->ce_fw->size != ce_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->ce_fw->size, fw_name);
err = -EINVAL;
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
printk(KERN_ERR
"si_rlc: Bogus length %zu in firmware \"%s\"\n",
rdev->rlc_fw->size, fw_name);
err = -EINVAL;
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->pfp_fw->size, fw_name);
err = -EINVAL;
goto out;
}
} else {
err = radeon_ucode_validate(rdev->pfp_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}
if ((rdev->mc_fw->size != mc_req_size) &&
(rdev->mc_fw->size != mc2_req_size)) {
printk(KERN_ERR
"si_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);

snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", new_chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->me_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", new_chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->ce_fw->size != ce_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->ce_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->ce_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", new_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, rdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
printk(KERN_ERR
"si_rlc: Bogus length %zu in firmware \"%s\"\n",
rdev->rlc_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->rlc_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err)
goto out;
}
if ((rdev->mc_fw->size != mc_req_size) &&
(rdev->mc_fw->size != mc2_req_size)) {
printk(KERN_ERR
"si_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
DRM_INFO("%s: %zu bytes\n", fw_name, rdev->mc_fw->size);
} else {
err = radeon_ucode_validate(rdev->mc_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
printk(KERN_ERR
"smc: error loading firmware \"%s\"\n",
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
err = -EINVAL;
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
if (err) {
printk(KERN_ERR
"smc: error loading firmware \"%s\"\n",
fw_name);
release_firmware(rdev->smc_fw);
rdev->smc_fw = NULL;
err = 0;
} else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
err = -EINVAL;
}
} else {
err = radeon_ucode_validate(rdev->smc_fw);
if (err) {
printk(KERN_ERR
"si_cp: validation failed for firmware \"%s\"\n",
fw_name);
goto out;
} else {
new_fw++;
}
}

if (new_fw == 0) {
rdev->new_fw = false;
} else if (new_fw < 6) {
printk(KERN_ERR "si_fw: mixing new and old firmware!\n");
err = -EINVAL;
} else {
rdev->new_fw = true;
}
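The counter enforces an all-or-nothing switch: six images are probed (pfp, me, ce, rlc, mc, smc), new_fw is bumped once per image that validates as new-format, and rdev->new_fw only flips to true when all six did; any partial mix is rejected up front rather than letting mismatched header parsing surface later in the load paths.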
out:
if (err) {
if (err != -EINVAL)
@ -3282,34 +3438,77 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)

static int si_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
int i;

if (!rdev->me_fw || !rdev->pfp_fw)
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
return -EINVAL;

si_cp_enable(rdev, false);

/* PFP */
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
if (rdev->new_fw) {
const struct gfx_firmware_header_v1_0 *pfp_hdr =
(const struct gfx_firmware_header_v1_0 *)rdev->pfp_fw->data;
const struct gfx_firmware_header_v1_0 *ce_hdr =
(const struct gfx_firmware_header_v1_0 *)rdev->ce_fw->data;
const struct gfx_firmware_header_v1_0 *me_hdr =
(const struct gfx_firmware_header_v1_0 *)rdev->me_fw->data;
const __le32 *fw_data;
u32 fw_size;

/* CE */
fw_data = (const __be32 *)rdev->ce_fw->data;
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < SI_CE_UCODE_SIZE; i++)
WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_CE_UCODE_ADDR, 0);
radeon_ucode_print_gfx_hdr(&pfp_hdr->header);
radeon_ucode_print_gfx_hdr(&ce_hdr->header);
radeon_ucode_print_gfx_hdr(&me_hdr->header);

/* ME */
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, 0);
/* PFP */
fw_data = (const __le32 *)
(rdev->pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);

/* CE */
fw_data = (const __le32 *)
(rdev->ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
WREG32(CP_CE_UCODE_ADDR, 0);

/* ME */
fw_data = (const __be32 *)
(rdev->me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < fw_size; i++)
WREG32(CP_ME_RAM_DATA, le32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, 0);
} else {
const __be32 *fw_data;

/* PFP */
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);

/* CE */
fw_data = (const __be32 *)rdev->ce_fw->data;
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < SI_CE_UCODE_SIZE; i++)
WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_CE_UCODE_ADDR, 0);

/* ME */
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, 0);
}

WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
@ -4048,7 +4247,6 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
@ -4815,7 +5013,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)

/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
WRITE_DATA_DST_SEL(0)));

if (vm->id < 8) {
@ -5592,7 +5790,6 @@ static void si_enable_lbpw(struct radeon_device *rdev, bool enable)
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;

if (!rdev->rlc_fw)
return -EINVAL;
@ -5615,10 +5812,26 @@ static int si_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);

fw_data = (const __be32 *)rdev->rlc_fw->data;
for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
if (rdev->new_fw) {
const struct rlc_firmware_header_v1_0 *hdr =
(const struct rlc_firmware_header_v1_0 *)rdev->rlc_fw->data;
u32 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
const __le32 *fw_data = (const __le32 *)
(rdev->rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

radeon_ucode_print_rlc_hdr(&hdr->header);

for (i = 0; i < fw_size; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, le32_to_cpup(fw_data++));
}
} else {
const __be32 *fw_data =
(const __be32 *)rdev->rlc_fw->data;
for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
}
WREG32(RLC_UCODE_ADDR, 0);

@ -6318,7 +6531,8 @@ restart_ih:
case 16: /* D5 page flip */
case 18: /* D6 page flip */
DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1);
radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
if (radeon_use_pflipirq > 0)
radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1);
break;
case 42: /* HPD hotplug */
switch (src_data) {

@ -56,7 +56,89 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}

/**
* si_dma_vm_set_page - update the page tables using the DMA
* si_dma_vm_copy_pages - update PTEs by copying them from the GART
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @src: src addr where to copy from
* @count: number of page entries to update
*
* Update PTEs by copying them from the GART using the DMA (SI).
*/
void si_dma_vm_copy_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe, uint64_t src,
unsigned count)
{
while (count) {
unsigned bytes = count * 8;
if (bytes > 0xFFFF8)
bytes = 0xFFFF8;

ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, bytes);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

pe += bytes;
src += bytes;
count -= bytes / 8;
}
}

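Each copy packet carries at most 0xFFFF8 bytes of PTE payload, i.e. 131,071 eight-byte entries, and costs five dwords of command space regardless of size, so even a very large GART-backed update collapses into a handful of commands rather than per-entry writes; that is the copy path the "ncmds * 7" reservation in radeon_vm_bo_update budgets for.
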
/**
* si_dma_vm_write_pages - update PTEs by writing them manually
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update PTEs by writing them manually using the DMA (SI).
*/
void si_dma_vm_write_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;

while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;

/* for non-physically contiguous pages (system) */
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
value = 0;
}
addr += incr;
value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
}

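Here the payload is the PTE values themselves: each DMA_PACKET_WRITE carries up to 0xFFFFE dwords, two per entry (so 524,287 PTEs per packet), behind the three header dwords emitted above, which lines up with the "header plus two dwords per PTE" space the system path reserves in radeon_vm_bo_update.
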
/**
* si_dma_vm_set_pages - update the page tables using the DMA
*
* @rdev: radeon_device pointer
* @ib: indirect buffer to fill with commands
@ -68,81 +150,39 @@ bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
*
* Update the page tables using the DMA (SI).
*/
void si_dma_vm_set_page(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
void si_dma_vm_set_pages(struct radeon_device *rdev,
struct radeon_ib *ib,
uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
uint64_t value;
unsigned ndw;

trace_radeon_vm_set_page(pe, addr, count, incr, flags);
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;

if (flags == R600_PTE_GART) {
uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
while (count) {
unsigned bytes = count * 8;
if (bytes > 0xFFFF8)
bytes = 0xFFFF8;
if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;

ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, bytes);
ib->ptr[ib->length_dw++] = lower_32_bits(pe);
ib->ptr[ib->length_dw++] = lower_32_bits(src);
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;

pe += bytes;
src += bytes;
count -= bytes / 8;
}
} else if (flags & R600_PTE_SYSTEM) {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;

/* for non-physically contiguous pages (system) */
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
ib->ptr[ib->length_dw++] = pe;
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
value |= flags;
ib->ptr[ib->length_dw++] = value;
ib->ptr[ib->length_dw++] = upper_32_bits(value);
}
}
} else {
while (count) {
ndw = count * 2;
if (ndw > 0xFFFFE)
ndw = 0xFFFFE;

if (flags & R600_PTE_VALID)
value = addr;
else
value = 0;
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
/* for physically contiguous pages (vram) */
ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
ib->ptr[ib->length_dw++] = pe; /* dst addr */
ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
ib->ptr[ib->length_dw++] = flags; /* mask */
ib->ptr[ib->length_dw++] = 0;
ib->ptr[ib->length_dw++] = value; /* value */
ib->ptr[ib->length_dw++] = upper_32_bits(value);
ib->ptr[ib->length_dw++] = incr; /* increment size */
ib->ptr[ib->length_dw++] = 0;
pe += ndw * 4;
addr += (ndw / 2) * incr;
count -= ndw / 2;
}
while (ib->length_dw & 0x7)
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
}

void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)

@ -3812,6 +3812,27 @@ void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
|
||||
voltage_table->count = max_voltage_steps;
|
||||
}
|
||||
|
||||
static int si_get_svi2_voltage_table(struct radeon_device *rdev,
|
||||
struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
|
||||
struct atom_voltage_table *voltage_table)
|
||||
{
|
||||
u32 i;
|
||||
|
||||
if (voltage_dependency_table == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
voltage_table->mask_low = 0;
|
||||
voltage_table->phase_delay = 0;
|
||||
|
||||
voltage_table->count = voltage_dependency_table->count;
|
||||
for (i = 0; i < voltage_table->count; i++) {
|
||||
voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
|
||||
voltage_table->entries[i].smio_low = 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int si_construct_voltage_tables(struct radeon_device *rdev)
|
||||
{
|
||||
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
|
||||
@ -3819,15 +3840,25 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
|
||||
struct si_power_info *si_pi = si_get_pi(rdev);
|
||||
int ret;
|
||||
|
||||
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
|
||||
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (pi->voltage_control) {
|
||||
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
|
||||
VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
|
||||
si_trim_voltage_table_to_fit_state_table(rdev,
|
||||
SISLANDS_MAX_NO_VREG_STEPS,
|
||||
&eg_pi->vddc_voltage_table);
|
||||
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
|
||||
si_trim_voltage_table_to_fit_state_table(rdev,
|
||||
SISLANDS_MAX_NO_VREG_STEPS,
|
||||
&eg_pi->vddc_voltage_table);
|
||||
} else if (si_pi->voltage_control_svi2) {
|
||||
ret = si_get_svi2_voltage_table(rdev,
|
||||
&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
|
||||
&eg_pi->vddc_voltage_table);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (eg_pi->vddci_control) {
|
||||
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
|
||||
@ -3840,6 +3871,13 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
|
||||
SISLANDS_MAX_NO_VREG_STEPS,
|
||||
&eg_pi->vddci_voltage_table);
|
||||
}
|
||||
if (si_pi->vddci_control_svi2) {
|
||||
ret = si_get_svi2_voltage_table(rdev,
|
||||
&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
|
||||
&eg_pi->vddci_voltage_table);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (pi->mvdd_control) {
|
||||
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
|
||||
@ -3893,46 +3931,55 @@ static int si_populate_smc_voltage_tables(struct radeon_device *rdev,
|
||||
struct si_power_info *si_pi = si_get_pi(rdev);
|
||||
u8 i;
|
||||
|
||||
-	if (eg_pi->vddc_voltage_table.count) {
-		si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
-		table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
-			cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+	if (si_pi->voltage_control_svi2) {
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
+					   si_pi->svc_gpio_id);
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
+					   si_pi->svd_gpio_id);
+		si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
+					   2);
+	} else {
+		if (eg_pi->vddc_voltage_table.count) {
+			si_populate_smc_voltage_table(rdev, &eg_pi->vddc_voltage_table, table);
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
 
-		for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
-			if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
-				table->maxVDDCIndexInPPTable = i;
-				break;
+			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
+				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
+					table->maxVDDCIndexInPPTable = i;
+					break;
+				}
 			}
 		}
-	}
 
-	if (eg_pi->vddci_voltage_table.count) {
-		si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
+		if (eg_pi->vddci_voltage_table.count) {
+			si_populate_smc_voltage_table(rdev, &eg_pi->vddci_voltage_table, table);
 
-		table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
-			cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
-	}
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
+				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
+		}
 
-	if (si_pi->mvdd_voltage_table.count) {
-		si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
+		if (si_pi->mvdd_voltage_table.count) {
+			si_populate_smc_voltage_table(rdev, &si_pi->mvdd_voltage_table, table);
 
-		table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
-			cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
-	}
+			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
+				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
+		}
 
-	if (si_pi->vddc_phase_shed_control) {
-		if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
-						      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
-			si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
+		if (si_pi->vddc_phase_shed_control) {
+			if (si_validate_phase_shedding_tables(rdev, &si_pi->vddc_phase_shed_table,
+							      &rdev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
+				si_populate_smc_voltage_table(rdev, &si_pi->vddc_phase_shed_table, table);
 
-			table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
-				cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
+				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
+					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
 
-			si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
-						   (u32)si_pi->vddc_phase_shed_table.phase_delay);
-		} else {
-			si_pi->vddc_phase_shed_control = false;
+				si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
+							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
+			} else {
+				si_pi->vddc_phase_shed_control = false;
+			}
 		}
 	}
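The SVI2 branch above populates no voltage table at all: it only tells the SMC which platform type and which SVD/SVC GPIO pair to use, and the firmware then drives the voltage regulator over the SVI2 bus itself. For reference, a soft-register write of this kind reduces to a 4-byte big-endian store into SMC SRAM at a fixed offset from the soft-register block. A minimal sketch of that pattern, assuming the SI dpm code's si_copy_bytes_to_smc() helper and a soft_regs_start offset cached in si_power_info (treat the exact field name and signature as assumptions, not confirmed by this diff):

static int soft_register_write(struct radeon_device *rdev,
			       u16 reg_offset, u32 value)
{
	struct si_power_info *si_pi = si_get_pi(rdev);
	u32 tmp = cpu_to_be32(value);	/* SMC-side data is big-endian */

	/* land the four bytes at soft_regs_start + reg_offset in SMC SRAM */
	return si_copy_bytes_to_smc(rdev, si_pi->soft_regs_start + reg_offset,
				    (u8 *)&tmp, 4, si_pi->sram_end);
}

Programming SI_SMC_SOFT_REGISTER_svi_rework_plat_type to 2 is exactly such a store; the firmware reads the value back out of SRAM instead of consulting the mask tables that the else branch fills in.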
@@ -5798,16 +5845,17 @@ int si_dpm_enable(struct radeon_device *rdev)
 {
 	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+	struct si_power_info *si_pi = si_get_pi(rdev);
 	struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
 	int ret;
 
 	if (si_is_smc_running(rdev))
 		return -EINVAL;
-	if (pi->voltage_control)
+	if (pi->voltage_control || si_pi->voltage_control_svi2)
 		si_enable_voltage_control(rdev, true);
 	if (pi->mvdd_control)
 		si_get_mvdd_configuration(rdev);
-	if (pi->voltage_control) {
+	if (pi->voltage_control || si_pi->voltage_control_svi2) {
 		ret = si_construct_voltage_tables(rdev);
 		if (ret) {
 			DRM_ERROR("si_construct_voltage_tables failed\n");
@@ -6406,16 +6454,32 @@ int si_dpm_init(struct radeon_device *rdev)
 	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
 
 	pi->voltage_control =
-		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_GPIO_LUT);
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!pi->voltage_control) {
+		si_pi->voltage_control_svi2 =
+			radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						    VOLTAGE_OBJ_SVID2);
+		if (si_pi->voltage_control_svi2)
+			radeon_atom_get_svi2_info(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+						  &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
+	}
 
 	pi->mvdd_control =
-		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, VOLTAGE_OBJ_GPIO_LUT);
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
+					    VOLTAGE_OBJ_GPIO_LUT);
 
 	eg_pi->vddci_control =
-		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, VOLTAGE_OBJ_GPIO_LUT);
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+					    VOLTAGE_OBJ_GPIO_LUT);
+	if (!eg_pi->vddci_control)
+		si_pi->vddci_control_svi2 =
+			radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
+						    VOLTAGE_OBJ_SVID2);
 
 	si_pi->vddc_phase_shed_control =
-		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
+		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC,
+					    VOLTAGE_OBJ_PHASE_LUT);
 
 	rv770_get_engine_memory_ss(rdev);
@@ -170,6 +170,8 @@ struct si_power_info {
 	bool vddc_phase_shed_control;
 	bool pspp_notify_required;
 	bool sclk_deep_sleep_above_low;
+	bool voltage_control_svi2;
+	bool vddci_control_svi2;
 	/* smc offsets */
 	u32 sram_end;
 	u32 state_table_start;
@@ -192,6 +194,9 @@ struct si_power_info {
 	SMC_SIslands_MCRegisters smc_mc_reg_table;
 	SISLANDS_SMC_STATETABLE smc_statetable;
 	PP_SIslands_PAPMParameters papm_parm;
+	/* SVI2 */
+	u8 svd_gpio_id;
+	u8 svc_gpio_id;
 };
 
 #define SISLANDS_INITIAL_STATE_ARB_INDEX 0
@@ -219,36 +219,48 @@ int si_load_smc_ucode(struct radeon_device *rdev, u32 limit)
 	if (!rdev->smc_fw)
 		return -EINVAL;
 
-	switch (rdev->family) {
-	case CHIP_TAHITI:
-		ucode_start_address = TAHITI_SMC_UCODE_START;
-		ucode_size = TAHITI_SMC_UCODE_SIZE;
-		break;
-	case CHIP_PITCAIRN:
-		ucode_start_address = PITCAIRN_SMC_UCODE_START;
-		ucode_size = PITCAIRN_SMC_UCODE_SIZE;
-		break;
-	case CHIP_VERDE:
-		ucode_start_address = VERDE_SMC_UCODE_START;
-		ucode_size = VERDE_SMC_UCODE_SIZE;
-		break;
-	case CHIP_OLAND:
-		ucode_start_address = OLAND_SMC_UCODE_START;
-		ucode_size = OLAND_SMC_UCODE_SIZE;
-		break;
-	case CHIP_HAINAN:
-		ucode_start_address = HAINAN_SMC_UCODE_START;
-		ucode_size = HAINAN_SMC_UCODE_SIZE;
-		break;
-	default:
-		DRM_ERROR("unknown asic in smc ucode loader\n");
-		BUG();
+	if (rdev->new_fw) {
+		const struct smc_firmware_header_v1_0 *hdr =
+			(const struct smc_firmware_header_v1_0 *)rdev->smc_fw->data;
+
+		radeon_ucode_print_smc_hdr(&hdr->header);
+
+		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
+		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
+		src = (const u8 *)
+			(rdev->smc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+	} else {
+		switch (rdev->family) {
+		case CHIP_TAHITI:
+			ucode_start_address = TAHITI_SMC_UCODE_START;
+			ucode_size = TAHITI_SMC_UCODE_SIZE;
+			break;
+		case CHIP_PITCAIRN:
+			ucode_start_address = PITCAIRN_SMC_UCODE_START;
+			ucode_size = PITCAIRN_SMC_UCODE_SIZE;
+			break;
+		case CHIP_VERDE:
+			ucode_start_address = VERDE_SMC_UCODE_START;
+			ucode_size = VERDE_SMC_UCODE_SIZE;
+			break;
+		case CHIP_OLAND:
+			ucode_start_address = OLAND_SMC_UCODE_START;
+			ucode_size = OLAND_SMC_UCODE_SIZE;
+			break;
+		case CHIP_HAINAN:
+			ucode_start_address = HAINAN_SMC_UCODE_START;
+			ucode_size = HAINAN_SMC_UCODE_SIZE;
+			break;
+		default:
+			DRM_ERROR("unknown asic in smc ucode loader\n");
+			BUG();
+		}
+		src = (const u8 *)rdev->smc_fw->data;
 	}
 
 	if (ucode_size & 3)
 		return -EINVAL;
 
-	src = (const u8 *)rdev->smc_fw->data;
 	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
 	WREG32(SMC_IND_INDEX_0, ucode_start_address);
 	WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
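The hunk above is the heart of the new firmware format: instead of per-ASIC start/size constants compiled into the driver, a new-format image carries a little-endian header giving the SMC load address, the payload size, and the payload's offset inside the blob. A small self-contained sketch of the bounds discipline this implies — smc_payload_ok() is a hypothetical helper, not the kernel's API, shown only to make the implicit invariants explicit:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * A payload described by (offset, size) must lie entirely inside a blob
 * of fw_len bytes and be a whole number of 32-bit words, because the SMC
 * is programmed one u32 at a time through the indexed register window.
 */
static bool smc_payload_ok(size_t fw_len, uint32_t offset, uint32_t size)
{
	if (size & 3)			/* mirrors the "ucode_size & 3" test */
		return false;
	if (offset > fw_len || size > fw_len - offset)
		return false;		/* also rejects offset + size overflow */
	return true;
}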
@@ -241,6 +241,9 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
 #define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4
 #define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen  0xFC
 #define SI_SMC_SOFT_REGISTER_vr_hot_gpio             0x100
+#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type    0x118
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd  0x11c
+#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc  0x120
 
 #define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
 #define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
@@ -796,7 +796,9 @@ struct drm_radeon_gem_info {
 	uint64_t vram_visible;
 };
 
-#define RADEON_GEM_NO_BACKING_STORE 1
+#define RADEON_GEM_NO_BACKING_STORE (1 << 0)
+#define RADEON_GEM_GTT_UC           (1 << 1)
+#define RADEON_GEM_GTT_WC           (1 << 2)
 
 struct drm_radeon_gem_create {
 	uint64_t size;
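These two flags are the userspace-visible side of the uncached and write-combined GTT support in this merge (RADEON_GEM_NO_BACKING_STORE keeps its value; it is merely rewritten in shift style). A hedged userspace sketch of requesting a write-combined GTT buffer with the new flag — create_wc_gtt_bo() is a made-up helper, fd is assumed to be an open radeon DRM node, and kernels predating the flag may ignore or reject it:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

/* Ask the kernel for a page-aligned, write-combined buffer in GTT. */
static int create_wc_gtt_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_radeon_gem_create args;

	memset(&args, 0, sizeof(args));
	args.size = size;
	args.alignment = 4096;
	args.initial_domain = RADEON_GEM_DOMAIN_GTT;
	args.flags = RADEON_GEM_GTT_WC;		/* new in this merge */

	if (ioctl(fd, DRM_IOCTL_RADEON_GEM_CREATE, &args) != 0)
		return -1;

	*handle = args.handle;
	return 0;
}

A write-combined mapping keeps streaming CPU uploads of vertex and command data fast without the snooping cost of a cached mapping, which is why a client would ask for it on an upload buffer.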