drm fixes for 5.12-rc3

Merge tag 'drm-fixes-2021-03-12-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular fixes for rc3. The i915 pull was based on the rc1 tag, so I
  just cherry-picked the single fix from there to avoid it. The misc
  and amd trees seem to be on okay bases.

  It's a bunch of fixes across the tree; amdgpu has most of them, plus
  a few ttm fixes around qxl, and nouveau.

  core:
   - Clear holes when converting compat ioctls between 32-bit and 64-bit

  docs:
   - Use gitlab for drm bug reporting now, instead of bugzilla

  ttm:
   - Fix ttm page pool accounting

  fbdev:
   - Fix oops in drm_fbdev_cleanup()

  shmem:
   - Assorted fixes for shmem helpers

  qxl:
   - Unpin qxl bos created as pinned when freeing them, and make ttm
     only warn once on this behavior
   - Zero head.surface_id correctly in qxl

  atyfb:
   - Use LCD management for atyfb on PPC_PMAC

  meson:
   - Shut down the kms poll helper in meson correctly

  nouveau:
   - Fix regression in bo syncing

  i915:
   - Wedge the GPU if command parser setup fails

  amdgpu:
   - Fix aux backlight control
   - Add a backlight override parameter
   - Various display fixes
   - PCIe DPM fix for vega
   - Polaris watermark fixes
   - Additional S0ix fix

  radeon:
   - Fix GEM regression
   - Fix AGP dependency handling"

* tag 'drm-fixes-2021-03-12-1' of git://anongit.freedesktop.org/drm/drm: (33 commits)
  drm/nouveau: fix dma syncing for loops (v2)
  drm/i915: Wedge the GPU if command parser setup fails
  drm/compat: Clear bounce structures
  drm/shmem-helpers: vunmap: Don't put pages for dma-buf
  drm: meson_drv add shutdown function
  drm/shmem-helper: Don't remove the offset in vm_area_struct pgoff
  drm/shmem-helper: Check for purged buffers in fault handler
  qxl: Fix uninitialised struct field head.surface_id
  drm/ttm: Fix TTM page pool accounting
  drm/ttm: soften TTM warnings
  drm: Use USB controller's DMA mask when importing dmabufs
  MAINTAINERS: update drm bug reporting URL
  fbdev: atyfb: use LCD management functions for PPC_PMAC also
  fbdev: atyfb: always declare aty_{ld,st}_lcd()
  drm/qxl: fix lockdep issue in qxl_alloc_release_reserved
  drm/qxl: unpin release objects
  drm/fb-helper: only unmap if buffer not null
  drm/amdgpu: fix S0ix handling when the CONFIG_AMD_PMC=m
  drm/radeon: fix AGP dependency
  drm/radeon: also init GEM funcs in radeon_gem_prime_import_sg_table
  ...
This commit is contained in: f78d76e72a
@@ -613,6 +613,27 @@ Some of these date from the very introduction of KMS in 2008 ...
 
 Level: Intermediate
 
+Remove automatic page mapping from dma-buf importing
+----------------------------------------------------
+
+When importing dma-bufs, the dma-buf and PRIME frameworks automatically map
+imported pages into the importer's DMA area. drm_gem_prime_fd_to_handle() and
+drm_gem_prime_handle_to_fd() require that importers call dma_buf_attach()
+even if they never do actual device DMA, but only CPU access through
+dma_buf_vmap(). This is a problem for USB devices, which do not support DMA
+operations.
+
+To fix the issue, automatic page mappings should be removed from the
+buffer-sharing code. Fixing this is a bit more involved, since the import/export
+cache is also tied to &drm_gem_object.import_attach. Meanwhile we paper over
+this problem for USB devices by fishing out the USB host controller device, as
+long as that supports DMA. Otherwise importing can still needlessly fail.
+
+Contact: Thomas Zimmermann <tzimmermann@suse.de>, Daniel Vetter
+
+Level: Advanced
+
 
 Better Testing
 ==============
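The stop-gap described in this todo entry is exactly what the gm12u320 and udl changes later in this diff implement. As a minimal sketch of the pattern (the my_* names are hypothetical placeholders; the real drivers follow below):

	/* Sketch only: my_* identifiers are hypothetical. */
	static struct drm_gem_object *
	my_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
	{
		struct my_device *mydev = to_my_device(drm);

		/*
		 * mydev->dmadev was set at probe time via
		 * usb_intf_get_dma_device(); it is NULL when the USB host
		 * controller cannot do DMA, in which case importing fails
		 * cleanly instead of attaching to a DMA-less device.
		 */
		if (!mydev->dmadev)
			return ERR_PTR(-ENODEV);

		/*
		 * Attach the dma-buf to the controller's DMA device rather
		 * than to the USB device itself.
		 */
		return drm_gem_prime_import_dev(drm, dma_buf, mydev->dmadev);
	}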
@@ -5835,7 +5835,7 @@ M:	David Airlie <airlied@linux.ie>
 M:	Daniel Vetter <daniel@ffwll.ch>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
-B:	https://bugs.freedesktop.org/
+B:	https://gitlab.freedesktop.org/drm
 C:	irc://chat.freenode.net/dri-devel
 T:	git git://anongit.freedesktop.org/drm/drm
 F:	Documentation/devicetree/bindings/display/
@@ -228,6 +228,7 @@ source "drivers/gpu/drm/arm/Kconfig"
 config DRM_RADEON
 	tristate "ATI Radeon"
 	depends on DRM && PCI && MMU
+	depends on AGP || !AGP
 	select FW_LOADER
 	select DRM_KMS_HELPER
 	select DRM_TTM
@@ -180,6 +180,7 @@ extern uint amdgpu_smu_memory_pool_size;
 extern uint amdgpu_dc_feature_mask;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dm_abm_level;
+extern int amdgpu_backlight;
 extern struct amdgpu_mgpu_info mgpu_info;
 extern int amdgpu_ras_enable;
 extern uint amdgpu_ras_mask;
@@ -903,7 +903,7 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_AMD_PMC)
+#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
 		if (adev->flags & AMD_IS_APU)
 			return true;
@@ -781,6 +781,10 @@ uint amdgpu_dm_abm_level;
 MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
 module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
 
+int amdgpu_backlight = -1;
+MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
+module_param_named(backlight, amdgpu_backlight, bint, 0444);
+
 /**
  * DOC: tmz (int)
  * Trusted Memory Zone (TMZ) is a method to protect data being written
@@ -146,7 +146,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	size = mode_cmd->pitches[0] * height;
 	aligned_size = ALIGN(size, PAGE_SIZE);
 	ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags,
-				       ttm_bo_type_kernel, NULL, &gobj);
+				       ttm_bo_type_device, NULL, &gobj);
 	if (ret) {
 		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
 		return -ENOMEM;
@@ -2267,6 +2267,11 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1)
 		caps->aux_support = true;
 
+	if (amdgpu_backlight == 0)
+		caps->aux_support = false;
+	else if (amdgpu_backlight == 1)
+		caps->aux_support = true;
+
 	/* From the specification (CTA-861-G), for calculating the maximum
 	 * luminance we need to use:
 	 *	Luminance = 50*2**(CV/32)
@@ -3185,19 +3190,6 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
 #endif
 }
 
-static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
-{
-	bool rc;
-
-	if (!link)
-		return 1;
-
-	rc = dc_link_set_backlight_level_nits(link, true, brightness,
-					      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
-
-	return rc ? 0 : 1;
-}
-
 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
 				unsigned *min, unsigned *max)
 {
@@ -3260,9 +3252,10 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 	brightness = convert_brightness_from_user(&caps, bd->props.brightness);
 	// Change brightness based on AUX property
 	if (caps.aux_support)
-		return set_backlight_via_aux(link, brightness);
-
-	rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+		rc = dc_link_set_backlight_level_nits(link, true, brightness,
+						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+	else
+		rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
 
 	return rc ? 0 : 1;
 }
@@ -3270,11 +3263,27 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 {
 	struct amdgpu_display_manager *dm = bl_get_data(bd);
-	int ret = dc_link_get_backlight_level(dm->backlight_link);
+	struct amdgpu_dm_backlight_caps caps;
 
-	if (ret == DC_ERROR_UNEXPECTED)
-		return bd->props.brightness;
-	return convert_brightness_to_user(&dm->backlight_caps, ret);
+	amdgpu_dm_update_backlight_caps(dm);
+	caps = dm->backlight_caps;
+
+	if (caps.aux_support) {
+		struct dc_link *link = (struct dc_link *)dm->backlight_link;
+		u32 avg, peak;
+		bool rc;
+
+		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+		if (!rc)
+			return bd->props.brightness;
+		return convert_brightness_to_user(&caps, avg);
+	} else {
+		int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+		if (ret == DC_ERROR_UNEXPECTED)
+			return bd->props.brightness;
+		return convert_brightness_to_user(&caps, ret);
+	}
 }
 
 static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -4716,6 +4725,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
 	dc_plane_state->dcc = plane_info.dcc;
 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
+	dc_plane_state->flip_int_enabled = true;
 
 	/*
	 * Always set input transfer function, since plane state is refreshed
@@ -2602,7 +2602,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 		if (pipe_ctx->plane_state == NULL)
 			frame_ramp = 0;
 	} else {
-		ASSERT(false);
 		return false;
 	}
 
@@ -887,6 +887,7 @@ struct dc_plane_state {
 	int layer_index;
 
 	union surface_update_flags update_flags;
+	bool flip_int_enabled;
 	/* private to DC core */
 	struct dc_plane_status status;
 	struct dc_context *ctx;
@@ -1257,6 +1257,16 @@ void hubp1_soft_reset(struct hubp *hubp, bool reset)
 	REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
 }
 
+void hubp1_set_flip_int(struct hubp *hubp)
+{
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+	REG_UPDATE(DCSURF_SURFACE_FLIP_INTERRUPT,
+		SURFACE_FLIP_INT_MASK, 1);
+
+	return;
+}
+
 void hubp1_init(struct hubp *hubp)
 {
 	//do nothing
@@ -1290,6 +1300,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
 	.dmdata_load = NULL,
 	.hubp_soft_reset = hubp1_soft_reset,
 	.hubp_in_blank = hubp1_in_blank,
+	.hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 /*****************************************/
@@ -74,6 +74,7 @@
 	SRI(DCSURF_SURFACE_EARLIEST_INUSE_C, HUBPREQ, id),\
 	SRI(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C, HUBPREQ, id),\
 	SRI(DCSURF_SURFACE_CONTROL, HUBPREQ, id),\
+	SRI(DCSURF_SURFACE_FLIP_INTERRUPT, HUBPREQ, id),\
 	SRI(HUBPRET_CONTROL, HUBPRET, id),\
 	SRI(DCN_EXPANSION_MODE, HUBPREQ, id),\
 	SRI(DCHUBP_REQ_SIZE_CONFIG, HUBP, id),\
@@ -183,6 +184,7 @@
 	uint32_t DCSURF_SURFACE_EARLIEST_INUSE_C; \
 	uint32_t DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C; \
 	uint32_t DCSURF_SURFACE_CONTROL; \
+	uint32_t DCSURF_SURFACE_FLIP_INTERRUPT; \
 	uint32_t HUBPRET_CONTROL; \
 	uint32_t DCN_EXPANSION_MODE; \
 	uint32_t DCHUBP_REQ_SIZE_CONFIG; \
@@ -332,6 +334,7 @@
 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\
 	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\
+	HUBP_SF(HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK, mask_sh),\
 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\
 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\
 	HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\
@@ -531,6 +534,7 @@
 	type PRIMARY_SURFACE_DCC_IND_64B_BLK;\
 	type SECONDARY_SURFACE_DCC_EN;\
 	type SECONDARY_SURFACE_DCC_IND_64B_BLK;\
+	type SURFACE_FLIP_INT_MASK;\
 	type DET_BUF_PLANE1_BASE_ADDRESS;\
 	type CROSSBAR_SRC_CB_B;\
 	type CROSSBAR_SRC_CR_R;\
@@ -777,4 +781,6 @@ void hubp1_read_state_common(struct hubp *hubp);
 bool hubp1_in_blank(struct hubp *hubp);
 void hubp1_soft_reset(struct hubp *hubp, bool reset);
 
+void hubp1_set_flip_int(struct hubp *hubp);
+
 #endif
@@ -2196,6 +2196,13 @@ static void dcn10_enable_plane(
 	if (dc->debug.sanity_checks) {
 		hws->funcs.verify_allow_pstate_change_high(dc);
 	}
+
+	if (!pipe_ctx->top_pipe
+		&& pipe_ctx->plane_state
+		&& pipe_ctx->plane_state->flip_int_enabled
+		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
 }
 
 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
@@ -1597,6 +1597,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {
 	.validate_dml_output = hubp2_validate_dml_output,
 	.hubp_in_blank = hubp1_in_blank,
 	.hubp_soft_reset = hubp1_soft_reset,
+	.hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 
@@ -1146,6 +1146,12 @@ void dcn20_enable_plane(
 		pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
 	}
 
+	if (!pipe_ctx->top_pipe
+		&& pipe_ctx->plane_state
+		&& pipe_ctx->plane_state->flip_int_enabled
+		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
+		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
+
 //	if (dc->debug.sanity_checks) {
 //		dcn10_verify_allow_pstate_change_high(dc);
 //	}
@@ -838,6 +838,7 @@ static struct hubp_funcs dcn21_hubp_funcs = {
 	.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
 	.hubp_init = hubp21_init,
 	.validate_dml_output = hubp21_validate_dml_output,
+	.hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 bool hubp21_construct(
@@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.num_banks = 8,
 	.num_chans = 4,
 	.vmm_page_size_bytes = 4096,
-	.dram_clock_change_latency_us = 11.72,
+	.dram_clock_change_latency_us = 23.84,
 	.return_bus_width_bytes = 64,
 	.dispclk_dppclk_vco_speed_mhz = 3600,
 	.xfc_bus_transport_time_us = 4,
@@ -1062,8 +1062,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 {
 	int i;
 
-	DC_FP_START();
-
 	if (dc->bb_overrides.sr_exit_time_ns) {
 		for (i = 0; i < WM_SET_COUNT; i++) {
 			dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us =
@@ -1088,8 +1086,6 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb)
 				dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
 		}
 	}
-
-	DC_FP_END();
 }
 
 void dcn21_calculate_wm(
@@ -1339,7 +1335,7 @@ static noinline bool dcn21_validate_bandwidth_fp(struct dc *dc,
 	int vlevel = 0;
 	int pipe_split_from[MAX_PIPES];
 	int pipe_cnt = 0;
-	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+	display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
 	DC_LOGGER_INIT(dc->ctx->logger);
 
 	BW_VAL_TRACE_COUNT();
@@ -511,6 +511,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
 	.hubp_init = hubp3_init,
 	.hubp_in_blank = hubp1_in_blank,
 	.hubp_soft_reset = hubp1_soft_reset,
+	.hubp_set_flip_int = hubp1_set_flip_int,
 };
 
 bool hubp3_construct(
@@ -2508,6 +2508,19 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
 	.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
 };
 
+#define CTX ctx
+
+#define REG(reg_name) \
+	(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
+
+static uint32_t read_pipe_fuses(struct dc_context *ctx)
+{
+	uint32_t value = REG_READ(CC_DC_PIPE_DIS);
+	/* Support for max 6 pipes */
+	value = value & 0x3f;
+	return value;
+}
+
 static bool dcn30_resource_construct(
 	uint8_t num_virtual_links,
 	struct dc *dc,
@@ -2517,6 +2530,15 @@ static bool dcn30_resource_construct(
 	struct dc_context *ctx = dc->ctx;
 	struct irq_service_init_data init_data;
 	struct ddc_service_init_data ddc_init_data;
+	uint32_t pipe_fuses = read_pipe_fuses(ctx);
+	uint32_t num_pipes = 0;
+
+	if (!(pipe_fuses == 0 || pipe_fuses == 0x3e)) {
+		BREAK_TO_DEBUGGER();
+		dm_error("DC: Unexpected fuse recipe for navi2x !\n");
+		/* fault to single pipe */
+		pipe_fuses = 0x3e;
+	}
 
 	DC_FP_START();
 
@@ -2650,6 +2672,15 @@ static bool dcn30_resource_construct(
 	/* PP Lib and SMU interfaces */
 	init_soc_bounding_box(dc, pool);
 
+	num_pipes = dcn3_0_ip.max_num_dpp;
+
+	for (i = 0; i < dcn3_0_ip.max_num_dpp; i++)
+		if (pipe_fuses & 1 << i)
+			num_pipes--;
+
+	dcn3_0_ip.max_num_dpp = num_pipes;
+	dcn3_0_ip.max_num_otg = num_pipes;
+
 	dml_init_instance(&dc->dml, &dcn3_0_soc, &dcn3_0_ip, DML_PROJECT_DCN30);
 
 	/* IRQ */
@@ -1619,12 +1619,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
+static void calculate_wm_set_for_vlevel(
+		int vlevel,
+		struct wm_range_table_entry *table_entry,
+		struct dcn_watermarks *wm_set,
+		struct display_mode_lib *dml,
+		display_e2e_pipe_params_st *pipes,
+		int pipe_cnt)
+{
+	double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+	ASSERT(vlevel < dml->soc.num_states);
+	/* only pipe 0 is read for voltage and dcf/soc clocks */
+	pipes[0].clks_cfg.voltage = vlevel;
+	pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+static void dcn301_calculate_wm_and_dlg(
+		struct dc *dc, struct dc_state *context,
+		display_e2e_pipe_params_st *pipes,
+		int pipe_cnt,
+		int vlevel_req)
+{
+	int i, pipe_idx;
+	int vlevel, vlevel_max;
+	struct wm_range_table_entry *table_entry;
+	struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+	ASSERT(bw_params);
+
+	vlevel_max = bw_params->clk_table.num_entries - 1;
+
+	/* WM Set D */
+	table_entry = &bw_params->wm_table.entries[WM_D];
+	if (table_entry->wm_type == WM_TYPE_RETRAINING)
+		vlevel = 0;
+	else
+		vlevel = vlevel_max;
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+						&context->bw_ctx.dml, pipes, pipe_cnt);
+	/* WM Set C */
+	table_entry = &bw_params->wm_table.entries[WM_C];
+	vlevel = min(max(vlevel_req, 2), vlevel_max);
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+						&context->bw_ctx.dml, pipes, pipe_cnt);
+	/* WM Set B */
+	table_entry = &bw_params->wm_table.entries[WM_B];
+	vlevel = min(max(vlevel_req, 1), vlevel_max);
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+						&context->bw_ctx.dml, pipes, pipe_cnt);
+
+	/* WM Set A */
+	table_entry = &bw_params->wm_table.entries[WM_A];
+	vlevel = min(vlevel_req, vlevel_max);
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+						&context->bw_ctx.dml, pipes, pipe_cnt);
+
+	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+		if (!context->res_ctx.pipe_ctx[i].stream)
+			continue;
+
+		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+		if (dc->config.forced_clocks) {
+			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+		}
+		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+		pipe_idx++;
+	}
+
+	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
+
 static struct resource_funcs dcn301_res_pool_funcs = {
 	.destroy = dcn301_destroy_resource_pool,
 	.link_enc_create = dcn301_link_encoder_create,
 	.panel_cntl_create = dcn301_panel_cntl_create,
 	.validate_bandwidth = dcn30_validate_bandwidth,
-	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -191,6 +191,8 @@ struct hubp_funcs {
 	bool (*hubp_in_blank)(struct hubp *hubp);
 	void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
 
+	void (*hubp_set_flip_int)(struct hubp *hubp);
+
 };
 
 #endif
@@ -5216,10 +5216,10 @@ static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
 		for (j = 0; j < dep_sclk_table->count; j++) {
 			valid_entry = false;
 			for (k = 0; k < watermarks->num_wm_sets; k++) {
-				if (dep_sclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz &&
-				    dep_sclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz &&
-				    dep_mclk_table->entries[i].clk / 10 >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz &&
-				    dep_mclk_table->entries[i].clk / 10 < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz) {
+				if (dep_sclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
+				    dep_sclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
+				    dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
+				    dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
 					valid_entry = true;
 					table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
 					break;
@@ -1505,6 +1505,48 @@ static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
+static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	uint32_t pcie_gen = 0, pcie_width = 0;
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	int i;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_gen = 3;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_gen = 2;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_gen = 1;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_gen = 0;
+
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 6;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 5;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 3;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+		if (pp_table->PcieGenSpeed[i] > pcie_gen)
+			pp_table->PcieGenSpeed[i] = pcie_gen;
+
+		if (pp_table->PcieLaneCount[i] > pcie_width)
+			pp_table->PcieLaneCount[i] = pcie_width;
+	}
+
+	return 0;
+}
+
 static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
 {
 	int result = -1;
@@ -2556,6 +2598,11 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
 			"Failed to initialize Link Level!",
 			return result);
 
+	result = vega10_override_pcie_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"Failed to override pcie parameters!",
+			return result);
+
 	result = vega10_populate_all_graphic_levels(hwmgr);
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to initialize Graphics Level!",
@@ -2922,6 +2969,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 	return 0;
 }
 
+
 static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
 {
 	struct vega10_hwmgr *data = hwmgr->backend;
@@ -481,6 +481,67 @@ static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
 	dpm_state->hard_max_level = 0xffff;
 }
 
+static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+	struct vega12_hwmgr *data =
+			(struct vega12_hwmgr *)(hwmgr->backend);
+	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	int i;
+	int ret;
+
+	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+		pcie_gen = 3;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+		pcie_gen = 2;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+		pcie_gen = 1;
+	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+		pcie_gen = 0;
+
+	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+		pcie_width = 6;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+		pcie_width = 5;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+		pcie_width = 4;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+		pcie_width = 3;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+		pcie_width = 2;
+	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+		pcie_width = 1;
+
+	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
+	 */
+	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
+			pp_table->PcieGenSpeed[i];
+		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
+			pp_table->PcieLaneCount[i];
+
+		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
+		    pp_table->PcieLaneCount[i]) {
+			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+				PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+				NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+		}
+
+		/* update the pptable */
+		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
+		pp_table->PcieLaneCount[i] = pcie_width_arg;
+	}
+
+	return 0;
+}
+
 static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
 		PPCLK_e clk_id, uint32_t *num_of_levels)
 {
@@ -968,6 +1029,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"Failed to enable all smu features!",
 			return result);
 
+	result = vega12_override_pcie_parameters(hwmgr);
+	PP_ASSERT_WITH_CODE(!result,
+			"[EnableDPMTasks] Failed to override pcie parameters!",
+			return result);
+
 	tmp_result = vega12_power_control_set_level(hwmgr);
 	PP_ASSERT_WITH_CODE(!tmp_result,
 			"Failed to power control set level!",
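For clarity, a hedged worked example of the smu_pcie_arg encoding documented in the comment above, with values chosen purely for illustration:

	/* Illustrative only: encode link level 1, gen code 2 (GEN3) and
	 * width code 6 (x16), following the bit layout from the comment
	 * above: bits 31:16 = LCLK DPM level, 15:8 = PCIE GEN,
	 * 7:0 = lane width.
	 */
	uint32_t smu_pcie_arg = (1 << 16) | (2 << 8) | 6;	/* == 0x00010206 */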
@@ -831,7 +831,9 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
 	struct vega20_hwmgr *data =
 			(struct vega20_hwmgr *)(hwmgr->backend);
-	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
+	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg, pcie_gen_arg, pcie_width_arg;
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	int i;
 	int ret;
 
 	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
@@ -860,17 +862,27 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
 	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
 	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds is x1 to x32
 	 */
-	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
-	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-			PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
-			NULL);
-	PP_ASSERT_WITH_CODE(!ret,
-		"[OverridePcieParameters] Attempt to override pcie params failed!",
-		return ret);
+	for (i = 0; i < NUM_LINK_LEVELS; i++) {
+		pcie_gen_arg = (pp_table->PcieGenSpeed[i] > pcie_gen) ? pcie_gen :
+			pp_table->PcieGenSpeed[i];
+		pcie_width_arg = (pp_table->PcieLaneCount[i] > pcie_width) ? pcie_width :
+			pp_table->PcieLaneCount[i];
+
+		if (pcie_gen_arg != pp_table->PcieGenSpeed[i] || pcie_width_arg !=
+		    pp_table->PcieLaneCount[i]) {
+			smu_pcie_arg = (i << 16) | (pcie_gen_arg << 8) | pcie_width_arg;
+			ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+					PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+					NULL);
+			PP_ASSERT_WITH_CODE(!ret,
+				"[OverridePcieParameters] Attempt to override pcie params failed!",
+				return ret);
+		}
 
-	data->pcie_parameters_override = true;
-	data->pcie_gen_level1 = pcie_gen;
-	data->pcie_width_level1 = pcie_width;
+		/* update the pptable */
+		pp_table->PcieGenSpeed[i] = pcie_gen_arg;
+		pp_table->PcieLaneCount[i] = pcie_width_arg;
+	}
 
 	return 0;
 }
@@ -3319,9 +3331,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 			data->od8_settings.od8_settings_array;
 	OverDriveTable_t *od_table =
 			&(data->smc_state_table.overdrive_table);
-	struct phm_ppt_v3_information *pptable_information =
-		(struct phm_ppt_v3_information *)hwmgr->pptable;
-	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
+	PPTable_t *pptable = &(data->smc_state_table.pp_table);
 	struct pp_clock_levels_with_latency clocks;
 	struct vega20_single_dpm_table *fclk_dpm_table =
 			&(data->dpm_table.fclk_table);
@@ -3420,13 +3430,9 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 		current_lane_width =
 			vega20_get_current_pcie_link_width_level(hwmgr);
 		for (i = 0; i < NUM_LINK_LEVELS; i++) {
-			if (i == 1 && data->pcie_parameters_override) {
-				gen_speed = data->pcie_gen_level1;
-				lane_width = data->pcie_width_level1;
-			} else {
-				gen_speed = pptable->PcieGenSpeed[i];
-				lane_width = pptable->PcieLaneCount[i];
-			}
+			gen_speed = pptable->PcieGenSpeed[i];
+			lane_width = pptable->PcieLaneCount[i];
+
 			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
 					(gen_speed == 0) ? "2.5GT/s," :
 					(gen_speed == 1) ? "5.0GT/s," :
@@ -2048,7 +2048,7 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
 
 	if (shadow)
 		vfree(shadow);
-	else
+	else if (fb_helper->buffer)
 		drm_client_buffer_vunmap(fb_helper->buffer);
 
 	drm_client_framebuffer_delete(fb_helper->buffer);
@@ -357,13 +357,14 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 	if (--shmem->vmap_use_count > 0)
 		return;
 
-	if (obj->import_attach)
+	if (obj->import_attach) {
 		dma_buf_vunmap(obj->import_attach->dmabuf, map);
-	else
+	} else {
 		vunmap(shmem->vaddr);
+		drm_gem_shmem_put_pages(shmem);
+	}
 
 	shmem->vaddr = NULL;
-	drm_gem_shmem_put_pages(shmem);
 }
 
 /*
@@ -525,14 +526,28 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 	loff_t num_pages = obj->size >> PAGE_SHIFT;
+	vm_fault_t ret;
 	struct page *page;
+	pgoff_t page_offset;
 
-	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
-		return VM_FAULT_SIGBUS;
+	/* We don't use vmf->pgoff since that has the fake offset */
+	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
 
-	page = shmem->pages[vmf->pgoff];
+	mutex_lock(&shmem->pages_lock);
 
-	return vmf_insert_page(vma, vmf->address, page);
+	if (page_offset >= num_pages ||
+	    WARN_ON_ONCE(!shmem->pages) ||
+	    shmem->madv < 0) {
+		ret = VM_FAULT_SIGBUS;
+	} else {
+		page = shmem->pages[page_offset];
+
+		ret = vmf_insert_page(vma, vmf->address, page);
+	}
+
+	mutex_unlock(&shmem->pages_lock);
+
+	return ret;
 }
 
 static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
@@ -581,9 +596,6 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
 	struct drm_gem_shmem_object *shmem;
 	int ret;
 
-	/* Remove the fake offset */
-	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
-
 	if (obj->import_attach) {
 		/* Drop the reference drm_gem_mmap_obj() acquired.*/
 		drm_gem_object_put(obj);
@@ -99,6 +99,8 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
 	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
 		return -EFAULT;
 
+	memset(&v, 0, sizeof(v));
+
 	v = (struct drm_version) {
 		.name_len = v32.name_len,
 		.name = compat_ptr(v32.name),
@@ -137,6 +139,9 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
 
 	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
 		return -EFAULT;
+
+	memset(&uq, 0, sizeof(uq));
+
 	uq = (struct drm_unique){
 		.unique_len = uq32.unique_len,
 		.unique = compat_ptr(uq32.unique),
@@ -265,6 +270,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
 	if (copy_from_user(&c32, argp, sizeof(c32)))
 		return -EFAULT;
 
+	memset(&client, 0, sizeof(client));
+
 	client.idx = c32.idx;
 
 	err = drm_ioctl_kernel(file, drm_getclient, &client, 0);
@@ -852,6 +859,8 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
 	if (copy_from_user(&req32, argp, sizeof(req32)))
 		return -EFAULT;
 
+	memset(&req, 0, sizeof(req));
+
 	req.request.type = req32.request.type;
 	req.request.sequence = req32.request.sequence;
 	req.request.signal = req32.request.signal;
@@ -889,6 +898,8 @@ static int compat_drm_mode_addfb2(struct file *file, unsigned int cmd,
 	struct drm_mode_fb_cmd2 req64;
 	int err;
 
+	memset(&req64, 0, sizeof(req64));
+
 	if (copy_from_user(&req64, argp,
 			   offsetof(drm_mode_fb_cmd232_t, modifier)))
 		return -EFAULT;
@@ -713,9 +713,12 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 		goto err_status;
 	}
 
+	err = intel_engine_init_cmd_parser(engine);
+	if (err)
+		goto err_cmd_parser;
+
 	intel_engine_init_active(engine, ENGINE_PHYSICAL);
 	intel_engine_init_execlists(engine);
-	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 	intel_engine_init_retire(engine);
 
@@ -732,6 +735,8 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 
 	return 0;
 
+err_cmd_parser:
+	intel_breadcrumbs_free(engine->breadcrumbs);
 err_status:
 	cleanup_status_page(engine);
 	return err;
@@ -940,7 +940,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
  * struct intel_engine_cs based on whether the platform requires software
  * command parsing.
  */
-void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
+int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 {
 	const struct drm_i915_cmd_table *cmd_tables;
 	int cmd_table_count;
@@ -948,7 +948,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 
 	if (!IS_GEN(engine->i915, 7) && !(IS_GEN(engine->i915, 9) &&
 					  engine->class == COPY_ENGINE_CLASS))
-		return;
+		return 0;
 
 	switch (engine->class) {
 	case RENDER_CLASS:
@@ -1013,19 +1013,19 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 		break;
 	default:
 		MISSING_CASE(engine->class);
-		return;
+		goto out;
 	}
 
 	if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
 		drm_err(&engine->i915->drm,
 			"%s: command descriptions are not sorted\n",
 			engine->name);
-		return;
+		goto out;
 	}
 	if (!validate_regs_sorted(engine)) {
 		drm_err(&engine->i915->drm,
 			"%s: registers are not sorted\n", engine->name);
-		return;
+		goto out;
 	}
 
 	ret = init_hash_table(engine, cmd_tables, cmd_table_count);
@@ -1033,10 +1033,17 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
 		drm_err(&engine->i915->drm,
 			"%s: initialised failed!\n", engine->name);
 		fini_hash_table(engine);
-		return;
+		goto out;
 	}
 
 	engine->flags |= I915_ENGINE_USING_CMD_PARSER;
+
+out:
+	if (intel_engine_requires_cmd_parser(engine) &&
+	    !intel_engine_using_cmd_parser(engine))
+		return -EINVAL;
+
+	return 0;
 }
 
 /**
@@ -1952,7 +1952,7 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
 
 /* i915_cmd_parser.c */
 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
-void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
+int intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
 			    struct i915_vma *batch,
@@ -482,6 +482,16 @@ static int meson_probe_remote(struct platform_device *pdev,
 	return count;
 }
 
+static void meson_drv_shutdown(struct platform_device *pdev)
+{
+	struct meson_drm *priv = dev_get_drvdata(&pdev->dev);
+	struct drm_device *drm = priv->drm;
+
+	DRM_DEBUG_DRIVER("\n");
+	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
+}
+
 static int meson_drv_probe(struct platform_device *pdev)
 {
 	struct component_match *match = NULL;
@@ -553,6 +563,7 @@ static const struct dev_pm_ops meson_drv_pm_ops = {
 
 static struct platform_driver meson_drm_platform_driver = {
 	.probe      = meson_drv_probe,
+	.shutdown   = meson_drv_shutdown,
 	.driver     = {
 		.name	= "meson-drm",
 		.of_match_table = dt_match,
@@ -556,7 +556,8 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->num_pages; ++i) {
+	i = 0;
+	while (i < ttm_dma->num_pages) {
 		struct page *p = ttm_dma->pages[i];
 		size_t num_pages = 1;
 
@@ -587,7 +588,8 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->num_pages; ++i) {
+	i = 0;
+	while (i < ttm_dma->num_pages) {
 		struct page *p = ttm_dma->pages[i];
 		size_t num_pages = 1;
 
@@ -328,6 +328,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
 
 	head.id = i;
 	head.flags = 0;
+	head.surface_id = 0;
 	oldcount = qdev->monitors_config->count;
 	if (crtc->state->active) {
 		struct drm_display_mode *mode = &crtc->mode;
@@ -321,7 +321,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 				       int type, struct qxl_release **release,
 				       struct qxl_bo **rbo)
 {
-	struct qxl_bo *bo;
+	struct qxl_bo *bo, *free_bo = NULL;
 	int idr_ret;
 	int ret = 0;
 	union qxl_release_info *info;
@@ -347,7 +347,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 
 	mutex_lock(&qdev->release_mutex);
 	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
-		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+		free_bo = qdev->current_release_bo[cur_idx];
 		qdev->current_release_bo_offset[cur_idx] = 0;
 		qdev->current_release_bo[cur_idx] = NULL;
 	}
@@ -355,6 +355,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
 		if (ret) {
 			mutex_unlock(&qdev->release_mutex);
+			if (free_bo) {
+				qxl_bo_unpin(free_bo);
+				qxl_bo_unref(&free_bo);
+			}
 			qxl_release_free(qdev, *release);
 			return ret;
 		}
@@ -370,6 +374,10 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 	*rbo = bo;
 
 	mutex_unlock(&qdev->release_mutex);
+	if (free_bo) {
+		qxl_bo_unpin(free_bo);
+		qxl_bo_unref(&free_bo);
+	}
 
 	ret = qxl_release_list_add(*release, bo);
 	qxl_bo_unref(&bo);
@@ -574,6 +574,8 @@ struct radeon_gem {
 	struct list_head	objects;
 };
 
+extern const struct drm_gem_object_funcs radeon_gem_object_funcs;
+
 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
@@ -43,7 +43,7 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
 int radeon_gem_prime_pin(struct drm_gem_object *obj);
 void radeon_gem_prime_unpin(struct drm_gem_object *obj);
 
-static const struct drm_gem_object_funcs radeon_gem_object_funcs;
+const struct drm_gem_object_funcs radeon_gem_object_funcs;
 
 static void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -227,7 +227,7 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
 	return r;
 }
 
-static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
+const struct drm_gem_object_funcs radeon_gem_object_funcs = {
 	.free = radeon_gem_object_free,
 	.open = radeon_gem_object_open,
 	.close = radeon_gem_object_close,
@@ -56,6 +56,8 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
 	if (ret)
 		return ERR_PTR(ret);
 
+	bo->tbo.base.funcs = &radeon_gem_object_funcs;
+
 	mutex_lock(&rdev->gem.mutex);
 	list_add_tail(&bo->list, &rdev->gem.objects);
 	mutex_unlock(&rdev->gem.mutex);
@@ -83,6 +83,7 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)");
 
 struct gm12u320_device {
 	struct drm_device	 dev;
+	struct device		*dmadev;
 	struct drm_simple_display_pipe pipe;
 	struct drm_connector	 conn;
 	unsigned char		*cmd_buf;
@@ -601,6 +602,22 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
 	DRM_FORMAT_MOD_INVALID
 };
 
+/*
+ * FIXME: Dma-buf sharing requires DMA support by the importing device.
+ *        This function is a workaround to make USB devices work as well.
+ *        See todo.rst for how to fix the issue in the dma-buf framework.
+ */
+static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev,
+							 struct dma_buf *dma_buf)
+{
+	struct gm12u320_device *gm12u320 = to_gm12u320(dev);
+
+	if (!gm12u320->dmadev)
+		return ERR_PTR(-ENODEV);
+
+	return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev);
+}
+
 DEFINE_DRM_GEM_FOPS(gm12u320_fops);
 
 static const struct drm_driver gm12u320_drm_driver = {
@@ -614,6 +631,7 @@ static const struct drm_driver gm12u320_drm_driver = {
 
 	.fops = &gm12u320_fops,
 	DRM_GEM_SHMEM_DRIVER_OPS,
+	.gem_prime_import = gm12u320_gem_prime_import,
 };
 
 static const struct drm_mode_config_funcs gm12u320_mode_config_funcs = {
@@ -640,15 +658,18 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
 				      struct gm12u320_device, dev);
 	if (IS_ERR(gm12u320))
 		return PTR_ERR(gm12u320);
+	dev = &gm12u320->dev;
+
+	gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+	if (!gm12u320->dmadev)
+		drm_warn(dev, "buffer sharing not supported"); /* not an error */
 
 	INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
 	mutex_init(&gm12u320->fb_update.lock);
 
-	dev = &gm12u320->dev;
-
 	ret = drmm_mode_config_init(dev);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	dev->mode_config.min_width = GM12U320_USER_WIDTH;
 	dev->mode_config.max_width = GM12U320_USER_WIDTH;
@@ -658,15 +679,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
 
 	ret = gm12u320_usb_alloc(gm12u320);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	ret = gm12u320_set_ecomode(gm12u320);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	ret = gm12u320_conn_init(gm12u320);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	ret = drm_simple_display_pipe_init(&gm12u320->dev,
 					   &gm12u320->pipe,
@@ -676,24 +697,31 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
 					   gm12u320_pipe_modifiers,
 					   &gm12u320->conn);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	drm_mode_config_reset(dev);
 
 	usb_set_intfdata(interface, dev);
 	ret = drm_dev_register(dev, 0);
 	if (ret)
-		return ret;
+		goto err_put_device;
 
 	drm_fbdev_generic_setup(dev, 0);
 
 	return 0;
+
+err_put_device:
+	put_device(gm12u320->dmadev);
+	return ret;
 }
 
 static void gm12u320_usb_disconnect(struct usb_interface *interface)
 {
 	struct drm_device *dev = usb_get_intfdata(interface);
+	struct gm12u320_device *gm12u320 = to_gm12u320(dev);
 
+	put_device(gm12u320->dmadev);
+	gm12u320->dmadev = NULL;
 	drm_dev_unplug(dev);
 	drm_atomic_helper_shutdown(dev);
 }
@@ -136,7 +136,8 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_resource_manager *man;
 
-	dma_resv_assert_held(bo->base.resv);
+	if (!bo->deleted)
+		dma_resv_assert_held(bo->base.resv);
 
 	if (bo->pin_count) {
 		ttm_bo_del_from_lru(bo);
@@ -508,8 +509,11 @@ static void ttm_bo_release(struct kref *kref)
 		 * Make pinned bos immediately available to
 		 * shrinkers, now that they are queued for
 		 * destruction.
+		 *
+		 * FIXME: QXL is triggering this. Can be removed when the
+		 * driver is fixed.
 		 */
-		if (WARN_ON(bo->pin_count)) {
+		if (WARN_ON_ONCE(bo->pin_count)) {
 			bo->pin_count = 0;
 			ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
 		}
@@ -268,13 +268,13 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
 /* Remove a pool_type from the global shrinker list and free all pages */
 static void ttm_pool_type_fini(struct ttm_pool_type *pt)
 {
-	struct page *p, *tmp;
+	struct page *p;
 
 	mutex_lock(&shrinker_lock);
 	list_del(&pt->shrinker_list);
 	mutex_unlock(&shrinker_lock);
 
-	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
+	while ((p = ttm_pool_type_take(pt)))
 		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
 }
 
@@ -32,6 +32,22 @@ static int udl_usb_resume(struct usb_interface *interface)
 	return drm_mode_config_helper_resume(dev);
 }
 
+/*
+ * FIXME: Dma-buf sharing requires DMA support by the importing device.
+ *        This function is a workaround to make USB devices work as well.
+ *        See todo.rst for how to fix the issue in the dma-buf framework.
+ */
+static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev,
+							   struct dma_buf *dma_buf)
+{
+	struct udl_device *udl = to_udl(dev);
+
+	if (!udl->dmadev)
+		return ERR_PTR(-ENODEV);
+
+	return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev);
+}
+
 DEFINE_DRM_GEM_FOPS(udl_driver_fops);
 
 static const struct drm_driver driver = {
@@ -40,6 +56,7 @@ static const struct drm_driver driver = {
 	/* GEM hooks */
 	.fops = &udl_driver_fops,
 	DRM_GEM_SHMEM_DRIVER_OPS,
+	.gem_prime_import = udl_driver_gem_prime_import,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
@@ -50,6 +50,7 @@ struct urb_list {
 struct udl_device {
 	struct drm_device drm;
 	struct device *dev;
+	struct device *dmadev;
 
 	struct drm_simple_display_pipe display_pipe;
 
@@ -315,6 +315,10 @@ int udl_init(struct udl_device *udl)
 
 	DRM_DEBUG("\n");
 
+	udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
+	if (!udl->dmadev)
+		drm_warn(dev, "buffer sharing not supported"); /* not an error */
+
 	mutex_init(&udl->gem_lock);
 
 	if (!udl_parse_vendor_descriptor(udl)) {
@@ -343,12 +347,18 @@ int udl_init(struct udl_device *udl)
 err:
 	if (udl->urbs.count)
 		udl_free_urb_list(dev);
+	put_device(udl->dmadev);
 	DRM_ERROR("%d\n", ret);
 	return ret;
 }
 
 int udl_drop_usb(struct drm_device *dev)
 {
+	struct udl_device *udl = to_udl(dev);
+
 	udl_free_urb_list(dev);
+	put_device(udl->dmadev);
+	udl->dmadev = NULL;
 
 	return 0;
 }
@@ -748,6 +748,38 @@ void usb_put_intf(struct usb_interface *intf)
 }
 EXPORT_SYMBOL_GPL(usb_put_intf);
 
+/**
+ * usb_intf_get_dma_device - acquire a reference on the usb interface's DMA endpoint
+ * @intf: the usb interface
+ *
+ * While a USB device cannot perform DMA operations by itself, many USB
+ * controllers can. A call to usb_intf_get_dma_device() returns the DMA endpoint
+ * for the given USB interface, if any. The returned device structure must be
+ * released with put_device().
+ *
+ * See also usb_get_dma_device().
+ *
+ * Returns: A reference to the usb interface's DMA endpoint; or NULL if none
+ *          exists.
+ */
+struct device *usb_intf_get_dma_device(struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct device *dmadev;
+
+	if (!udev->bus)
+		return NULL;
+
+	dmadev = get_device(udev->bus->sysdev);
+	if (!dmadev || !dmadev->dma_mask) {
+		put_device(dmadev);
+		return NULL;
+	}
+
+	return dmadev;
+}
+EXPORT_SYMBOL_GPL(usb_intf_get_dma_device);
+
 /* USB device locking
 *
 * USB devices and interfaces are locked using the semaphore in their
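Driver-side, the new helper is paired with put_device(), as the gm12u320 and udl hunks in this series show; in outline (surrounding driver code elided):

	/* At probe: may return NULL when no DMA-capable controller exists. */
	dmadev = usb_intf_get_dma_device(intf);

	/* ...route PRIME imports through dmadev via drm_gem_prime_import_dev()... */

	/* At disconnect: drop the reference; put_device(NULL) is a no-op. */
	put_device(dmadev);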
@@ -287,11 +287,8 @@ static inline void aty_st_8(int regindex, u8 val, const struct atyfb_par *par)
 #endif
 }
 
-#if defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) || \
-defined (CONFIG_FB_ATY_BACKLIGHT)
 extern void aty_st_lcd(int index, u32 val, const struct atyfb_par *par);
 extern u32 aty_ld_lcd(int index, const struct atyfb_par *par);
-#endif
 
 /*
  * DAC operations
@@ -133,7 +133,7 @@
 #define PRINTKE(fmt, args...)	 printk(KERN_ERR "atyfb: " fmt, ## args)
 
 #if defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_GENERIC_LCD) || \
-defined(CONFIG_FB_ATY_BACKLIGHT)
+defined(CONFIG_FB_ATY_BACKLIGHT) || defined (CONFIG_PPC_PMAC)
 static const u32 lt_lcd_regs[] = {
 	CNFG_PANEL_LG,
 	LCD_GEN_CNTL_LG,
@@ -175,8 +175,8 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
 		return aty_ld_le32(LCD_DATA, par);
 	}
 }
-#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) \
-	 defined(CONFIG_FB_ATY_GENERIC_LCD) */
+#else /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) ||
+	 defined(CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_PPC_PMAC) */
 void aty_st_lcd(int index, u32 val, const struct atyfb_par *par)
 { }
 
@@ -184,7 +184,8 @@ u32 aty_ld_lcd(int index, const struct atyfb_par *par)
 {
 	return 0;
 }
-#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined (CONFIG_FB_ATY_GENERIC_LCD) */
+#endif /* defined(CONFIG_PMAC_BACKLIGHT) || defined(CONFIG_FB_ATY_BACKLIGHT) ||
+	  defined (CONFIG_FB_ATY_GENERIC_LCD) || defined(CONFIG_PPC_PMAC) */
 
 #ifdef CONFIG_FB_ATY_GENERIC_LCD
 /*
@@ -746,6 +746,8 @@ extern int usb_lock_device_for_reset(struct usb_device *udev,
 extern int usb_reset_device(struct usb_device *dev);
 extern void usb_queue_reset_device(struct usb_interface *dev);
 
+extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
+
 #ifdef CONFIG_ACPI
 extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
 			bool enable);