Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-26 21:54:11 +08:00)
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "A bunch of radeon fixes for oops on module unload, and problems with
  resetting the dma engine, one nouveau fix for black boxes in rendering
  on my mbp retina, one sti fix, and a couple of intel fixes"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/nouveau: ltc/gf100-: fix cbc issues on certain boards
  drm/bochs: add missing drm_connector_register call
  drm/cirrus: add missing drm_connector_register call
  drm/radeon: Fix typo 'addr' -> 'entry' in rs400_gart_set_page
  drm/nouveau/runpm: fix module unload
  drm/radeon/px: fix module unload
  vgaswitcheroo: add vga_switcheroo_fini_domain_pm_ops
  drm/radeon: don't reset dma on r6xx-evergreen init
  drm/radeon: don't reset sdma on CIK init
  drm/radeon: don't reset dma on NI/SI init
  drm/radeon/dpm: fix resume on mullins
  drm/radeon: Disable HDP flush before every CS again for < r600
  drm/radeon: delete unused PTE_* defines
  drm/i915: Add limited color range readout for HDMI/DP ports on g4x/vlv/chv
  drm: sti: do not iterate over the info frame array
  drm/i915: Fix SRC_COPY width on 830/845g
Commit 1734a6e47f
@@ -250,6 +250,7 @@ static void bochs_connector_init(struct drm_device *dev)
 			   DRM_MODE_CONNECTOR_VIRTUAL);
 	drm_connector_helper_add(connector,
 				 &bochs_connector_connector_helper_funcs);
+	drm_connector_register(connector);
 }
@@ -555,6 +555,7 @@ static struct drm_connector *cirrus_vga_init(struct drm_device *dev)

 	drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);

+	drm_connector_register(connector);
 	return connector;
 }
@@ -1631,6 +1631,10 @@ static void intel_dp_get_config(struct intel_encoder *encoder,

 	pipe_config->adjusted_mode.flags |= flags;

+	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
+	    tmp & DP_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
 	pipe_config->has_dp_encoder = true;

 	intel_dp_get_m_n(crtc, pipe_config);
@@ -712,7 +712,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 				  struct intel_crtc_config *pipe_config)
 {
 	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp, flags = 0;
 	int dotclock;

@@ -734,6 +735,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	if (tmp & HDMI_MODE_SELECT_HDMI)
 		pipe_config->has_audio = true;

+	if (!HAS_PCH_SPLIT(dev) &&
+	    tmp & HDMI_COLOR_RANGE_16_235)
+		pipe_config->limited_color_range = true;
+
 	pipe_config->adjusted_mode.flags |= flags;

 	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
@@ -1400,7 +1400,7 @@ i830_dispatch_execbuffer(struct intel_engine_cs *ring,
 		 */
 		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
 		intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
 		intel_ring_emit(ring, cs_offset);
 		intel_ring_emit(ring, 4096);
 		intel_ring_emit(ring, offset);
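For context on the SRC_COPY fix: the geometry dword packs the row count in the upper 16 bits and the row width in the lower 16 bits, while the dword above it programs a 4096-byte destination pitch. A minimal sketch of the arithmetic (standalone C, `len` is an illustrative batch size, and it assumes, as the fix implies, that the low 16 bits are a byte width rather than a pixel count):

#include <stdio.h>

/* Same rounding helper as the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 12000;                     /* illustrative batch length in bytes */
	unsigned int rows = DIV_ROUND_UP(len, 4096);  /* value emitted in the upper 16 bits */

	/* Bytes actually copied for a given row width, with a 4096-byte pitch. */
	printf("rows=%u, width=1024 covers %u bytes (short of %u)\n",
	       rows, rows * 1024, len);
	printf("rows=%u, width=4096 covers %u bytes (whole batch)\n",
	       rows, rows * 4096, len);
	return 0;
}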
@@ -200,7 +200,6 @@ nvc0_bar_init(struct nouveau_object *object)

 	nv_mask(priv, 0x000200, 0x00000100, 0x00000000);
 	nv_mask(priv, 0x000200, 0x00000100, 0x00000100);
-	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000);

 	nv_wr32(priv, 0x001704, 0x80000000 | priv->bar[1].mem->addr >> 12);
 	if (priv->bar[0].mem)
@@ -60,6 +60,7 @@ nvc0_fb_init(struct nouveau_object *object)

 	if (priv->r100c10_page)
 		nv_wr32(priv, 0x100c10, priv->r100c10 >> 8);
+	nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
 	return 0;
 }
@@ -98,6 +98,7 @@ static int
 gf100_ltc_init(struct nouveau_object *object)
 {
 	struct nvkm_ltc_priv *priv = (void *)object;
+	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
 	int ret;

 	ret = nvkm_ltc_init(priv);
@@ -107,6 +108,7 @@ gf100_ltc_init(struct nouveau_object *object)
 	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
 	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
 	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 	return 0;
 }
@@ -28,6 +28,7 @@ static int
 gk104_ltc_init(struct nouveau_object *object)
 {
 	struct nvkm_ltc_priv *priv = (void *)object;
+	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
 	int ret;

 	ret = nvkm_ltc_init(priv);
@@ -37,6 +38,7 @@ gk104_ltc_init(struct nouveau_object *object)
 	nv_wr32(priv, 0x17e8d8, priv->ltc_nr);
 	nv_wr32(priv, 0x17e000, priv->ltc_nr);
 	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	nv_mask(priv, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 	return 0;
 }
@@ -98,6 +98,7 @@ static int
 gm107_ltc_init(struct nouveau_object *object)
 {
 	struct nvkm_ltc_priv *priv = (void *)object;
+	u32 lpg128 = !(nv_rd32(priv, 0x100c80) & 0x00000001);
 	int ret;

 	ret = nvkm_ltc_init(priv);
@@ -106,6 +107,7 @@ gm107_ltc_init(struct nouveau_object *object)

 	nv_wr32(priv, 0x17e27c, priv->ltc_nr);
 	nv_wr32(priv, 0x17e278, priv->tag_base);
+	nv_mask(priv, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000);
 	return 0;
 }
@@ -108,7 +108,16 @@ void
 nouveau_vga_fini(struct nouveau_drm *drm)
 {
 	struct drm_device *dev = drm->dev;
+	bool runtime = false;
+
+	if (nouveau_runtime_pm == 1)
+		runtime = true;
+	if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+		runtime = true;
+
 	vga_switcheroo_unregister_client(dev->pdev);
+	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
 	vga_client_register(dev->pdev, NULL, NULL, NULL);
 }
@@ -489,13 +489,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
 {
 	int r;

-	/* Reset dma */
-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
-	RREG32(SRBM_SOFT_RESET);
-
 	r = cik_sdma_load_microcode(rdev);
 	if (r)
 		return r;
@@ -33,6 +33,8 @@
 #define KV_MINIMUM_ENGINE_CLOCK		800
 #define SMC_RAM_END			0x40000

+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+			    bool enable);
 static void kv_init_graphics_levels(struct radeon_device *rdev);
 static int kv_calculate_ds_divider(struct radeon_device *rdev);
 static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
@@ -1295,6 +1297,9 @@ void kv_dpm_disable(struct radeon_device *rdev)
 {
 	kv_smc_bapm_enable(rdev, false);

+	if (rdev->family == CHIP_MULLINS)
+		kv_enable_nb_dpm(rdev, false);
+
 	/* powerup blocks */
 	kv_dpm_powergate_acp(rdev, false);
 	kv_dpm_powergate_samu(rdev, false);
@@ -1769,15 +1774,24 @@ static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
 	return ret;
 }

-static int kv_enable_nb_dpm(struct radeon_device *rdev)
+static int kv_enable_nb_dpm(struct radeon_device *rdev,
+			    bool enable)
 {
 	struct kv_power_info *pi = kv_get_pi(rdev);
 	int ret = 0;

-	if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
-		ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
-		if (ret == 0)
-			pi->nb_dpm_enabled = true;
+	if (enable) {
+		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+			if (ret == 0)
+				pi->nb_dpm_enabled = true;
+		}
+	} else {
+		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
+			ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Disable);
+			if (ret == 0)
+				pi->nb_dpm_enabled = false;
+		}
 	}

 	return ret;
@@ -1864,7 +1878,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 			}
 			kv_update_sclk_t(rdev);
 			if (rdev->family == CHIP_MULLINS)
-				kv_enable_nb_dpm(rdev);
+				kv_enable_nb_dpm(rdev, true);
 		}
 	} else {
 		if (pi->enable_dpm) {
@@ -1889,7 +1903,7 @@ int kv_dpm_set_power_state(struct radeon_device *rdev)
 		}
 		kv_update_acp_boot_level(rdev);
 		kv_update_sclk_t(rdev);
-		kv_enable_nb_dpm(rdev);
+		kv_enable_nb_dpm(rdev, true);
 	}
 }
@@ -191,12 +191,6 @@ int cayman_dma_resume(struct radeon_device *rdev)
 	u32 reg_offset, wb_offset;
 	int i, r;

-	/* Reset dma */
-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
-
 	for (i = 0; i < 2; i++) {
 		if (i == 0) {
 			ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
@@ -821,6 +821,20 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
 		return RREG32(RADEON_CRTC2_CRNT_FRAME);
 }

+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+			  RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
+
 /* Who ever call radeon_fence_emit should call ring_lock and ask
  * for enough space (today caller are ib schedule and buffer move) */
 void r100_fence_ring_emit(struct radeon_device *rdev,
@@ -1056,20 +1070,6 @@ void r100_gfx_set_wptr(struct radeon_device *rdev,
 	(void)RREG32(RADEON_CP_RB_WPTR);
 }

-/**
- * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
- * rdev: radeon device structure
- * ring: ring buffer struct for emitting packets
- */
-void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
-{
-	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-			  RADEON_HDP_READ_BUFFER_INVALIDATE);
-	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
-}
-
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
@@ -124,15 +124,6 @@ int r600_dma_resume(struct radeon_device *rdev)
 	u32 rb_bufsz;
 	int r;

-	/* Reset dma */
-	if (rdev->family >= CHIP_RV770)
-		WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
-	else
-		WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
-
 	WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
 	WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
@@ -44,13 +44,6 @@
 #define R6XX_MAX_PIPES			8
 #define R6XX_MAX_PIPES_MASK		0xff

-/* PTE flags */
-#define PTE_VALID			(1 << 0)
-#define PTE_SYSTEM			(1 << 1)
-#define PTE_SNOOPED			(1 << 2)
-#define PTE_READABLE			(1 << 5)
-#define PTE_WRITEABLE			(1 << 6)
-
 /* tiling bits */
 #define ARRAY_LINEAR_GENERAL		0x00000000
 #define ARRAY_LINEAR_ALIGNED		0x00000001
@@ -185,7 +185,6 @@ static struct radeon_asic_ring r100_gfx_ring = {
 	.get_rptr = &r100_gfx_get_rptr,
 	.get_wptr = &r100_gfx_get_wptr,
 	.set_wptr = &r100_gfx_set_wptr,
-	.hdp_flush = &r100_ring_hdp_flush,
 };

 static struct radeon_asic r100_asic = {
@@ -332,7 +331,6 @@ static struct radeon_asic_ring r300_gfx_ring = {
 	.get_rptr = &r100_gfx_get_rptr,
 	.get_wptr = &r100_gfx_get_wptr,
 	.set_wptr = &r100_gfx_set_wptr,
-	.hdp_flush = &r100_ring_hdp_flush,
 };

 static struct radeon_asic r300_asic = {
@@ -148,8 +148,7 @@ u32 r100_gfx_get_wptr(struct radeon_device *rdev,
 		      struct radeon_ring *ring);
 void r100_gfx_set_wptr(struct radeon_device *rdev,
 		       struct radeon_ring *ring);
-void r100_ring_hdp_flush(struct radeon_device *rdev,
-			 struct radeon_ring *ring);
+
 /*
  * r200,rv250,rs300,rv280
  */
@@ -1393,7 +1393,7 @@ int radeon_device_init(struct radeon_device *rdev,

 	r = radeon_init(rdev);
 	if (r)
-		return r;
+		goto failed;

 	r = radeon_ib_ring_tests(rdev);
 	if (r)
@@ -1413,7 +1413,7 @@ int radeon_device_init(struct radeon_device *rdev,
 		radeon_agp_disable(rdev);
 		r = radeon_init(rdev);
 		if (r)
-			return r;
+			goto failed;
 	}

 	if ((radeon_testing & 1)) {
@@ -1435,6 +1435,11 @@ int radeon_device_init(struct radeon_device *rdev,
 		DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
 	}
 	return 0;
+
+failed:
+	if (runtime)
+		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
+	return r;
 }

 static void radeon_debugfs_remove_files(struct radeon_device *rdev);
@@ -1455,6 +1460,8 @@ void radeon_device_fini(struct radeon_device *rdev)
 	radeon_bo_evict_vram(rdev);
 	radeon_fini(rdev);
 	vga_switcheroo_unregister_client(rdev->pdev);
+	if (rdev->flags & RADEON_IS_PX)
+		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	if (rdev->rio_mem)
 		pci_iounmap(rdev->pdev, rdev->rio_mem);
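The radeon_device_init() change above replaces early "return r;" exits with "goto failed;" so that anything registered earlier in the function, here the switcheroo PM domain, is undone on the error path. A minimal sketch of that unwind idiom in standalone C; setup_pm_domain/setup_hw/teardown_pm_domain are hypothetical stand-ins, not radeon functions:

#include <stdio.h>

/* Hypothetical stand-ins for the real setup/teardown calls
 * (e.g. vga_switcheroo_init/fini_domain_pm_ops and radeon_init). */
static int setup_pm_domain(void)     { return 0; }
static void teardown_pm_domain(void) { }
static int setup_hw(void)            { return -1; }  /* pretend this step fails */

static int device_init_sketch(void)
{
	int r;

	r = setup_pm_domain();
	if (r)
		return r;        /* nothing registered yet, a plain return is fine */

	r = setup_hw();
	if (r)
		goto failed;     /* must undo the earlier registration first */

	return 0;

failed:
	teardown_pm_domain();
	return r;
}

int main(void)
{
	printf("init returned %d\n", device_init_sketch());
	return 0;
}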
@@ -83,7 +83,7 @@
  *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
  *   2.39.0 - Add INFO query for number of active CUs
  *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
- *            CS to GPU
+ *            CS to GPU on >= r600
  */
 #define KMS_DRIVER_MAJOR	2
 #define KMS_DRIVER_MINOR	40
@@ -221,9 +221,9 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
 	entry = (lower_32_bits(addr) & PAGE_MASK) |
 		((upper_32_bits(addr) & 0xff) << 4);
 	if (flags & RADEON_GART_PAGE_READ)
-		addr |= RS400_PTE_READABLE;
+		entry |= RS400_PTE_READABLE;
 	if (flags & RADEON_GART_PAGE_WRITE)
-		addr |= RS400_PTE_WRITEABLE;
+		entry |= RS400_PTE_WRITEABLE;
 	if (!(flags & RADEON_GART_PAGE_SNOOP))
 		entry |= RS400_PTE_UNSNOOPED;
 	entry = cpu_to_le32(entry);
|
||||
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
|
||||
|
||||
val = frame[0xC];
|
||||
val |= frame[0xD] << 8;
|
||||
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
|
||||
|
||||
/* Enable transmission slot for AVI infoframe
|
||||
|
@@ -660,6 +660,12 @@ int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *
 }
 EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);

+void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
+{
+	dev->pm_domain = NULL;
+}
+EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
+
 static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -64,6 +64,7 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);

 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
 int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
 #else

@@ -82,6 +83,7 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}

 static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
 static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }

 #endif
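Taken together, the new helper is meant to be paired with vga_switcheroo_init_domain_pm_ops(): a driver that installs the switcheroo PM domain at load time clears it again on unload, so the device's pm_domain pointer does not keep pointing into an unloaded module. That is what the nouveau and radeon hunks above do. A minimal usage sketch in kernel-style C; the driver, struct and field names are hypothetical, only the vga_switcheroo_* calls are the API added here:

#include <linux/pm.h>
#include <linux/vga_switcheroo.h>

/* Hypothetical driver state used only for illustration. */
struct foo_device {
	struct device *dev;
	struct dev_pm_domain pm_domain;
	bool runtime_pm;
};

static int foo_load(struct foo_device *foo)
{
	if (foo->runtime_pm)
		/* Install the switcheroo-aware PM domain while the module is live. */
		return vga_switcheroo_init_domain_pm_ops(foo->dev, &foo->pm_domain);
	return 0;
}

static void foo_unload(struct foo_device *foo)
{
	if (foo->runtime_pm)
		/* Clear dev->pm_domain so it cannot dangle after module unload. */
		vga_switcheroo_fini_domain_pm_ops(foo->dev);
}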