drm fixes for 5.5-rc3
Merge tag 'drm-fixes-2019-12-21' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Probably the last one before Christmas, I'll see if there is much
  demand over the next few weeks for more fixes, I expect it'll be
  quiet enough. This has one exynos fix, and a bunch of i915 core and
  i915 GVT fixes.

  Summary:

  exynos:
   - component delete fix

  i915:
   - Fix to drop an unused and harmful display W/A
   - Fix to define EHL power wells independently of ICL
   - Fix for priority inversion on bonded requests
   - Fix in mmio offset calculation of DSB instance
   - Fix memory leak from get_task_pid when banning clients
   - Fixes to avoid dereference of uninitialized ops in dma_fence
     tracing and keep reference to execbuf object until submitted
   - vGPU state setting locking fix (Zhenyu)
   - Fix vGPU display dmabuf as read-only (Zhenyu)
   - Properly handle vGPU display dmabuf page pin when rendering (Tina)
   - Fix one guest boot warning to handle guc reset state (Fred)"

* tag 'drm-fixes-2019-12-21' of git://anongit.freedesktop.org/drm/drm:
  drm/exynos: gsc: add missed component_del
  drm/i915: Fix pid leak with banned clients
  drm/i915/gem: Keep request alive while attaching fences
  drm/i915: Fix WARN_ON condition for cursor plane ddb allocation
  drm/i915/gvt: Fix guest boot warning
  drm/i915/tgl: Drop Wa#1178
  drm/i915/ehl: Define EHL powerwells independently of ICL
  drm/i915: Set fence_work.ops before dma_fence_init
  drm/i915: Copy across scheduler behaviour flags across submit fences
  drm/i915/dsb: Fix in mmio offset calculation of DSB instance
  drm/i915/gvt: Pin vgpu dma address before using
  drm/i915/gvt: set guest display buffer as readonly
  drm/i915/gvt: use vgpu lock for active state setting
commit a7c88728da
@@ -1313,6 +1313,7 @@ static int gsc_remove(struct platform_device *pdev)
 {
         struct device *dev = &pdev->dev;
 
+        component_del(dev, &gsc_component_ops);
         pm_runtime_dont_use_autosuspend(dev);
         pm_runtime_disable(dev);
 
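The fix restores the usual component pairing: a driver that registers itself with component_add() in probe must unregister with component_del() in remove, otherwise the component framework keeps a stale entry for the dead device. A minimal sketch of the pairing; the probe/remove bodies are hypothetical and only the two component calls come from this patch:

    #include <linux/component.h>
    #include <linux/platform_device.h>

    static int gsc_probe(struct platform_device *pdev)
    {
            /* ... hardware and runtime-pm setup elided ... */
            return component_add(&pdev->dev, &gsc_component_ops);
    }

    static int gsc_remove(struct platform_device *pdev)
    {
            /* undo the component_add() from probe before teardown */
            component_del(&pdev->dev, &gsc_component_ops);
            /* ... pm_runtime teardown as in the hunk above ... */
            return 0;
    }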
@@ -3688,6 +3688,151 @@ static const struct i915_power_well_desc icl_power_wells[] = {
         },
 };
 
+static const struct i915_power_well_desc ehl_power_wells[] = {
+        {
+                .name = "always-on",
+                .always_on = true,
+                .domains = POWER_DOMAIN_MASK,
+                .ops = &i9xx_always_on_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+        },
+        {
+                .name = "power well 1",
+                /* Handled by the DMC firmware */
+                .always_on = true,
+                .domains = 0,
+                .ops = &hsw_power_well_ops,
+                .id = SKL_DISP_PW_1,
+                {
+                        .hsw.regs = &hsw_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+                        .hsw.has_fuses = true,
+                },
+        },
+        {
+                .name = "DC off",
+                .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+                .ops = &gen9_dc_off_power_well_ops,
+                .id = SKL_DISP_DC_OFF,
+        },
+        {
+                .name = "power well 2",
+                .domains = ICL_PW_2_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = SKL_DISP_PW_2,
+                {
+                        .hsw.regs = &hsw_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+                        .hsw.has_fuses = true,
+                },
+        },
+        {
+                .name = "power well 3",
+                .domains = ICL_PW_3_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &hsw_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+                        .hsw.irq_pipe_mask = BIT(PIPE_B),
+                        .hsw.has_vga = true,
+                        .hsw.has_fuses = true,
+                },
+        },
+        {
+                .name = "DDI A IO",
+                .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_ddi_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+                },
+        },
+        {
+                .name = "DDI B IO",
+                .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_ddi_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+                },
+        },
+        {
+                .name = "DDI C IO",
+                .domains = ICL_DDI_IO_C_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_ddi_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_DDI_C,
+                },
+        },
+        {
+                .name = "DDI D IO",
+                .domains = ICL_DDI_IO_D_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_ddi_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_DDI_D,
+                },
+        },
+        {
+                .name = "AUX A",
+                .domains = ICL_AUX_A_IO_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_aux_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+                },
+        },
+        {
+                .name = "AUX B",
+                .domains = ICL_AUX_B_IO_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_aux_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+                },
+        },
+        {
+                .name = "AUX C",
+                .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_aux_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+                },
+        },
+        {
+                .name = "AUX D",
+                .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &icl_aux_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_AUX_D,
+                },
+        },
+        {
+                .name = "power well 4",
+                .domains = ICL_PW_4_POWER_DOMAINS,
+                .ops = &hsw_power_well_ops,
+                .id = DISP_PW_ID_NONE,
+                {
+                        .hsw.regs = &hsw_power_well_regs,
+                        .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+                        .hsw.has_fuses = true,
+                        .hsw.irq_pipe_mask = BIT(PIPE_C),
+                },
+        },
+};
+
 static const struct i915_power_well_desc tgl_power_wells[] = {
         {
                 .name = "always-on",
@@ -3832,7 +3977,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
         {
                 .name = "AUX A",
                 .domains = TGL_AUX_A_IO_POWER_DOMAINS,
-                .ops = &icl_combo_phy_aux_power_well_ops,
+                .ops = &hsw_power_well_ops,
                 .id = DISP_PW_ID_NONE,
                 {
                         .hsw.regs = &icl_aux_power_well_regs,
@@ -3842,7 +3987,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
         {
                 .name = "AUX B",
                 .domains = TGL_AUX_B_IO_POWER_DOMAINS,
-                .ops = &icl_combo_phy_aux_power_well_ops,
+                .ops = &hsw_power_well_ops,
                 .id = DISP_PW_ID_NONE,
                 {
                         .hsw.regs = &icl_aux_power_well_regs,
@@ -3852,7 +3997,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
         {
                 .name = "AUX C",
                 .domains = TGL_AUX_C_IO_POWER_DOMAINS,
-                .ops = &icl_combo_phy_aux_power_well_ops,
+                .ops = &hsw_power_well_ops,
                 .id = DISP_PW_ID_NONE,
                 {
                         .hsw.regs = &icl_aux_power_well_regs,
@@ -4162,6 +4307,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
          */
         if (IS_GEN(dev_priv, 12)) {
                 err = set_power_wells(power_domains, tgl_power_wells);
+        } else if (IS_ELKHARTLAKE(dev_priv)) {
+                err = set_power_wells(power_domains, ehl_power_wells);
         } else if (IS_GEN(dev_priv, 11)) {
                 err = set_power_wells(power_domains, icl_power_wells);
         } else if (IS_CANNONLAKE(dev_priv)) {
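One subtlety in this dispatch: Elkhart Lake reports as gen 11, so the IS_ELKHARTLAKE() test has to sit in front of the generic IS_GEN(dev_priv, 11) branch, or EHL would keep receiving the ICL table. A condensed sketch of the resulting selection order (predicates and set_power_wells() are the driver's own; error handling omitted):

    if (IS_GEN(dev_priv, 12))
            err = set_power_wells(power_domains, tgl_power_wells);
    else if (IS_ELKHARTLAKE(dev_priv))      /* also gen 11: must match first */
            err = set_power_wells(power_domains, ehl_power_wells);
    else if (IS_GEN(dev_priv, 11))
            err = set_power_wells(power_domains, icl_power_wells);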
@@ -2167,8 +2167,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
         ext_data.fpriv = file->driver_priv;
         if (client_is_banned(ext_data.fpriv)) {
                 DRM_DEBUG("client %s[%d] banned from creating ctx\n",
-                          current->comm,
-                          pid_nr(get_task_pid(current, PIDTYPE_PID)));
+                          current->comm, task_pid_nr(current));
                 return -EIO;
         }
 
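The leak here is a struct pid reference rather than heap memory: get_task_pid() takes a reference that the old code never dropped, while task_pid_nr() just reads the numeric pid. A standalone sketch of the two patterns (illustrative, not the driver code):

    static void show_pid_patterns(void)
    {
            /*
             * get_task_pid() returns a counted reference on struct pid;
             * fetching the number and discarding the pointer leaks the
             * reference unless put_pid() follows.
             */
            struct pid *pid = get_task_pid(current, PIDTYPE_PID);
            pid_t nr = pid_nr(pid);

            put_pid(pid);           /* the step the old code was missing */

            /* Leak-free: read the number without taking a reference. */
            nr = task_pid_nr(current);
    }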
@@ -2694,6 +2694,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
         err = eb_submit(&eb);
 err_request:
         add_to_client(eb.request, file);
+        i915_request_get(eb.request);
         i915_request_add(eb.request);
 
         if (fences)
@@ -2709,6 +2710,7 @@ err_request:
                         fput(out_fence->file);
                 }
         }
+        i915_request_put(eb.request);
 
 err_batch_unpin:
         if (eb.batch_flags & I915_DISPATCH_SECURE)
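Taken together, the two execbuffer hunks implement a simple lifetime rule: i915_request_add() hands the request over to the submission and retirement machinery, which may release it while execbuf still needs to touch it when installing the out-fence. Holding an explicit reference across that window is the standard pattern (sketch; rq stands in for eb.request):

    i915_request_get(rq);   /* keep rq alive past i915_request_add() */
    i915_request_add(rq);   /* may otherwise retire and free rq */

    /* ... install the out-fence fd, update the client list ... */

    i915_request_put(rq);   /* drop the temporary reference */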
@@ -36,13 +36,32 @@
 
 #define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
 
+static int vgpu_pin_dma_address(struct intel_vgpu *vgpu,
+                                unsigned long size,
+                                dma_addr_t dma_addr)
+{
+        int ret = 0;
+
+        if (intel_gvt_hypervisor_dma_pin_guest_page(vgpu, dma_addr))
+                ret = -EINVAL;
+
+        return ret;
+}
+
+static void vgpu_unpin_dma_address(struct intel_vgpu *vgpu,
+                                   dma_addr_t dma_addr)
+{
+        intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, dma_addr);
+}
+
 static int vgpu_gem_get_pages(
                 struct drm_i915_gem_object *obj)
 {
         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+        struct intel_vgpu *vgpu;
         struct sg_table *st;
         struct scatterlist *sg;
-        int i, ret;
+        int i, j, ret;
         gen8_pte_t __iomem *gtt_entries;
         struct intel_vgpu_fb_info *fb_info;
         u32 page_num;
@@ -51,6 +70,10 @@ static int vgpu_gem_get_pages(
         if (WARN_ON(!fb_info))
                 return -ENODEV;
 
+        vgpu = fb_info->obj->vgpu;
+        if (WARN_ON(!vgpu))
+                return -ENODEV;
+
         st = kmalloc(sizeof(*st), GFP_KERNEL);
         if (unlikely(!st))
                 return -ENOMEM;
@@ -64,21 +87,53 @@ static int vgpu_gem_get_pages(
         gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                 (fb_info->start >> PAGE_SHIFT);
         for_each_sg(st->sgl, sg, page_num, i) {
+                dma_addr_t dma_addr =
+                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
+                if (vgpu_pin_dma_address(vgpu, PAGE_SIZE, dma_addr)) {
+                        ret = -EINVAL;
+                        goto out;
+                }
+
                 sg->offset = 0;
                 sg->length = PAGE_SIZE;
-                sg_dma_address(sg) =
-                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                 sg_dma_len(sg) = PAGE_SIZE;
+                sg_dma_address(sg) = dma_addr;
         }
 
         __i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+out:
+        if (ret) {
+                dma_addr_t dma_addr;
+
+                for_each_sg(st->sgl, sg, i, j) {
+                        dma_addr = sg_dma_address(sg);
+                        if (dma_addr)
+                                vgpu_unpin_dma_address(vgpu, dma_addr);
+                }
+                sg_free_table(st);
+                kfree(st);
+        }
+
+        return ret;
 
-        return 0;
 }
 
 static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                 struct sg_table *pages)
 {
+        struct scatterlist *sg;
+
+        if (obj->base.dma_buf) {
+                struct intel_vgpu_fb_info *fb_info = obj->gvt_info;
+                struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
+                struct intel_vgpu *vgpu = obj->vgpu;
+                int i;
+
+                for_each_sg(pages->sgl, sg, fb_info->size, i)
+                        vgpu_unpin_dma_address(vgpu,
+                                               sg_dma_address(sg));
+        }
+
         sg_free_table(pages);
         kfree(pages);
 }
@@ -163,6 +218,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
         drm_gem_private_object_init(dev, &obj->base,
                 roundup(info->size, PAGE_SIZE));
         i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class);
+        i915_gem_object_set_readonly(obj);
 
         obj->read_domains = I915_GEM_DOMAIN_GTT;
         obj->write_domain = 0;
@@ -341,6 +341,10 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                 gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
                 engine_mask |= BIT(VCS1);
         }
+        if (data & GEN9_GRDOM_GUC) {
+                gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
+                vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
+        }
         engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
 }
 
@@ -1636,6 +1640,16 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
         return 0;
 }
 
+static int guc_status_read(struct intel_vgpu *vgpu,
+                           unsigned int offset, void *p_data,
+                           unsigned int bytes)
+{
+        /* keep MIA_IN_RESET before clearing */
+        read_vreg(vgpu, offset, p_data, bytes);
+        vgpu_vreg(vgpu, offset) &= ~GS_MIA_IN_RESET;
+        return 0;
+}
+
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
                 unsigned int offset, void *p_data, unsigned int bytes)
 {
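The two GVT hunks above emulate a small reset handshake. When the guest writes a reset request with GEN9_GRDOM_GUC set, the vGPU latches GS_MIA_IN_RESET into its virtual GUC_STATUS; guc_status_read() then returns that value once and clears the bit, so a polling guest first observes the GuC entering reset and subsequently sees it come back out. A sketch of the assumed guest-side expectation (the polling helper is hypothetical, not i915 code):

    u32 status;
    int retries = 100;

    /* after requesting a GuC reset, the guest polls for the MIA state */
    do {
            status = read_guc_status();     /* hypothetical MMIO read */
    } while (!(status & GS_MIA_IN_RESET) && --retries);

    /*
     * Without the vreg emulation above, GS_MIA_IN_RESET never appears
     * and the guest warns during boot that the GuC failed to reset.
     */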
@@ -2672,6 +2686,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
 
         MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
         MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+        MMIO_DH(GUC_STATUS, D_ALL, guc_status_read, NULL);
+
         return 0;
 }
 
@@ -62,6 +62,8 @@ struct intel_gvt_mpt {
                              unsigned long size, dma_addr_t *dma_addr);
         void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
 
+        int (*dma_pin_guest_page)(unsigned long handle, dma_addr_t dma_addr);
+
         int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
                               unsigned long mfn, unsigned int nr, bool map);
         int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
@@ -1916,6 +1916,28 @@ err_unlock:
         return ret;
 }
 
+static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
+{
+        struct kvmgt_guest_info *info;
+        struct gvt_dma *entry;
+        int ret = 0;
+
+        if (!handle_valid(handle))
+                return -ENODEV;
+
+        info = (struct kvmgt_guest_info *)handle;
+
+        mutex_lock(&info->vgpu->vdev.cache_lock);
+        entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+        if (entry)
+                kref_get(&entry->ref);
+        else
+                ret = -ENOMEM;
+        mutex_unlock(&info->vgpu->vdev.cache_lock);
+
+        return ret;
+}
+
 static void __gvt_dma_release(struct kref *ref)
 {
         struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
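Pinning deliberately reuses the existing per-vGPU dma cache: a cache hit just bumps the entry's kref, so a concurrent unmap cannot release the mapping while the display path still uses it, and a miss means the address was never mapped through this vGPU. The intended pairing, assuming the unpin side ends in the same kref_put() the unmap path already uses:

    /* pin: kvmgt_dma_pin_guest_page() under cache_lock */
    kref_get(&entry->ref);

    /* unpin: the unmap path drops the extra reference */
    kref_put(&entry->ref, __gvt_dma_release);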
@@ -2027,6 +2049,7 @@ static struct intel_gvt_mpt kvmgt_mpt = {
         .gfn_to_mfn = kvmgt_gfn_to_pfn,
         .dma_map_guest_page = kvmgt_dma_map_guest_page,
         .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
+        .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
         .set_opregion = kvmgt_set_opregion,
         .set_edid = kvmgt_set_edid,
         .get_vfio_device = kvmgt_get_vfio_device,
@@ -254,6 +254,21 @@ static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
         intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
 }
 
+/**
+ * intel_gvt_hypervisor_dma_pin_guest_page - pin guest dma buf
+ * @vgpu: a vGPU
+ * @dma_addr: guest dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int
+intel_gvt_hypervisor_dma_pin_guest_page(struct intel_vgpu *vgpu,
+                                        dma_addr_t dma_addr)
+{
+        return intel_gvt_host.mpt->dma_pin_guest_page(vgpu->handle, dma_addr);
+}
+
 /**
  * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
  * @vgpu: a vGPU
@@ -212,9 +212,9 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-        mutex_lock(&vgpu->gvt->lock);
+        mutex_lock(&vgpu->vgpu_lock);
         vgpu->active = true;
-        mutex_unlock(&vgpu->gvt->lock);
+        mutex_unlock(&vgpu->vgpu_lock);
 }
 
 /**
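The locking fix narrows the scope from the device-global gvt->lock to the per-vGPU vgpu_lock. The point is consistency: vgpu->active is cleared elsewhere under vgpu->vgpu_lock, so setting it under a different lock serialised nothing. Sketch of the deactivate side this now pairs with (shape assumed, trimmed to the relevant lines):

    void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
    {
            mutex_lock(&vgpu->vgpu_lock);
            vgpu->active = false;
            /* ... wait for the vGPU to quiesce ... */
            mutex_unlock(&vgpu->vgpu_lock);
    }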
@@ -9405,11 +9405,9 @@ enum skl_power_gate {
 #define _ICL_AUX_REG_IDX(pw_idx)        ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
 #define _ICL_AUX_ANAOVRD1_A             0x162398
 #define _ICL_AUX_ANAOVRD1_B             0x6C398
-#define _TGL_AUX_ANAOVRD1_C             0x160398
 #define ICL_AUX_ANAOVRD1(pw_idx)        _MMIO(_PICK(_ICL_AUX_REG_IDX(pw_idx), \
                                                     _ICL_AUX_ANAOVRD1_A, \
-                                                    _ICL_AUX_ANAOVRD1_B, \
-                                                    _TGL_AUX_ANAOVRD1_C))
+                                                    _ICL_AUX_ANAOVRD1_B))
 #define ICL_AUX_ANAOVRD1_LDO_BYPASS     (1 << 7)
 #define ICL_AUX_ANAOVRD1_ENABLE         (1 << 0)
 
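For context, _PICK() indexes a compound-literal array, so ICL_AUX_ANAOVRD1() is only meaningful for the indices actually listed; with the TGL entry gone the macro covers AUX A (index 0) and AUX B (index 1) only, matching the removal of the workaround from TGL. The helper as defined in i915_reg.h (assumed unchanged by this series):

    #define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])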
@@ -11994,7 +11992,7 @@ enum skl_power_gate {
 /* This register controls the Display State Buffer (DSB) engines. */
 #define _DSBSL_INSTANCE_BASE            0x70B00
 #define DSBSL_INSTANCE(pipe, id)        (_DSBSL_INSTANCE_BASE + \
-                                         (pipe) * 0x1000 + (id) * 100)
+                                         (pipe) * 0x1000 + (id) * 0x100)
 #define DSB_HEAD(pipe, id)              _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0)
 #define DSB_TAIL(pipe, id)              _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
 #define DSB_CTRL(pipe, id)              _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
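The DSB change is a number-base bug: the 100 was meant to be hexadecimal. A worked example for DSB instance 1 on pipe A, using the base values from the macro above:

    u32 wrong = 0x70B00 + 0 * 0x1000 + 1 * 100;   /* = 0x70B64, off target  */
    u32 right = 0x70B00 + 0 * 0x1000 + 1 * 0x100; /* = 0x70C00, as intended */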
@@ -300,11 +300,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 }
 
 static int
-__i915_request_await_execution(struct i915_request *rq,
-                               struct i915_request *signal,
-                               void (*hook)(struct i915_request *rq,
-                                            struct dma_fence *signal),
-                               gfp_t gfp)
+__await_execution(struct i915_request *rq,
+                  struct i915_request *signal,
+                  void (*hook)(struct i915_request *rq,
+                               struct dma_fence *signal),
+                  gfp_t gfp)
 {
         struct execute_cb *cb;
 
@@ -341,6 +341,8 @@ __i915_request_await_execution(struct i915_request *rq,
         }
         spin_unlock_irq(&signal->lock);
 
+        /* Copy across semaphore status as we need the same behaviour */
+        rq->sched.flags |= signal->sched.flags;
         return 0;
 }
 
@@ -811,31 +813,21 @@ already_busywaiting(struct i915_request *rq)
 }
 
 static int
-emit_semaphore_wait(struct i915_request *to,
-                    struct i915_request *from,
-                    gfp_t gfp)
+__emit_semaphore_wait(struct i915_request *to,
+                      struct i915_request *from,
+                      u32 seqno)
 {
         const int has_token = INTEL_GEN(to->i915) >= 12;
         u32 hwsp_offset;
-        int len;
+        int len, err;
         u32 *cs;
 
         GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
 
-        /* Just emit the first semaphore we see as request space is limited. */
-        if (already_busywaiting(to) & from->engine->mask)
-                goto await_fence;
-
-        if (i915_request_await_start(to, from) < 0)
-                goto await_fence;
-
-        /* Only submit our spinner after the signaler is running! */
-        if (__i915_request_await_execution(to, from, NULL, gfp))
-                goto await_fence;
-
         /* We need to pin the signaler's HWSP until we are finished reading. */
-        if (intel_timeline_read_hwsp(from, to, &hwsp_offset))
-                goto await_fence;
+        err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
+        if (err)
+                return err;
 
         len = 4;
         if (has_token)
|
|||||||
MI_SEMAPHORE_POLL |
|
MI_SEMAPHORE_POLL |
|
||||||
MI_SEMAPHORE_SAD_GTE_SDD) +
|
MI_SEMAPHORE_SAD_GTE_SDD) +
|
||||||
has_token;
|
has_token;
|
||||||
*cs++ = from->fence.seqno;
|
*cs++ = seqno;
|
||||||
*cs++ = hwsp_offset;
|
*cs++ = hwsp_offset;
|
||||||
*cs++ = 0;
|
*cs++ = 0;
|
||||||
if (has_token) {
|
if (has_token) {
|
||||||
@@ -867,6 +859,28 @@ emit_semaphore_wait(struct i915_request *to,
         }
 
         intel_ring_advance(to, cs);
+        return 0;
+}
+
+static int
+emit_semaphore_wait(struct i915_request *to,
+                    struct i915_request *from,
+                    gfp_t gfp)
+{
+        /* Just emit the first semaphore we see as request space is limited. */
+        if (already_busywaiting(to) & from->engine->mask)
+                goto await_fence;
+
+        if (i915_request_await_start(to, from) < 0)
+                goto await_fence;
+
+        /* Only submit our spinner after the signaler is running! */
+        if (__await_execution(to, from, NULL, gfp))
+                goto await_fence;
+
+        if (__emit_semaphore_wait(to, from, from->fence.seqno))
+                goto await_fence;
+
         to->sched.semaphores |= from->engine->mask;
         to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
         return 0;
@@ -980,6 +994,57 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
         return 0;
 }
 
+static bool intel_timeline_sync_has_start(struct intel_timeline *tl,
+                                          struct dma_fence *fence)
+{
+        return __intel_timeline_sync_is_later(tl,
+                                              fence->context,
+                                              fence->seqno - 1);
+}
+
+static int intel_timeline_sync_set_start(struct intel_timeline *tl,
+                                         const struct dma_fence *fence)
+{
+        return __intel_timeline_sync_set(tl, fence->context, fence->seqno - 1);
+}
+
+static int
+__i915_request_await_execution(struct i915_request *to,
+                               struct i915_request *from,
+                               void (*hook)(struct i915_request *rq,
+                                            struct dma_fence *signal))
+{
+        int err;
+
+        /* Submit both requests at the same time */
+        err = __await_execution(to, from, hook, I915_FENCE_GFP);
+        if (err)
+                return err;
+
+        /* Squash repeated depenendices to the same timelines */
+        if (intel_timeline_sync_has_start(i915_request_timeline(to),
+                                          &from->fence))
+                return 0;
+
+        /* Ensure both start together [after all semaphores in signal] */
+        if (intel_engine_has_semaphores(to->engine))
+                err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+        else
+                err = i915_request_await_start(to, from);
+        if (err < 0)
+                return err;
+
+        /* Couple the dependency tree for PI on this exposed to->fence */
+        if (to->engine->schedule) {
+                err = i915_sched_node_add_dependency(&to->sched, &from->sched);
+                if (err < 0)
+                        return err;
+        }
+
+        return intel_timeline_sync_set_start(i915_request_timeline(to),
+                                             &from->fence);
+}
+
 int
 i915_request_await_execution(struct i915_request *rq,
                              struct dma_fence *fence,
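The repeated fence->seqno - 1 in the new helpers is the heart of the bonded-request fix: seqnos on a timeline signal in order, so a semaphore wait that completes at seqno - 1 means everything before "from" has finished and "from" itself is about to run. That turns the wait into a "has started" condition rather than a "has completed" one, which is what co-submitted (bonded) requests need to avoid priority inversion. The call restated for emphasis (same line as in the hunk):

    /* wait for 'from' to start, not for 'from' to complete */
    err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);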
@@ -1013,8 +1078,7 @@ i915_request_await_execution(struct i915_request *rq,
                 if (dma_fence_is_i915(fence))
                         ret = __i915_request_await_execution(rq,
                                                              to_request(fence),
-                                                             hook,
-                                                             I915_FENCE_GFP);
+                                                             hook);
                 else
                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
                                                             I915_FENCE_TIMEOUT,
@@ -474,7 +474,6 @@ void i915_sched_node_fini(struct i915_sched_node *node)
          * so we may be called out-of-order.
          */
         list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-                GEM_BUG_ON(!node_signaled(dep->signaler));
                 GEM_BUG_ON(!list_empty(&dep->dfs_link));
 
                 list_del(&dep->wait_link);
@@ -78,12 +78,11 @@ static const struct dma_fence_ops fence_ops = {
 void dma_fence_work_init(struct dma_fence_work *f,
                          const struct dma_fence_work_ops *ops)
 {
+        f->ops = ops;
         spin_lock_init(&f->lock);
         dma_fence_init(&f->dma, &fence_ops, &f->lock, 0, 0);
         i915_sw_fence_init(&f->chain, fence_notify);
         INIT_WORK(&f->work, fence_work);
-
-        f->ops = ops;
 }
 
 int dma_fence_work_chain(struct dma_fence_work *f, struct dma_fence *signal)
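The reorder matters because dma_fence_init() fires the dma_fence_init tracepoint, and with tracing enabled that immediately calls back into the fence's ops to record names; in this driver one of those callbacks dereferences f->ops (assumed shape below), so f->ops has to be populated first. Sketch of the hazard:

    /* assumed shape of the callback that made the old ordering unsafe */
    static const char *get_timeline_name(struct dma_fence *fence)
    {
            struct dma_fence_work *f = container_of(fence, typeof(*f), dma);

            return f->ops->name;    /* read before f->ops was assigned */
    }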
@@ -4291,8 +4291,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
                         &crtc_state->wm.skl.optimal.planes[plane_id];
 
                 if (plane_id == PLANE_CURSOR) {
-                        if (WARN_ON(wm->wm[level].min_ddb_alloc >
-                                    total[PLANE_CURSOR])) {
+                        if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+                                WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
                                 blocks = U32_MAX;
                                 break;
                         }
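The reworked check separates "does not fit" from "is a bug": min_ddb_alloc == U16_MAX is the sentinel for a watermark level that can never be satisfied and may legitimately exceed the cursor total, so the warning now fires only for any other overflow. The same logic with explanatory comments:

    if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
            /*
             * Overflow is expected for the U16_MAX sentinel; any other
             * value exceeding the cursor allocation is a real bug.
             */
            WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
            blocks = U32_MAX;       /* reject this watermark level */
            break;
    }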