Merge tag 'drm-intel-fixes-2020-02-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes
drm/i915 fixes for v5.6-rc3:
- Workaround missing Display Stream Compression (DSC) state readout by
  forcing modeset when it's enabled at probe
- Fix EHL port clock voltage level requirements
- Fix queuing retire workers on the virtual engine
- Fix use of partially initialized waiters
- Stop using drm_pci_alloc/drm_pci_free
- Fix rewind of RING_TAIL by forcing a context reload
- Fix locking on resetting ring->head
- Propagate our bug filing URL change to stable kernels

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87y2sxtsrd.fsf@intel.com
commit 97d9a4e961
@@ -8392,7 +8392,7 @@ M:	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
 M:	Rodrigo Vivi <rodrigo.vivi@intel.com>
 L:	intel-gfx@lists.freedesktop.org
 W:	https://01.org/linuxgraphics/
-B:	https://01.org/linuxgraphics/documentation/how-report-bugs
+B:	https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
 C:	irc://chat.freenode.net/intel-gfx
 Q:	http://patchwork.freedesktop.org/project/intel-gfx/
 T:	git git://anongit.freedesktop.org/drm-intel
@@ -75,9 +75,8 @@ config DRM_I915_CAPTURE_ERROR
 	help
 	  This option enables capturing the GPU state when a hang is detected.
 	  This information is vital for triaging hangs and assists in debugging.
-	  Please report any hang to
-	    https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
-	  for triaging.
+	  Please report any hang for triaging according to:
+	    https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
 
 	  If in doubt, say "Y".
 
@@ -4251,7 +4251,9 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
 void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
 					 struct intel_crtc_state *crtc_state)
 {
-	if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+	if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
+		crtc_state->min_voltage_level = 3;
+	else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 1;
 	else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
 		crtc_state->min_voltage_level = 2;
@@ -11087,7 +11087,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
 	u32 base;
 
 	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
-		base = obj->phys_handle->busaddr;
+		base = sg_dma_address(obj->mm.pages->sgl);
 	else
 		base = intel_plane_ggtt_offset(plane_state);
 
@@ -17433,6 +17433,24 @@ retry:
 			 * have readout for pipe gamma enable.
 			 */
 			crtc_state->uapi.color_mgmt_changed = true;
+
+			/*
+			 * FIXME hack to force full modeset when DSC is being
+			 * used.
+			 *
+			 * As long as we do not have full state readout and
+			 * config comparison of crtc_state->dsc, we have no way
+			 * to ensure reliable fastset. Remove once we have
+			 * readout for DSC.
+			 */
+			if (crtc_state->dsc.compression_enable) {
+				ret = drm_atomic_add_affected_connectors(state,
+									 &crtc->base);
+				if (ret)
+					goto out;
+				crtc_state->uapi.mode_changed = true;
+				drm_dbg_kms(dev, "Force full modeset for DSC\n");
+			}
 		}
 	}
 
@@ -565,6 +565,22 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
 		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
 			return -ENODEV;
 
+		/*
+		 * If the cancel fails, we then need to reset, cleanly!
+		 *
+		 * If the per-engine reset fails, all hope is lost! We resort
+		 * to a full GPU reset in that unlikely case, but realistically
+		 * if the engine could not reset, the full reset does not fare
+		 * much better. The damage has been done.
+		 *
+		 * However, if we cannot reset an engine by itself, we cannot
+		 * cleanup a hanging persistent context without causing
+		 * colateral damage, and we should not pretend we can by
+		 * exposing the interface.
+		 */
+		if (!intel_has_reset_engine(&ctx->i915->gt))
+			return -ENODEV;
+
 		i915_gem_context_clear_persistence(ctx);
 	}
 
@@ -285,9 +285,6 @@ struct drm_i915_gem_object {
 
 		void *gvt_info;
 	};
-
-	/** for phys allocated objects */
-	struct drm_dma_handle *phys_handle;
 };
 
 static inline struct drm_i915_gem_object *
@@ -22,88 +22,87 @@
 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
-	struct drm_dma_handle *phys;
-	struct sg_table *st;
 	struct scatterlist *sg;
-	char *vaddr;
+	struct sg_table *st;
+	dma_addr_t dma;
+	void *vaddr;
+	void *dst;
 	int i;
-	int err;
 
 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
 		return -EINVAL;
 
-	/* Always aligning to the object size, allows a single allocation
+	/*
+	 * Always aligning to the object size, allows a single allocation
 	 * to handle all possible callers, and given typical object sizes,
 	 * the alignment of the buddy allocation will naturally match.
 	 */
-	phys = drm_pci_alloc(obj->base.dev,
-			     roundup_pow_of_two(obj->base.size),
-			     roundup_pow_of_two(obj->base.size));
-	if (!phys)
+	vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
+				   roundup_pow_of_two(obj->base.size),
+				   &dma, GFP_KERNEL);
+	if (!vaddr)
 		return -ENOMEM;
 
-	vaddr = phys->vaddr;
-	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-		struct page *page;
-		char *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page)) {
-			err = PTR_ERR(page);
-			goto err_phys;
-		}
-
-		src = kmap_atomic(page);
-		memcpy(vaddr, src, PAGE_SIZE);
-		drm_clflush_virt_range(vaddr, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		put_page(page);
-		vaddr += PAGE_SIZE;
-	}
-
-	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
-
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st) {
-		err = -ENOMEM;
-		goto err_phys;
-	}
+	if (!st)
+		goto err_pci;
 
-	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
-		kfree(st);
-		err = -ENOMEM;
-		goto err_phys;
-	}
+	if (sg_alloc_table(st, 1, GFP_KERNEL))
+		goto err_st;
 
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = obj->base.size;
 
-	sg_dma_address(sg) = phys->busaddr;
+	sg_assign_page(sg, (struct page *)vaddr);
+	sg_dma_address(sg) = dma;
 	sg_dma_len(sg) = obj->base.size;
 
-	obj->phys_handle = phys;
+	dst = vaddr;
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		void *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page))
+			goto err_st;
+
+		src = kmap_atomic(page);
+		memcpy(dst, src, PAGE_SIZE);
+		drm_clflush_virt_range(dst, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		put_page(page);
+		dst += PAGE_SIZE;
+	}
+
+	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
 	__i915_gem_object_set_pages(obj, st, sg->length);
 
 	return 0;
 
-err_phys:
-	drm_pci_free(obj->base.dev, phys);
-
-	return err;
+err_st:
+	kfree(st);
+err_pci:
+	dma_free_coherent(&obj->base.dev->pdev->dev,
+			  roundup_pow_of_two(obj->base.size),
+			  vaddr, dma);
+	return -ENOMEM;
 }
 
 static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 			       struct sg_table *pages)
 {
+	dma_addr_t dma = sg_dma_address(pages->sgl);
+	void *vaddr = sg_page(pages->sgl);
+
 	__i915_gem_object_release_shmem(obj, pages, false);
 
 	if (obj->mm.dirty) {
 		struct address_space *mapping = obj->base.filp->f_mapping;
-		char *vaddr = obj->phys_handle->vaddr;
+		void *src = vaddr;
 		int i;
 
 		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 				continue;
 
 			dst = kmap_atomic(page);
-			drm_clflush_virt_range(vaddr, PAGE_SIZE);
-			memcpy(dst, vaddr, PAGE_SIZE);
+			drm_clflush_virt_range(src, PAGE_SIZE);
+			memcpy(dst, src, PAGE_SIZE);
 			kunmap_atomic(dst);
 
 			set_page_dirty(page);
 			if (obj->mm.madv == I915_MADV_WILLNEED)
 				mark_page_accessed(page);
 			put_page(page);
-			vaddr += PAGE_SIZE;
+
+			src += PAGE_SIZE;
 		}
 		obj->mm.dirty = false;
 	}
@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	sg_free_table(pages);
 	kfree(pages);
 
-	drm_pci_free(obj->base.dev, obj->phys_handle);
+	dma_free_coherent(&obj->base.dev->pdev->dev,
+			  roundup_pow_of_two(obj->base.size),
+			  vaddr, dma);
 }
 
 static void phys_release(struct drm_i915_gem_object *obj)
@@ -136,6 +136,9 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 	struct intel_engine_cs *engine =
 		container_of(b, struct intel_engine_cs, breadcrumbs);
 
+	if (unlikely(intel_engine_is_virtual(engine)))
+		engine = intel_virtual_engine_get_sibling(engine, 0);
+
 	intel_engine_add_retire(engine, tl);
 }
 
|
@ -99,6 +99,9 @@ static bool add_retire(struct intel_engine_cs *engine,
|
||||
void intel_engine_add_retire(struct intel_engine_cs *engine,
|
||||
struct intel_timeline *tl)
|
||||
{
|
||||
/* We don't deal well with the engine disappearing beneath us */
|
||||
GEM_BUG_ON(intel_engine_is_virtual(engine));
|
||||
|
||||
if (add_retire(engine, tl))
|
||||
schedule_work(&engine->retire_work);
|
||||
}
|
||||
|
@@ -237,7 +237,8 @@ static void execlists_init_reg_state(u32 *reg_state,
 				     bool close);
 static void
 __execlists_update_reg_state(const struct intel_context *ce,
-			     const struct intel_engine_cs *engine);
+			     const struct intel_engine_cs *engine,
+			     u32 head);
 
 static void mark_eio(struct i915_request *rq)
 {
@@ -1186,12 +1187,11 @@ static void reset_active(struct i915_request *rq,
 		head = rq->tail;
 	else
 		head = active_request(ce->timeline, rq)->head;
-	ce->ring->head = intel_ring_wrap(ce->ring, head);
-	intel_ring_update_space(ce->ring);
+	head = intel_ring_wrap(ce->ring, head);
 
 	/* Scrub the context image to prevent replaying the previous batch */
 	restore_default_state(ce, engine);
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, head);
 
 	/* We've switched away, so this should be a no-op, but intent matters */
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
@@ -1321,7 +1321,7 @@ static u64 execlists_update_context(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
 	u64 desc = ce->lrc_desc;
-	u32 tail;
+	u32 tail, prev;
 
 	/*
 	 * WaIdleLiteRestore:bdw,skl
@@ -1334,9 +1334,15 @@ static u64 execlists_update_context(struct i915_request *rq)
 	 * subsequent resubmissions (for lite restore). Should that fail us,
 	 * and we try and submit the same tail again, force the context
 	 * reload.
+	 *
+	 * If we need to return to a preempted context, we need to skip the
+	 * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+	 * HW has a tendency to ignore us rewinding the TAIL to the end of
+	 * an earlier request.
 	 */
 	tail = intel_ring_set_tail(rq->ring, rq->tail);
-	if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+	prev = ce->lrc_reg_state[CTX_RING_TAIL];
+	if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
 		desc |= CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state[CTX_RING_TAIL] = tail;
 	rq->tail = rq->wa_tail;
@@ -1605,6 +1611,11 @@ last_active(const struct intel_engine_execlists *execlists)
 	return *last;
 }
 
+#define for_each_waiter(p__, rq__) \
+	list_for_each_entry_lockless(p__, \
+				     &(rq__)->sched.waiters_list, \
+				     wait_link)
+
 static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
 	LIST_HEAD(list);
@@ -1622,7 +1633,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
 		GEM_BUG_ON(i915_request_is_active(rq));
 		list_move_tail(&rq->sched.link, pl);
 
-		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+		for_each_waiter(p, rq) {
 			struct i915_request *w =
 				container_of(p->waiter, typeof(*w), sched);
 
@@ -1834,14 +1845,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 */
 			__unwind_incomplete_requests(engine);
 
-			/*
-			 * If we need to return to the preempted context, we
-			 * need to skip the lite-restore and force it to
-			 * reload the RING_TAIL. Otherwise, the HW has a
-			 * tendency to ignore us rewinding the TAIL to the
-			 * end of an earlier request.
-			 */
-			last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
 			   timer_expired(&engine->execlists.timer)) {
@@ -2860,16 +2863,17 @@ static void execlists_context_unpin(struct intel_context *ce)
 
 static void
 __execlists_update_reg_state(const struct intel_context *ce,
-			     const struct intel_engine_cs *engine)
+			     const struct intel_engine_cs *engine,
+			     u32 head)
 {
 	struct intel_ring *ring = ce->ring;
 	u32 *regs = ce->lrc_reg_state;
 
-	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+	GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
 	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
 
 	regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
-	regs[CTX_RING_HEAD] = ring->head;
+	regs[CTX_RING_HEAD] = head;
 	regs[CTX_RING_TAIL] = ring->tail;
 
 	/* RPCS */
@@ -2898,7 +2902,7 @@ __execlists_context_pin(struct intel_context *ce,
 
 	ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, ce->ring->tail);
 
 	return 0;
 }
@@ -2939,7 +2943,7 @@ static void execlists_context_reset(struct intel_context *ce)
 	/* Scrub away the garbage */
 	execlists_init_reg_state(ce->lrc_reg_state,
 				 ce, ce->engine, ce->ring, true);
-	__execlists_update_reg_state(ce, ce->engine);
+	__execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
 
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 }
@@ -3494,6 +3498,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct intel_context *ce;
 	struct i915_request *rq;
+	u32 head;
 
 	mb(); /* paranoia: read the CSB pointers from after the reset */
 	clflush(execlists->csb_write);
@@ -3521,15 +3526,15 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 	if (i915_request_completed(rq)) {
 		/* Idle context; tidy up the ring so we can restart afresh */
-		ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
+		head = intel_ring_wrap(ce->ring, rq->tail);
 		goto out_replay;
 	}
 
 	/* Context has requests still in-flight; it should not be idle! */
 	GEM_BUG_ON(i915_active_is_idle(&ce->active));
 	rq = active_request(ce->timeline, rq);
-	ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
-	GEM_BUG_ON(ce->ring->head == ce->ring->tail);
+	head = intel_ring_wrap(ce->ring, rq->head);
+	GEM_BUG_ON(head == ce->ring->tail);
 
 	/*
 	 * If this request hasn't started yet, e.g. it is waiting on a
@@ -3574,10 +3579,9 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 
 out_replay:
 	ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
-		     ce->ring->head, ce->ring->tail);
-	intel_ring_update_space(ce->ring);
+		     head, ce->ring->tail);
 	__execlists_reset_reg_state(ce, engine);
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, head);
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
 
 unwind:
@@ -5220,10 +5224,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 		restore_default_state(ce, engine);
 
 	/* Rerun the request; its payload has been neutered (if guilty). */
-	ce->ring->head = head;
-	intel_ring_update_space(ce->ring);
-
-	__execlists_update_reg_state(ce, engine);
+	__execlists_update_reg_state(ce, engine, head);
 }
 
 bool
@@ -145,6 +145,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 
 	kref_init(&ring->ref);
 	ring->size = size;
+	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
 
 	/*
 	 * Workaround an erratum on the i830 which causes a hang if
@@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
 	return pos & (ring->size - 1);
 }
 
+static inline int intel_ring_direction(const struct intel_ring *ring,
+				       u32 next, u32 prev)
+{
+	typecheck(typeof(ring->size), next);
+	typecheck(typeof(ring->size), prev);
+	return (next - prev) << ring->wrap;
+}
+
 static inline bool
 intel_ring_offset_valid(const struct intel_ring *ring,
 			unsigned int pos)
@@ -39,12 +39,13 @@ struct intel_ring {
 	 */
 	atomic_t pin_count;
 
-	u32 head;
-	u32 tail;
-	u32 emit;
+	u32 head; /* updated during retire, loosely tracks RING_HEAD */
+	u32 tail; /* updated on submission, used for RING_TAIL */
+	u32 emit; /* updated during request construction */
 
 	u32 space;
 	u32 size;
+	u32 wrap;
 	u32 effective_size;
 };
 
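As a rough illustration of the arithmetic in the two hunks above (not taken from the patch): only the low ilog2(size) bits of a ring offset are meaningful, so shifting the wrapped difference up by ring->wrap yields a signed "ahead or behind" value, which is why execlists_update_context() now forces a context reload whenever the direction is not positive. A minimal userspace sketch, assuming a 4 KiB ring; the helper name and the sample offsets below are illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for intel_ring_direction(), for illustration only. */
static int ring_direction(uint32_t wrap, uint32_t next, uint32_t prev)
{
	/* Only the low ilog2(size) bits are real ring offsets; shifting them
	 * to the top of the word makes the wrapped difference signed. */
	return (int)((next - prev) << wrap);
}

int main(void)
{
	const uint32_t wrap = 32 - 12;	/* BITS_PER_TYPE(u32) - ilog2(4096) */

	/* tail advanced past prev: positive, a lite-restore is fine */
	printf("forward: %s\n", ring_direction(wrap, 0x0100, 0x0080) > 0 ? "yes" : "no");
	/* tail rewound behind (or equal to) prev: force a context reload */
	printf("rewind:  %s\n", ring_direction(wrap, 0x0080, 0x0100) <= 0 ? "yes" : "no");
	return 0;
}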
@@ -186,7 +186,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
 	}
 	GEM_BUG_ON(!ce[1]->ring->size);
 	intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
-	__execlists_update_reg_state(ce[1], engine);
+	__execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
 
 	rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
 	if (IS_ERR(rq[0])) {
@@ -180,7 +180,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file)
 {
-	void *vaddr = obj->phys_handle->vaddr + args->offset;
+	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
 	char __user *user_data = u64_to_user_ptr(args->data_ptr);
 
 	/*
@@ -844,10 +844,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_gtt_pwrite_fast(obj, args);
 
 	if (ret == -EFAULT || ret == -ENOSPC) {
-		if (obj->phys_handle)
-			ret = i915_gem_phys_pwrite(obj, args, file);
-		else
+		if (i915_gem_object_has_struct_page(obj))
 			ret = i915_gem_shmem_pwrite(obj, args);
+		else
+			ret = i915_gem_phys_pwrite(obj, args, file);
 	}
 
 	i915_gem_object_unpin_pages(obj);
@@ -1852,7 +1852,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
 	if (!xchg(&warned, true) &&
 	    ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
 		pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
-		pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+		pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
+		pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
 		pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
 		pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
 		pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
@@ -595,6 +595,8 @@ static void __i915_request_ctor(void *arg)
 	i915_sw_fence_init(&rq->submit, submit_notify);
 	i915_sw_fence_init(&rq->semaphore, semaphore_notify);
 
+	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
+
 	rq->file_priv = NULL;
 	rq->capture_list = NULL;
 
@@ -653,25 +655,30 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 		}
 	}
 
-	ret = intel_timeline_get_seqno(tl, rq, &seqno);
-	if (ret)
-		goto err_free;
-
 	rq->i915 = ce->engine->i915;
 	rq->context = ce;
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
 	rq->execution_mask = ce->engine->mask;
 
+	kref_init(&rq->fence.refcount);
+	rq->fence.flags = 0;
+	rq->fence.error = 0;
+	INIT_LIST_HEAD(&rq->fence.cb_list);
+
+	ret = intel_timeline_get_seqno(tl, rq, &seqno);
+	if (ret)
+		goto err_free;
+
+	rq->fence.context = tl->fence_context;
+	rq->fence.seqno = seqno;
+
 	RCU_INIT_POINTER(rq->timeline, tl);
 	RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
 	rq->hwsp_seqno = tl->hwsp_seqno;
 
 	rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
-	dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
-		       tl->fence_context, seqno);
-
 	/* We bump the ref for the fence chain */
 	i915_sw_fence_reinit(&i915_request_get(rq)->submit);
 	i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
@@ -423,8 +423,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 
 	if (!node_signaled(signal)) {
 		INIT_LIST_HEAD(&dep->dfs_link);
-		list_add(&dep->wait_link, &signal->waiters_list);
-		list_add(&dep->signal_link, &node->signalers_list);
 		dep->signaler = signal;
 		dep->waiter = node;
 		dep->flags = flags;
@@ -434,6 +432,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
 		    !node_started(signal))
 			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
 
+		/* All set, now publish. Beware the lockless walkers. */
+		list_add(&dep->signal_link, &node->signalers_list);
+		list_add_rcu(&dep->wait_link, &signal->waiters_list);
+
 		/*
 		 * As we do not allow WAIT to preempt inflight requests,
 		 * once we have executed a request, along with triggering
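The two scheduler hunks above are the "partially initialized waiters" fix: the dependency is fully set up first and only then published with list_add_rcu(), so the lockless for_each_waiter() walk added in intel_lrc.c never observes a half-built entry. A generic sketch of that initialise-then-publish pattern in plain C11 atomics (not the kernel's list helpers; writers are assumed to be serialised by a lock, as the scheduler lock does here):

#include <stdatomic.h>
#include <stdio.h>

struct waiter {
	int payload;			/* must be complete before publish */
	struct waiter *_Atomic next;	/* link walked without any lock */
};

static struct waiter *_Atomic waiters;	/* head of the lockless list */

/* Writer side: assumed to run under a lock. */
static void publish_waiter(struct waiter *w, int payload)
{
	w->payload = payload;		/* 1. initialise every field first */
	atomic_store_explicit(&w->next,
			      atomic_load_explicit(&waiters, memory_order_relaxed),
			      memory_order_relaxed);
	/* 2. release store: a reader that sees w also sees w->payload */
	atomic_store_explicit(&waiters, w, memory_order_release);
}

/* Reader side: lockless walk, analogous to for_each_waiter(). */
static int sum_waiters(void)
{
	int sum = 0;

	for (struct waiter *w = atomic_load_explicit(&waiters, memory_order_acquire);
	     w;
	     w = atomic_load_explicit(&w->next, memory_order_acquire))
		sum += w->payload;
	return sum;
}

int main(void)
{
	static struct waiter a, b;

	publish_waiter(&a, 1);
	publish_waiter(&b, 2);
	printf("%d\n", sum_waiters());	/* prints 3 */
	return 0;
}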
@@ -8,9 +8,8 @@
 #include "i915_drv.h"
 #include "i915_utils.h"
 
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
-		    "providing the dmesg log by booting with drm.debug=0xf"
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
 
 void
 __i915_printk(struct drm_i915_private *dev_priv, const char *level,