Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

This time mostly cleanups around the runtime power management handling
and slightly improved GPU hang handling. Also some additions to the
HWDB to get the driver working properly on more NXP i.MX8MP IP cores.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas Stach <l.stach@pengutronix.de>
Link: https://patchwork.freedesktop.org/patch/msgid/f40c65f7ecfde2e61f1a6d7fd463f6f739bc0dd1.camel@pengutronix.de
Commit: bd23a6ac53 (Dave Airlie, 2023-08-18 06:41:51 +10:00)
11 changed files with 201 additions and 107 deletions

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c

@@ -53,11 +53,12 @@ static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
 	OUT(buffer, VIV_FE_END_HEADER_OP_END);
 }
 
-static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
+static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer,
+			    unsigned int waitcycles)
 {
 	buffer->user_size = ALIGN(buffer->user_size, 8);
 
-	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
+	OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | waitcycles);
 }
 
 static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
@@ -168,7 +169,7 @@ u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
 	/* initialize buffer */
 	buffer->user_size = 0;
 
-	CMD_WAIT(buffer);
+	CMD_WAIT(buffer, gpu->fe_waitcycles);
 	CMD_LINK(buffer, 2,
 		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
 		 + buffer->user_size - 4);
@@ -320,7 +321,7 @@ void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event)
 	CMD_END(buffer);
 
 	/* Append waitlink */
-	CMD_WAIT(buffer);
+	CMD_WAIT(buffer, gpu->fe_waitcycles);
 	CMD_LINK(buffer, 2,
 		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
 		 + buffer->user_size - 4);
@@ -503,7 +504,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
 		       VIVS_GL_EVENT_FROM_PE);
-	CMD_WAIT(buffer);
+	CMD_WAIT(buffer, gpu->fe_waitcycles);
 	CMD_LINK(buffer, 2,
 		 etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping)
 		 + buffer->user_size - 4);
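All three WAIT/LINK sites above now take the cycle count from gpu->fe_waitcycles instead of the hardcoded 200. As an aside (an illustrative sketch, not code from the patch): CMD_WAIT() simply ORs the count into the command word, and the 0xffff clamp applied to fe_waitcycles in etnaviv_gpu.c implies a 16-bit count field. FE_OP_WAIT below is an assumed opcode value for illustration, not the rnndb definition.

/* Standalone sketch of what CMD_WAIT() emits: one 32-bit FE command word
 * with the WAIT opcode in the high bits and the wait cycle count in the
 * low 16 bits. The opcode value is assumed, purely for illustration. */
#include <stdint.h>
#include <stdio.h>

#define FE_OP_WAIT	(0x7u << 27)	/* assumed encoding, not authoritative */

static uint32_t cmd_wait(unsigned int waitcycles)
{
	return FE_OP_WAIT | (waitcycles & 0xffff);
}

int main(void)
{
	printf("old: 0x%08x\n", cmd_wait(200));   /* pre-patch hardcoded value */
	printf("new: 0x%08x\n", cmd_wait(24414)); /* ~30us at an 800 MHz core */
	return 0;
}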

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c

@@ -121,6 +121,9 @@ void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
 	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
 				 SUBALLOC_GRANULE);
 
+	if (!suballoc)
+		return;
+
 	mutex_lock(&suballoc->lock);
 	bitmap_release_region(suballoc->granule_map,
 			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -6,7 +6,9 @@
 #include <linux/component.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
 #include <linux/uaccess.h>
 
 #include <drm/drm_debugfs.h>

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c

@@ -130,9 +130,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 		return;
 	etnaviv_dump_core = false;
 
-	mutex_lock(&gpu->mmu_context->lock);
+	mutex_lock(&submit->mmu_context->lock);
 
-	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
+	mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
 
 	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
 	n_obj = 5;
@@ -162,7 +162,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
 			__GFP_NORETRY);
 	if (!iter.start) {
-		mutex_unlock(&gpu->mmu_context->lock);
+		mutex_unlock(&submit->mmu_context->lock);
 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
 		return;
 	}
@@ -174,18 +174,18 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	memset(iter.hdr, 0, iter.data - iter.start);
 
 	etnaviv_core_dump_registers(&iter, gpu);
-	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
+	etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
 			      gpu->buffer.size,
 			      etnaviv_cmdbuf_get_va(&gpu->buffer,
-					&gpu->mmu_context->cmdbuf_mapping));
+					&submit->mmu_context->cmdbuf_mapping));
 
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
 			      submit->cmdbuf.vaddr, submit->cmdbuf.size,
 			      etnaviv_cmdbuf_get_va(&submit->cmdbuf,
-					&gpu->mmu_context->cmdbuf_mapping));
+					&submit->mmu_context->cmdbuf_mapping));
 
-	mutex_unlock(&gpu->mmu_context->lock);
+	mutex_unlock(&submit->mmu_context->lock);
 
 	/* Reserve space for the bomap */
 	if (n_bomap_pages) {

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h

@@ -97,7 +97,6 @@ struct etnaviv_gem_submit {
 	struct list_head node; /* GPU active submit list */
 	struct etnaviv_cmdbuf cmdbuf;
 	struct pid *pid;       /* submitting process */
-	bool runtime_resumed;
 	u32 exec_state;
 	u32 flags;
 	unsigned int nr_pmrs;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

@@ -362,9 +362,6 @@ static void submit_cleanup(struct kref *kref)
 			container_of(kref, struct etnaviv_gem_submit, refcount);
 	unsigned i;
 
-	if (submit->runtime_resumed)
-		pm_runtime_put_autosuspend(submit->gpu->dev);
-
 	if (submit->cmdbuf.suballoc)
 		etnaviv_cmdbuf_free(&submit->cmdbuf);

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -493,6 +493,14 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
 		clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
 		etnaviv_gpu_load_clock(gpu, clock);
 	}
+
+	/*
+	 * Choose number of wait cycles to target a ~30us (1/32768) max latency
+	 * until new work is picked up by the FE when it polls in the idle loop.
+	 * If the GPU base frequency is unknown use 200 wait cycles.
+	 */
+	gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
+				   200UL, 0xffffUL);
 }
 
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
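A quick check of the arithmetic in the new comment (illustration only, not part of the patch): a right shift by 15 divides the core clock rate by 32768, so the FE re-polls its wait-link loop roughly every 1/32768 of a second. The 800 MHz base rate below is an assumed example value; cores that report no base rate fall back to the old count of 200.

/* Standalone illustration of the fe_waitcycles formula above. */
#include <stdio.h>

static unsigned long clamp_ul(unsigned long v, unsigned long lo,
			      unsigned long hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	unsigned long base_rate_core = 800000000UL; /* assumed core clock */
	unsigned int freq_scale = 0;                /* running at full speed */

	/* rate >> 15 == rate / 32768: the cycle count elapsing in ~30.5us */
	unsigned long fe_waitcycles =
		clamp_ul(base_rate_core >> (15 - freq_scale), 200UL, 0xffffUL);

	printf("fe_waitcycles = %lu\n", fe_waitcycles); /* prints 24414 */
	return 0;
}

At 800 MHz the clamp yields 24414 cycles, about 30.5us, matching the ~30us target stated in the comment.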
@@ -576,7 +584,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	/* We rely on the GPU running, so program the clock */
 	etnaviv_gpu_update_clock(gpu);
 
-	gpu->fe_running = false;
+	gpu->state = ETNA_GPU_STATE_RESET;
 	gpu->exec_state = -1;
 	if (gpu->mmu_context)
 		etnaviv_iommu_context_put(gpu->mmu_context);
@@ -651,8 +659,6 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
 			     VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
 			     VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
 	}
-
-	gpu->fe_running = true;
 }
 
 static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
@@ -661,6 +667,8 @@ static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
 	u16 prefetch;
 	u32 address;
 
+	WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);
+
 	/* setup the MMU */
 	etnaviv_iommu_restore(gpu, context);
@@ -670,6 +678,8 @@ static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
 					&gpu->mmu_context->cmdbuf_mapping);
 
 	etnaviv_gpu_start_fe(gpu, address, prefetch);
+
+	gpu->state = ETNA_GPU_STATE_RUNNING;
 }
 
 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
@@ -705,6 +715,9 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
+	WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
+		  gpu->state == ETNA_GPU_STATE_RESET));
+
 	if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
 	     etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
 	    gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
@@ -751,6 +764,8 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 	etnaviv_gpu_setup_pulse_eater(gpu);
 
 	gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+
+	gpu->state = ETNA_GPU_STATE_INITIALIZED;
 }
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
@@ -793,6 +808,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	    (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
 		gpu->sec_mode = ETNA_SEC_KERNEL;
 
+	gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+
 	ret = etnaviv_hw_reset(gpu);
 	if (ret) {
 		dev_err(gpu->dev, "GPU reset failed\n");
@@ -859,8 +876,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	pm_runtime_mark_last_busy(gpu->dev);
 	pm_runtime_put_autosuspend(gpu->dev);
 
-	gpu->initialized = true;
-
 	return 0;
 
 fail:
@@ -1059,50 +1074,6 @@ pm_put:
 }
 #endif
 
-void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
-{
-	struct etnaviv_gpu *gpu = submit->gpu;
-	char *comm = NULL, *cmd = NULL;
-	struct task_struct *task;
-	unsigned int i;
-
-	dev_err(gpu->dev, "recover hung GPU!\n");
-
-	task = get_pid_task(submit->pid, PIDTYPE_PID);
-	if (task) {
-		comm = kstrdup(task->comm, GFP_KERNEL);
-		cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
-		put_task_struct(task);
-	}
-
-	if (comm && cmd)
-		dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
-
-	kfree(cmd);
-	kfree(comm);
-
-	if (pm_runtime_get_sync(gpu->dev) < 0)
-		goto pm_put;
-
-	mutex_lock(&gpu->lock);
-
-	etnaviv_hw_reset(gpu);
-
-	/* complete all events, the GPU won't do it after the reset */
-	spin_lock(&gpu->event_spinlock);
-	for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
-		complete(&gpu->event_free);
-	bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
-	spin_unlock(&gpu->event_spinlock);
-
-	etnaviv_gpu_hw_init(gpu);
-
-	mutex_unlock(&gpu->lock);
-
-	pm_runtime_mark_last_busy(gpu->dev);
-pm_put:
-	pm_runtime_put_autosuspend(gpu->dev);
-}
-
 /* fence object management */
 struct etnaviv_fence {
 	struct etnaviv_gpu *gpu;
@@ -1183,20 +1154,22 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
 			unsigned int *events)
 {
 	unsigned long timeout = msecs_to_jiffies(10 * 10000);
-	unsigned i, acquired = 0;
+	unsigned i, acquired = 0, rpm_count = 0;
+	int ret;
 
 	for (i = 0; i < nr_events; i++) {
-		unsigned long ret;
+		unsigned long remaining;
 
-		ret = wait_for_completion_timeout(&gpu->event_free, timeout);
-		if (!ret) {
+		remaining = wait_for_completion_timeout(&gpu->event_free, timeout);
+		if (!remaining) {
 			dev_err(gpu->dev, "wait_for_completion_timeout failed");
+			ret = -EBUSY;
 			goto out;
 		}
 
 		acquired++;
-		timeout = ret;
+		timeout = remaining;
 	}
 
 	spin_lock(&gpu->event_spinlock);
@@ -1211,13 +1184,23 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
 
 	spin_unlock(&gpu->event_spinlock);
 
+	for (i = 0; i < nr_events; i++) {
+		ret = pm_runtime_resume_and_get(gpu->dev);
+		if (ret)
+			goto out_rpm;
+		rpm_count++;
+	}
+
+	return 0;
+
+out_rpm:
+	for (i = 0; i < rpm_count; i++)
+		pm_runtime_put_autosuspend(gpu->dev);
 out:
 	for (i = 0; i < acquired; i++)
 		complete(&gpu->event_free);
 
-	return -EBUSY;
+	return ret;
 }
 
 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1229,6 +1212,8 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
 		clear_bit(event, gpu->event_bitmap);
 		complete(&gpu->event_free);
 	}
+
+	pm_runtime_put_autosuspend(gpu->dev);
 }
 
 /*
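Taken together, event_alloc() now holds one runtime-PM reference per in-flight event and event_free() drops it, so the device stays resumed exactly as long as events are outstanding, and the error path unwinds only the rpm_count references actually taken. A minimal userspace sketch of that acquire-N-or-roll-back shape (resource_get()/resource_put() are hypothetical stand-ins for pm_runtime_resume_and_get()/pm_runtime_put_autosuspend()):

#include <stdio.h>

static int resource_get(int i) { return i < 2 ? 0 : -1; } /* fail on the 3rd */
static void resource_put(int i) { printf("released %d\n", i); }

static int acquire_all(int n)
{
	int i, count = 0, ret = 0;

	for (i = 0; i < n; i++) {
		ret = resource_get(i);
		if (ret)
			goto rollback;
		count++;
	}
	return 0;

rollback:
	/* undo only what was actually acquired, like the out_rpm label above */
	while (count--)
		resource_put(count);
	return ret;
}

int main(void) { return acquire_all(3) ? 1 : 0; }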
@@ -1371,15 +1356,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 	unsigned int i, nr_events = 1, event[3];
 	int ret;
 
-	if (!submit->runtime_resumed) {
-		ret = pm_runtime_get_sync(gpu->dev);
-		if (ret < 0) {
-			pm_runtime_put_noidle(gpu->dev);
-			return NULL;
-		}
-		submit->runtime_resumed = true;
-	}
-
 	/*
 	 * if there are performance monitor requests we need to have
 	 * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
@@ -1407,7 +1383,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 		goto out_unlock;
 	}
 
-	if (!gpu->fe_running)
+	if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
 		etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
 
 	if (submit->prev_mmu_context)
@@ -1454,6 +1430,49 @@ static void sync_point_worker(struct work_struct *work)
 	etnaviv_gpu_start_fe(gpu, addr + 2, 2);
 }
 
+void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
+{
+	struct etnaviv_gpu *gpu = submit->gpu;
+	char *comm = NULL, *cmd = NULL;
+	struct task_struct *task;
+	unsigned int i;
+
+	dev_err(gpu->dev, "recover hung GPU!\n");
+
+	task = get_pid_task(submit->pid, PIDTYPE_PID);
+	if (task) {
+		comm = kstrdup(task->comm, GFP_KERNEL);
+		cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+		put_task_struct(task);
+	}
+
+	if (comm && cmd)
+		dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
+
+	kfree(cmd);
+	kfree(comm);
+
+	if (pm_runtime_get_sync(gpu->dev) < 0)
+		goto pm_put;
+
+	mutex_lock(&gpu->lock);
+
+	etnaviv_hw_reset(gpu);
+
+	/* complete all events, the GPU won't do it after the reset */
+	spin_lock(&gpu->event_spinlock);
+	for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+		event_free(gpu, i);
+	spin_unlock(&gpu->event_spinlock);
+
+	etnaviv_gpu_hw_init(gpu);
+
+	mutex_unlock(&gpu->lock);
+
+	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
+	pm_runtime_put_autosuspend(gpu->dev);
+}
+
 static void dump_mmu_fault(struct etnaviv_gpu *gpu)
 {
 	static const char *fault_reasons[] = {
@@ -1520,6 +1539,8 @@ static irqreturn_t irq_handler(int irq, void *data)
 		if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
 			dump_mmu_fault(gpu);
 
+			gpu->state = ETNA_GPU_STATE_FAULT;
+
 			drm_sched_fault(&gpu->sched);
 			intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
 		}
@@ -1628,9 +1649,9 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
 	} while (1);
 }
 
-static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-	if (gpu->initialized && gpu->fe_running) {
+	if (gpu->state == ETNA_GPU_STATE_RUNNING) {
 		/* Replace the last WAIT with END */
 		mutex_lock(&gpu->lock);
 		etnaviv_buffer_end(gpu);
@@ -1643,12 +1664,10 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 		 */
 		etnaviv_gpu_wait_idle(gpu, 100);
 
-		gpu->fe_running = false;
+		gpu->state = ETNA_GPU_STATE_INITIALIZED;
 	}
 
-	gpu->exec_state = -1;
-
-	return etnaviv_gpu_clk_disable(gpu);
 }
 
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
@@ -1733,13 +1752,11 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	if (ret)
 		goto out_workqueue;
 
-	if (IS_ENABLED(CONFIG_PM))
-		ret = pm_runtime_get_sync(gpu->dev);
-	else
+	if (!IS_ENABLED(CONFIG_PM)) {
 		ret = etnaviv_gpu_clk_enable(gpu);
-	if (ret < 0)
-		goto out_sched;
+		if (ret < 0)
+			goto out_sched;
+	}
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1751,9 +1768,6 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	priv->gpu[priv->num_gpus++] = gpu;
 
-	pm_runtime_mark_last_busy(gpu->dev);
-	pm_runtime_put_autosuspend(gpu->dev);
-
 	return 0;
 
 out_sched:
@@ -1785,16 +1799,14 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 		pm_runtime_put_sync_suspend(gpu->dev);
 	} else {
 		etnaviv_gpu_hw_suspend(gpu);
+		etnaviv_gpu_clk_disable(gpu);
 	}
 
 	if (gpu->mmu_context)
 		etnaviv_iommu_context_put(gpu->mmu_context);
 
-	if (gpu->initialized) {
-		etnaviv_cmdbuf_free(&gpu->buffer);
-		etnaviv_iommu_global_fini(gpu);
-		gpu->initialized = false;
-	}
+	etnaviv_cmdbuf_free(&gpu->buffer);
+	etnaviv_iommu_global_fini(gpu);
 
 	gpu->drm = NULL;
 	xa_destroy(&gpu->user_fences);
@@ -1918,7 +1930,11 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
 		return -EBUSY;
 	}
 
-	return etnaviv_gpu_hw_suspend(gpu);
+	etnaviv_gpu_hw_suspend(gpu);
+
+	gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+
+	return etnaviv_gpu_clk_disable(gpu);
 }
 
 static int etnaviv_gpu_rpm_resume(struct device *dev)
@@ -1931,7 +1947,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
 		return ret;
 
 	/* Re-initialise the basic hardware state */
-	if (gpu->drm && gpu->initialized) {
+	if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
 		ret = etnaviv_gpu_hw_resume(gpu);
 		if (ret) {
 			etnaviv_gpu_clk_disable(gpu);

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -95,6 +95,15 @@ struct clk;
 
 #define ETNA_NR_EVENTS 30
 
+enum etnaviv_gpu_state {
+	ETNA_GPU_STATE_UNKNOWN = 0,
+	ETNA_GPU_STATE_IDENTIFIED,
+	ETNA_GPU_STATE_RESET,
+	ETNA_GPU_STATE_INITIALIZED,
+	ETNA_GPU_STATE_RUNNING,
+	ETNA_GPU_STATE_FAULT,
+};
+
 struct etnaviv_gpu {
 	struct drm_device *drm;
 	struct thermal_cooling_device *cooling;
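Reading the etnaviv_gpu.c hunks above together, the new state field replaces the old initialized/fe_running bools with a single lifecycle. Roughly (a summary drawn from this diff, not text from the patch):

UNKNOWN -> IDENTIFIED      etnaviv_gpu_init() has read back the chip identity
IDENTIFIED -> RESET        etnaviv_hw_reset()
RESET -> INITIALIZED       etnaviv_gpu_hw_init()
INITIALIZED -> RUNNING     etnaviv_gpu_start_fe_idleloop()
RUNNING -> INITIALIZED     etnaviv_gpu_hw_suspend() parks the FE
RUNNING -> IDENTIFIED      runtime suspend: clocks gated, hardware state lost
any -> FAULT               MMU exception; recover_hang() resets back to INITIALIZED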
@@ -105,8 +114,7 @@ struct etnaviv_gpu {
 	struct workqueue_struct *wq;
 	struct mutex sched_lock;
 	struct drm_gpu_scheduler sched;
-	bool initialized;
-	bool fe_running;
+	enum etnaviv_gpu_state state;
 
 	/* 'ring'-buffer: */
 	struct etnaviv_cmdbuf buffer;
@@ -150,6 +158,7 @@ struct etnaviv_gpu {
 	struct clk *clk_shader;
 
 	unsigned int freq_scale;
+	unsigned int fe_waitcycles;
 	unsigned long base_rate_core;
 	unsigned long base_rate_shader;
 };

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c

@@ -38,6 +38,37 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
 		.minor_features10 = 0x0,
 		.minor_features11 = 0x0,
 	},
+	{
+		.model = 0x520,
+		.revision = 0x5341,
+		.product_id = 0x5202,
+		.customer_id = 0x204,
+		.eco_id = 0,
+		.stream_count = 1,
+		.register_max = 64,
+		.thread_count = 256,
+		.shader_core_count = 1,
+		.vertex_cache_size = 8,
+		.vertex_output_buffer_size = 512,
+		.pixel_pipes = 1,
+		.instruction_count = 256,
+		.num_constants = 168,
+		.buffer_size = 0,
+		.varyings_count = 8,
+		.features = 0xe02c7eca,
+		.minor_features0 = 0xe9399eff,
+		.minor_features1 = 0xfe1fb2db,
+		.minor_features2 = 0xcedf0080,
+		.minor_features3 = 0x10800005,
+		.minor_features4 = 0x20000000,
+		.minor_features5 = 0x00020880,
+		.minor_features6 = 0x00000000,
+		.minor_features7 = 0x00001000,
+		.minor_features8 = 0x00000000,
+		.minor_features9 = 0x00000000,
+		.minor_features10 = 0x00000000,
+		.minor_features11 = 0x00000000,
+	},
 	{
 		.model = 0x7000,
 		.revision = 0x6202,
@@ -197,6 +228,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
 		.minor_features10 = 0x108048c0,
 		.minor_features11 = 0x00000010,
 	},
+	{
+		.model = 0x8000,
+		.revision = 0x8002,
+		.product_id = 0x5080009,
+		.customer_id = 0x9f,
+		.eco_id = 0x6000000,
+		.stream_count = 8,
+		.register_max = 64,
+		.thread_count = 256,
+		.shader_core_count = 1,
+		.nn_core_count = 6,
+		.vertex_cache_size = 16,
+		.vertex_output_buffer_size = 1024,
+		.pixel_pipes = 1,
+		.instruction_count = 512,
+		.num_constants = 320,
+		.buffer_size = 0,
+		.varyings_count = 16,
+		.features = 0xe0287cac,
+		.minor_features0 = 0xc1799eff,
+		.minor_features1 = 0xfefbfadb,
+		.minor_features2 = 0xeb9d6fbf,
+		.minor_features3 = 0xedfffced,
+		.minor_features4 = 0xd30dafc7,
+		.minor_features5 = 0x7b5ac333,
+		.minor_features6 = 0xfc8ee200,
+		.minor_features7 = 0x03fffa6f,
+		.minor_features8 = 0x00fe0ef0,
+		.minor_features9 = 0x0088003c,
+		.minor_features10 = 0x108048c0,
+		.minor_features11 = 0x00000010,
+	},
 };
 
 bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c

@@ -553,6 +553,9 @@ void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
 	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
 	struct etnaviv_iommu_global *global = priv->mmu_global;
 
+	if (!global)
+		return;
+
 	if (--global->use > 0)
 		return;

diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c

@@ -55,8 +55,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
 	 */
 	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 	change = dma_addr - gpu->hangcheck_dma_addr;
-	if (gpu->completed_fence != gpu->hangcheck_fence ||
-	    change < 0 || change > 16) {
+	if (gpu->state == ETNA_GPU_STATE_RUNNING &&
+	    (gpu->completed_fence != gpu->hangcheck_fence ||
+	     change < 0 || change > 16)) {
 		gpu->hangcheck_dma_addr = dma_addr;
 		gpu->hangcheck_fence = gpu->completed_fence;
 		goto out_no_timeout;
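To make the heuristic explicit (an illustration, not part of the patch): the timeout handler re-arms instead of recovering when a fence has completed since the last check or the FE DMA address has moved outside a small window, and with this change a GPU already marked FAULT never counts as making progress. A standalone sketch with hypothetical types:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical snapshot of the values etnaviv_sched_timedout_job() compares. */
struct hangcheck {
	uint32_t last_dma_addr;
	uint32_t last_fence;
};

/* Returns true when the job should get more time instead of being recovered. */
static bool made_progress(struct hangcheck *hc, bool running,
			  uint32_t dma_addr, uint32_t completed_fence)
{
	int32_t change = (int32_t)(dma_addr - hc->last_dma_addr);
	bool moved = completed_fence != hc->last_fence ||
		     change < 0 || change > 16;

	if (!running || !moved)
		return false;	/* stuck, or already faulted: run recovery */

	/* record the new position so the next timeout compares against it */
	hc->last_dma_addr = dma_addr;
	hc->last_fence = completed_fence;
	return true;
}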