drm/amdkfd: Remove sync_with_hw() from amdkfd
This patch completely removes sync_with_hw() because it was broken and there is no longer any point in using it. This function was used to:

- Make sure that a packet submitted to the HIQ (which is a kernel queue) was read by the CP. However, it turned out that the method this function used to check that (wptr == rptr) is not consistent with how the CP firmware actually works in all cases.

- Make sure that the queue is empty before issuing the next packet. To achieve that, the function blocked amdkfd from continuing until the recently submitted packet was consumed. However, acquire_packet_buffer() already checks whether there is enough room for a new packet, so calling sync_with_hw() is redundant.

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent c51841fbbb
commit 939f4a20a7
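As the commit message notes, the space check in acquire_packet_buffer() is what makes a separate drain-the-queue step unnecessary. Below is a minimal, self-contained sketch of that idea only, not the actual amdkfd code: a ring buffer whose acquire step computes the free space between rptr and wptr and fails (so the caller can roll back or retry) when a packet does not fit. The names struct ring, ring_acquire(), ring_submit() and RING_SIZE_DWORDS are invented for this illustration.

/*
 * Illustrative sketch only -- not the amdkfd implementation.  A producer-side
 * ring buffer whose acquire step checks for free space before a packet is
 * written, in the spirit of acquire_packet_buffer().  struct ring,
 * ring_acquire(), ring_submit() and RING_SIZE_DWORDS are invented names.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE_DWORDS 64u		/* must be a power of two */

struct ring {
	uint32_t buf[RING_SIZE_DWORDS];
	uint32_t wptr;	/* write pointer, advanced by the producer */
	uint32_t rptr;	/* read pointer, advanced by the consumer ("CP") */
};

/*
 * Reserve 'size' dwords or fail if the queue is too full.  Because the space
 * check happens here, at acquire time, the producer never needs a separate
 * "wait until the queue drains" step such as sync_with_hw().
 * (Simplified: a real implementation would pad with NOPs and wrap instead of
 * failing when the packet does not fit contiguously at the end of the ring.)
 */
static bool ring_acquire(struct ring *q, uint32_t size, uint32_t **out)
{
	uint32_t used  = (q->wptr - q->rptr) & (RING_SIZE_DWORDS - 1);
	uint32_t avail = RING_SIZE_DWORDS - 1 - used;	/* one slot kept empty */

	if (size > avail || q->wptr + size > RING_SIZE_DWORDS)
		return false;	/* not enough room: caller rolls back or retries */

	*out = &q->buf[q->wptr];
	return true;
}

/* Publish the reserved packet by moving the write pointer past it. */
static void ring_submit(struct ring *q, uint32_t size)
{
	q->wptr = (q->wptr + size) & (RING_SIZE_DWORDS - 1);
}

int main(void)
{
	struct ring q = { .wptr = 0, .rptr = 0 };
	uint32_t *pkt;

	if (ring_acquire(&q, 4, &pkt)) {
		for (int i = 0; i < 4; i++)
			pkt[i] = 0;	/* arbitrary payload standing in for a real packet */
		ring_submit(&q, 4);
	}
	printf("wptr=%u rptr=%u\n", (unsigned)q.wptr, (unsigned)q.rptr);
	return 0;
}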
@@ -262,28 +262,6 @@ static void submit_packet(struct kernel_queue *kq)
 					kq->pending_wptr);
 }
 
-static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
-{
-	unsigned long org_timeout_ms;
-
-	BUG_ON(!kq);
-
-	org_timeout_ms = timeout_ms;
-	timeout_ms += jiffies * 1000 / HZ;
-	while (*kq->wptr_kernel != *kq->rptr_kernel) {
-		if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
-			pr_err("kfd: kernel_queue %s timeout expired %lu\n",
-				__func__, org_timeout_ms);
-			pr_err("kfd: wptr: %d rptr: %d\n",
-				*kq->wptr_kernel, *kq->rptr_kernel);
-			return -ETIME;
-		}
-		schedule();
-	}
-
-	return 0;
-}
-
 static void rollback_packet(struct kernel_queue *kq)
 {
 	BUG_ON(!kq);
@@ -305,7 +283,6 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	kq->ops.uninitialize = uninitialize;
 	kq->ops.acquire_packet_buffer = acquire_packet_buffer;
 	kq->ops.submit_packet = submit_packet;
-	kq->ops.sync_with_hw = sync_with_hw;
 	kq->ops.rollback_packet = rollback_packet;
 
 	switch (dev->device_info->asic_family) {
@@ -349,7 +326,6 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 	for (i = 0; i < 5; i++)
 		buffer[i] = kq->nop_packet;
 	kq->ops.submit_packet(kq);
-	kq->ops.sync_with_hw(kq, 1000);
 
 	pr_err("kfd: ending kernel queue test\n");
 }
@@ -61,8 +61,6 @@ struct kernel_queue_ops {
 			unsigned int **buffer_ptr);
 
 	void (*submit_packet)(struct kernel_queue *kq);
-	int (*sync_with_hw)(struct kernel_queue *kq,
-				unsigned long timeout_ms);
 	void (*rollback_packet)(struct kernel_queue *kq);
 };
 
@@ -376,7 +376,6 @@ int pm_send_set_resources(struct packet_manager *pm,
 	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 
@@ -413,7 +412,6 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 		goto fail_create_runlist;
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 
@@ -460,7 +458,6 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	packet->data_lo = lower_32_bits((uint64_t)fence_value);
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 	mutex_unlock(&pm->lock);
 
 	return 0;
@@ -538,7 +535,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	};
 
 	pm->priv_queue->ops.submit_packet(pm->priv_queue);
-	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 	return 0;