accel/qaic: Remove bo->queued field

The ->queued field tracks whether a BO has been submitted to the hardware for
DMA. The same information can be derived from the ->xfer_list field of the
same structure, so remove ->queued as redundant.

Signed-off-by: Pranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231208163457.1295993-3-quic_jhugo@quicinc.com
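
The patch rests on one list idiom: a node removed with list_del_init() is
re-initialized to point at itself, so list_empty() on the node doubles as a
"not queued" test, which is exactly what the new bo_queued() helper checks.
What follows is a minimal userspace sketch of that idiom, not the driver's
actual code: the list helpers are simplified stand-ins for the kernel's
<linux/list.h>, and the stripped-down struct qaic_bo is hypothetical (the
real driver also serializes these operations under dbc->xfer_lock).

#include <assert.h>
#include <stdbool.h>

/* Simplified stand-ins for the kernel's <linux/list.h> circular
 * doubly linked list. An empty (or re-initialized) node points at itself.
 */
struct list_head {
	struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->next = head;
	head->prev = head;
}

static bool list_empty(const struct list_head *head)
{
	return head->next == head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

/* Unlike a plain unlink, list_del_init() re-initializes the removed
 * entry, so list_empty() on the entry itself stays meaningful.
 */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

/* Hypothetical stripped-down BO carrying only the field the idiom uses. */
struct qaic_bo {
	struct list_head xfer_list;
};

/* Mirrors the bo_queued() helper added by this patch. */
static bool bo_queued(struct qaic_bo *bo)
{
	return !list_empty(&bo->xfer_list);
}

int main(void)
{
	struct list_head dbc_xfer_list;	/* stand-in for dbc->xfer_list */
	struct qaic_bo bo;

	INIT_LIST_HEAD(&dbc_xfer_list);
	INIT_LIST_HEAD(&bo.xfer_list);	/* as qaic_init_bo() now does */
	assert(!bo_queued(&bo));

	list_add_tail(&bo.xfer_list, &dbc_xfer_list);	/* queue for DMA */
	assert(bo_queued(&bo));

	list_del_init(&bo.xfer_list);	/* complete or unwind */
	assert(!bo_queued(&bo));
	return 0;
}

This is also why every plain list_del(&bo->xfer_list) becomes
list_del_init() in the hunks below: the kernel's list_del() poisons the
entry's pointers rather than re-initializing them, so a later list_empty()
check on the entry would not report the BO as unqueued.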

--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -191,8 +191,6 @@ struct qaic_bo {
 	u32			nr_slice;
 	/* Number of slice that have been transferred by DMA engine */
 	u32			nr_slice_xfer_done;
-	/* true = BO is queued for execution, false = BO is not queued */
-	bool			queued;
 	/*
 	 * If true then user has attached slicing information to this BO by
	 * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.

--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -141,6 +141,11 @@ struct dbc_rsp {
 	__le16	status;
 } __packed;
 
+static inline bool bo_queued(struct qaic_bo *bo)
+{
+	return !list_empty(&bo->xfer_list);
+}
+
 inline int get_dbc_req_elem_size(void)
 {
 	return sizeof(struct dbc_req);
@@ -648,6 +653,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
 	}
 	complete_all(&bo->xfer_done);
 	INIT_LIST_HEAD(&bo->slices);
+	INIT_LIST_HEAD(&bo->xfer_list);
 }
 
 static struct qaic_bo *qaic_alloc_init_bo(void)
@@ -1166,7 +1172,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 	struct bo_slice *slice;
 	unsigned long flags;
 	struct qaic_bo *bo;
-	bool queued;
 	int i, j;
 	int ret;
 
@@ -1198,9 +1203,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		}
 
 		spin_lock_irqsave(&dbc->xfer_lock, flags);
-		queued = bo->queued;
-		bo->queued = true;
-		if (queued) {
+		if (bo_queued(bo)) {
 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 			ret = -EINVAL;
 			goto unlock_bo;
@@ -1223,7 +1226,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
 		else
 			ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
 		if (ret) {
-			bo->queued = false;
 			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 			goto unlock_bo;
 		}
@@ -1246,8 +1248,7 @@ failed_to_send_bo:
 		spin_lock_irqsave(&dbc->xfer_lock, flags);
 		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
 		obj = &bo->base;
-		bo->queued = false;
-		list_del(&bo->xfer_list);
+		list_del_init(&bo->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
 		drm_gem_object_put(obj);
@@ -1608,8 +1609,7 @@ read_fifo:
 		 */
 		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
 		bo->nr_slice_xfer_done = 0;
-		bo->queued = false;
-		list_del(&bo->xfer_list);
+		list_del_init(&bo->xfer_list);
 		bo->perf_stats.req_processed_ts = ktime_get_ns();
 		complete_all(&bo->xfer_done);
 		drm_gem_object_put(&bo->base);
@@ -1868,7 +1868,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 
 	/* Check if BO is committed to H/W for DMA */
 	spin_lock_irqsave(&dbc->xfer_lock, flags);
-	if (bo->queued) {
+	if (bo_queued(bo)) {
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		ret = -EBUSY;
 		goto unlock_ch_srcu;
@@ -1898,8 +1898,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
 	spin_lock_irqsave(&dbc->xfer_lock, flags);
 	while (!list_empty(&dbc->xfer_list)) {
 		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
-		bo->queued = false;
-		list_del(&bo->xfer_list);
+		list_del_init(&bo->xfer_list);
 		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
 		bo->nr_slice_xfer_done = 0;
 		bo->req_id = 0;