Commit 401c005c authored by Pranjal Ramajor Asha Kanojiya's avatar Pranjal Ramajor Asha Kanojiya Committed by Jeffrey Hugo

accel/qaic: Remove bo->queued field

->queued field is used to track whether the BO is submitted to hardware for
DMA or not. Since same information can be retrieved using ->xfer_list field
of same structure remove ->queued as it is redundant.
Signed-off-by: default avatarPranjal Ramajor Asha Kanojiya <quic_pkanojiy@quicinc.com>
Reviewed-by: default avatarJeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: default avatarJeffrey Hugo <quic_jhugo@quicinc.com>
Reviewed-by: default avatarJacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231208163457.1295993-3-quic_jhugo@quicinc.com
parent 0808aef8
...@@ -191,8 +191,6 @@ struct qaic_bo { ...@@ -191,8 +191,6 @@ struct qaic_bo {
u32 nr_slice; u32 nr_slice;
/* Number of slice that have been transferred by DMA engine */ /* Number of slice that have been transferred by DMA engine */
u32 nr_slice_xfer_done; u32 nr_slice_xfer_done;
/* true = BO is queued for execution, true = BO is not queued */
bool queued;
/* /*
* If true then user has attached slicing information to this BO by * If true then user has attached slicing information to this BO by
* calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl. * calling DRM_IOCTL_QAIC_ATTACH_SLICE_BO ioctl.
......
...@@ -141,6 +141,11 @@ struct dbc_rsp { ...@@ -141,6 +141,11 @@ struct dbc_rsp {
__le16 status; __le16 status;
} __packed; } __packed;
static inline bool bo_queued(struct qaic_bo *bo)
{
return !list_empty(&bo->xfer_list);
}
inline int get_dbc_req_elem_size(void) inline int get_dbc_req_elem_size(void)
{ {
return sizeof(struct dbc_req); return sizeof(struct dbc_req);
...@@ -648,6 +653,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit) ...@@ -648,6 +653,7 @@ static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
} }
complete_all(&bo->xfer_done); complete_all(&bo->xfer_done);
INIT_LIST_HEAD(&bo->slices); INIT_LIST_HEAD(&bo->slices);
INIT_LIST_HEAD(&bo->xfer_list);
} }
static struct qaic_bo *qaic_alloc_init_bo(void) static struct qaic_bo *qaic_alloc_init_bo(void)
...@@ -1166,7 +1172,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil ...@@ -1166,7 +1172,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
struct bo_slice *slice; struct bo_slice *slice;
unsigned long flags; unsigned long flags;
struct qaic_bo *bo; struct qaic_bo *bo;
bool queued;
int i, j; int i, j;
int ret; int ret;
...@@ -1198,9 +1203,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil ...@@ -1198,9 +1203,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
} }
spin_lock_irqsave(&dbc->xfer_lock, flags); spin_lock_irqsave(&dbc->xfer_lock, flags);
queued = bo->queued; if (bo_queued(bo)) {
bo->queued = true;
if (queued) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags); spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL; ret = -EINVAL;
goto unlock_bo; goto unlock_bo;
...@@ -1223,7 +1226,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil ...@@ -1223,7 +1226,6 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
else else
ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
if (ret) { if (ret) {
bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags); spin_unlock_irqrestore(&dbc->xfer_lock, flags);
goto unlock_bo; goto unlock_bo;
} }
...@@ -1246,8 +1248,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil ...@@ -1246,8 +1248,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
spin_lock_irqsave(&dbc->xfer_lock, flags); spin_lock_irqsave(&dbc->xfer_lock, flags);
bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list); bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
obj = &bo->base; obj = &bo->base;
bo->queued = false; list_del_init(&bo->xfer_list);
list_del(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags); spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir); dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
drm_gem_object_put(obj); drm_gem_object_put(obj);
...@@ -1608,8 +1609,7 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data) ...@@ -1608,8 +1609,7 @@ irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
*/ */
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir); dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
bo->nr_slice_xfer_done = 0; bo->nr_slice_xfer_done = 0;
bo->queued = false; list_del_init(&bo->xfer_list);
list_del(&bo->xfer_list);
bo->perf_stats.req_processed_ts = ktime_get_ns(); bo->perf_stats.req_processed_ts = ktime_get_ns();
complete_all(&bo->xfer_done); complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base); drm_gem_object_put(&bo->base);
...@@ -1868,7 +1868,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi ...@@ -1868,7 +1868,7 @@ int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
/* Check if BO is committed to H/W for DMA */ /* Check if BO is committed to H/W for DMA */
spin_lock_irqsave(&dbc->xfer_lock, flags); spin_lock_irqsave(&dbc->xfer_lock, flags);
if (bo->queued) { if (bo_queued(bo)) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags); spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EBUSY; ret = -EBUSY;
goto unlock_ch_srcu; goto unlock_ch_srcu;
...@@ -1898,8 +1898,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db ...@@ -1898,8 +1898,7 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_lock_irqsave(&dbc->xfer_lock, flags); spin_lock_irqsave(&dbc->xfer_lock, flags);
while (!list_empty(&dbc->xfer_list)) { while (!list_empty(&dbc->xfer_list)) {
bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list); bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
bo->queued = false; list_del_init(&bo->xfer_list);
list_del(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags); spin_unlock_irqrestore(&dbc->xfer_lock, flags);
bo->nr_slice_xfer_done = 0; bo->nr_slice_xfer_done = 0;
bo->req_id = 0; bo->req_id = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment