Commit 5574cb21 authored by Ofir Bitton, committed by Oded Gabbay

habanalabs: Assign each CQ with its own work queue

We identified a possible race during job completion when working
with a single multi-threaded work queue. To overcome this race we
now use a single-threaded work queue per completion queue, which
guarantees that jobs on each queue complete in order.
Signed-off-by: Ofir Bitton <obitton@habana.ai>
Reviewed-by: Oded Gabbay <oded.gabbay@gmail.com>
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent c83c4171
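
For context on why this works: with a single unbound, multi-threaded workqueue, two finish_work items from the same completion queue can run on different workers, so a later job's completion handling can finish before an earlier one's. A workqueue created with create_singlethread_workqueue() is ordered: it executes at most one work item at a time, in queueing order. Below is a minimal sketch of the pattern, using illustrative demo_* names that are not part of the driver (the real code is in the diff that follows):

/*
 * Sketch of the per-CQ ordered-workqueue pattern this patch adopts.
 * All demo_* names are illustrative only.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_cq {
	struct workqueue_struct *wq;	/* one single-threaded WQ per CQ */
};

static int demo_cqs_init(struct demo_cq *cqs, unsigned int count)
{
	char name[32];
	unsigned int i;

	for (i = 0; i < count; i++) {
		snprintf(name, sizeof(name), "demo-cq-%u", i);
		/* ordered workqueue: one work item at a time, in queue order */
		cqs[i].wq = create_singlethread_workqueue(name);
		if (!cqs[i].wq)
			return -ENOMEM;
	}
	return 0;
}

/*
 * IRQ path: completions of jobs on the same CQ go to that CQ's own
 * workqueue, so they are processed in submission order. Jobs on
 * different CQs can still be handled in parallel.
 */
static void demo_job_done(struct demo_cq *cq, struct work_struct *finish_work)
{
	queue_work(cq->wq, finish_work);
}
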
@@ -487,10 +487,12 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
 
 void hl_cs_rollback_all(struct hl_device *hdev)
 {
+	int i;
 	struct hl_cs *cs, *tmp;
 
 	/* flush all completions */
-	flush_workqueue(hdev->cq_wq);
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+		flush_workqueue(hdev->cq_wq[i]);
 
 	/* Make sure we don't have leftovers in the H/W queues mirror list */
 	list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list,
...
@@ -249,7 +249,8 @@ static void device_cdev_sysfs_del(struct hl_device *hdev)
  */
 static int device_early_init(struct hl_device *hdev)
 {
-	int rc;
+	int i, rc;
+	char workq_name[32];
 
 	switch (hdev->asic_type) {
 	case ASIC_GOYA:
@@ -274,11 +275,24 @@ static int device_early_init(struct hl_device *hdev)
 	if (rc)
 		goto early_fini;
 
-	hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
-	if (hdev->cq_wq == NULL) {
-		dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
-		rc = -ENOMEM;
-		goto asid_fini;
+	if (hdev->asic_prop.completion_queues_count) {
+		hdev->cq_wq = kcalloc(hdev->asic_prop.completion_queues_count,
+				sizeof(*hdev->cq_wq),
+				GFP_ATOMIC);
+		if (!hdev->cq_wq) {
+			rc = -ENOMEM;
+			goto asid_fini;
+		}
+	}
+
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) {
+		snprintf(workq_name, 32, "hl-free-jobs-%u", i);
+		hdev->cq_wq[i] = create_singlethread_workqueue(workq_name);
+		if (hdev->cq_wq[i] == NULL) {
+			dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+			rc = -ENOMEM;
+			goto free_cq_wq;
+		}
 	}
 
 	hdev->eq_wq = alloc_workqueue("hl-events", WQ_UNBOUND, 0);
@@ -321,7 +335,10 @@ static int device_early_init(struct hl_device *hdev)
 free_eq_wq:
 	destroy_workqueue(hdev->eq_wq);
 free_cq_wq:
-	destroy_workqueue(hdev->cq_wq);
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+		if (hdev->cq_wq[i])
+			destroy_workqueue(hdev->cq_wq[i]);
+	kfree(hdev->cq_wq);
 asid_fini:
 	hl_asid_fini(hdev);
 early_fini:
@@ -339,6 +356,8 @@ static int device_early_init(struct hl_device *hdev)
  */
 static void device_early_fini(struct hl_device *hdev)
 {
+	int i;
+
 	mutex_destroy(&hdev->mmu_cache_lock);
 	mutex_destroy(&hdev->debug_lock);
 	mutex_destroy(&hdev->send_cpu_message_lock);
@@ -351,7 +370,10 @@ static void device_early_fini(struct hl_device *hdev)
 
 	kfree(hdev->hl_chip_info);
 
 	destroy_workqueue(hdev->eq_wq);
-	destroy_workqueue(hdev->cq_wq);
+
+	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
+		destroy_workqueue(hdev->cq_wq[i]);
+	kfree(hdev->cq_wq);
 
 	hl_asid_fini(hdev);
@@ -1181,6 +1203,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
 				"failed to initialize completion queue\n");
 			goto cq_fini;
 		}
+		hdev->completion_queue[i].cq_idx = i;
 	}
 
 	/*
...
@@ -479,6 +479,7 @@ struct hl_hw_queue {
  * @hdev: pointer to the device structure
  * @kernel_address: holds the queue's kernel virtual address
  * @bus_address: holds the queue's DMA address
+ * @cq_idx: completion queue index in array
  * @hw_queue_id: the id of the matching H/W queue
  * @ci: ci inside the queue
  * @pi: pi inside the queue
@@ -488,6 +489,7 @@ struct hl_cq {
 	struct hl_device	*hdev;
 	u64			kernel_address;
 	dma_addr_t		bus_address;
+	u32			cq_idx;
 	u32			hw_queue_id;
 	u32			ci;
 	u32			pi;
@@ -1396,7 +1398,8 @@ struct hl_device_idle_busy_ts {
 * @asic_name: ASIC specific name.
 * @asic_type: ASIC specific type.
 * @completion_queue: array of hl_cq.
-* @cq_wq: work queue of completion queues for executing work in process context
+* @cq_wq: work queues of completion queues for executing work in process
+*         context.
 * @eq_wq: work queue of event queue for executing work in process context.
 * @kernel_ctx: Kernel driver context structure.
 * @kernel_queues: array of hl_hw_queue.
@@ -1492,7 +1495,7 @@ struct hl_device {
 	char			asic_name[16];
 	enum hl_asic_type	asic_type;
 	struct hl_cq		*completion_queue;
-	struct workqueue_struct	*cq_wq;
+	struct workqueue_struct	**cq_wq;
 	struct workqueue_struct	*eq_wq;
 	struct hl_ctx		*kernel_ctx;
 	struct hl_hw_queue	*kernel_queues;
...
@@ -119,7 +119,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
 
 	if ((shadow_index_valid) && (!hdev->disabled)) {
 		job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
-		queue_work(hdev->cq_wq, &job->finish_work);
+		queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
 	}
 
 	atomic_inc(&queue->ci);
...