Commit a839e365 authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: Use pool of ordered wq for GuC submission

To appease lockdep, use a pool of ordered wq for GuC submission rather
than leaving the ordered wq allocation to the drm sched. Without this change
eventually lockdep runs out of hash entries (MAX_LOCKDEP_CHAINS is
exceeded) as each user allocated exec queue adds more hash table entries
to lockdep. A pool of 256 ordered wq should be enough to have
similar behavior with and without lockdep enabled.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 43efd3ba
...@@ -188,6 +188,58 @@ static bool exec_queue_killed_or_banned(struct xe_exec_queue *q) ...@@ -188,6 +188,58 @@ static bool exec_queue_killed_or_banned(struct xe_exec_queue *q)
return exec_queue_killed(q) || exec_queue_banned(q); return exec_queue_killed(q) || exec_queue_banned(q);
} }
#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a fixed pool of NUM_SUBMIT_WQ ordered workqueues up front rather
 * than letting the DRM scheduler allocate one per exec queue.  With lockdep
 * enabled, each dynamically allocated ordered wq adds lock-chain hash table
 * entries, eventually exceeding MAX_LOCKDEP_CHAINS; a bounded pool keeps the
 * entry count constant regardless of how many exec queues userspace creates.
 *
 * Returns 0 on success, -ENOMEM if any allocation fails (every workqueue
 * allocated so far is destroyed on the error path).
 */
static int alloc_submit_wq(struct xe_guc *guc)
{
	int i;

	for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
		guc->submission_state.submit_wq_pool[i] =
			alloc_ordered_workqueue("submit_wq", 0);
		if (!guc->submission_state.submit_wq_pool[i])
			goto err_free;
	}

	return 0;

err_free:
	while (i)
		destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);

	return -ENOMEM;
}

/* Tear down the entire pool; pairs with alloc_submit_wq(). */
static void free_submit_wq(struct xe_guc *guc)
{
	int i;

	for (i = 0; i < NUM_SUBMIT_WQ; ++i)
		destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
}

/*
 * Hand out pool entries round-robin.  Wrap the index explicitly rather than
 * using an ever-increasing counter: the former "submit_wq_idx++ % NUM_SUBMIT_WQ"
 * eventually overflows the signed int (undefined behavior) and, once wrapped
 * negative, the modulo would produce a negative array index.
 */
static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
{
	int idx = guc->submission_state.submit_wq_idx;

	if (++guc->submission_state.submit_wq_idx >= NUM_SUBMIT_WQ)
		guc->submission_state.submit_wq_idx = 0;

	return guc->submission_state.submit_wq_pool[idx];
}
#else
static int alloc_submit_wq(struct xe_guc *guc)
{
	return 0;
}

static void free_submit_wq(struct xe_guc *guc)
{
}

/* Without lockdep, returning NULL lets the DRM sched allocate its own wq. */
static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
{
	return NULL;
}
#endif
static void guc_submit_fini(struct drm_device *drm, void *arg) static void guc_submit_fini(struct drm_device *drm, void *arg)
{ {
struct xe_guc *guc = arg; struct xe_guc *guc = arg;
...@@ -195,6 +247,7 @@ static void guc_submit_fini(struct drm_device *drm, void *arg) ...@@ -195,6 +247,7 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
xa_destroy(&guc->submission_state.exec_queue_lookup); xa_destroy(&guc->submission_state.exec_queue_lookup);
ida_destroy(&guc->submission_state.guc_ids); ida_destroy(&guc->submission_state.guc_ids);
bitmap_free(guc->submission_state.guc_ids_bitmap); bitmap_free(guc->submission_state.guc_ids_bitmap);
free_submit_wq(guc);
mutex_destroy(&guc->submission_state.lock); mutex_destroy(&guc->submission_state.lock);
} }
...@@ -230,6 +283,12 @@ int xe_guc_submit_init(struct xe_guc *guc) ...@@ -230,6 +283,12 @@ int xe_guc_submit_init(struct xe_guc *guc)
if (!guc->submission_state.guc_ids_bitmap) if (!guc->submission_state.guc_ids_bitmap)
return -ENOMEM; return -ENOMEM;
err = alloc_submit_wq(guc);
if (err) {
bitmap_free(guc->submission_state.guc_ids_bitmap);
return err;
}
gt->exec_queue_ops = &guc_exec_queue_ops; gt->exec_queue_ops = &guc_exec_queue_ops;
mutex_init(&guc->submission_state.lock); mutex_init(&guc->submission_state.lock);
...@@ -1166,10 +1225,11 @@ static int guc_exec_queue_init(struct xe_exec_queue *q) ...@@ -1166,10 +1225,11 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT : timeout = (q->vm && xe_vm_no_dma_fences(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
q->hwe->eclass->sched_props.job_timeout_ms; q->hwe->eclass->sched_props.job_timeout_ms;
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops, NULL, err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, get_submit_wq(guc),
64, timeout, guc_to_gt(guc)->ordered_wq, NULL, q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
q->name, gt_to_xe(q->gt)->drm.dev); timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err) if (err)
goto err_free; goto err_free;
......
...@@ -61,6 +61,13 @@ struct xe_guc { ...@@ -61,6 +61,13 @@ struct xe_guc {
/** @patch: patch version of GuC submission */ /** @patch: patch version of GuC submission */
u32 patch; u32 patch;
} version; } version;
#ifdef CONFIG_PROVE_LOCKING
#define NUM_SUBMIT_WQ 256
/** @submit_wq_pool: submission ordered workqueues pool */
struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
/** @submit_wq_idx: submission ordered workqueue index */
int submit_wq_idx;
#endif
/** @enabled: submission is enabled */ /** @enabled: submission is enabled */
bool enabled; bool enabled;
} submission_state; } submission_state;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment