Commit a86ee96c authored by Matthew Brost's avatar Matthew Brost

drm/xe: Add xe_sched_msg_lock/unlock helper

Will help callers to own locking when adding messages to scheduler.
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240809191929.3138956-2-matthew.brost@intel.com
parent 549dd786
...@@ -15,11 +15,11 @@ static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched) ...@@ -15,11 +15,11 @@ static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{ {
struct xe_sched_msg *msg; struct xe_sched_msg *msg;
spin_lock(&sched->base.job_list_lock); xe_sched_msg_lock(sched);
msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link); msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
if (msg) if (msg)
xe_sched_process_msg_queue(sched); xe_sched_process_msg_queue(sched);
spin_unlock(&sched->base.job_list_lock); xe_sched_msg_unlock(sched);
} }
static struct xe_sched_msg * static struct xe_sched_msg *
...@@ -27,12 +27,12 @@ xe_sched_get_msg(struct xe_gpu_scheduler *sched) ...@@ -27,12 +27,12 @@ xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{ {
struct xe_sched_msg *msg; struct xe_sched_msg *msg;
spin_lock(&sched->base.job_list_lock); xe_sched_msg_lock(sched);
msg = list_first_entry_or_null(&sched->msgs, msg = list_first_entry_or_null(&sched->msgs,
struct xe_sched_msg, link); struct xe_sched_msg, link);
if (msg) if (msg)
list_del(&msg->link); list_del(&msg->link);
spin_unlock(&sched->base.job_list_lock); xe_sched_msg_unlock(sched);
return msg; return msg;
} }
...@@ -93,9 +93,9 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched) ...@@ -93,9 +93,9 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
void xe_sched_add_msg(struct xe_gpu_scheduler *sched, void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg) struct xe_sched_msg *msg)
{ {
spin_lock(&sched->base.job_list_lock); xe_sched_msg_lock(sched);
list_add_tail(&msg->link, &sched->msgs); list_add_tail(&msg->link, &sched->msgs);
spin_unlock(&sched->base.job_list_lock); xe_sched_msg_unlock(sched);
xe_sched_process_msg_queue(sched); xe_sched_process_msg_queue(sched);
} }
@@ -25,6 +25,16 @@ void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg);
/*
 * Take the lock protecting @sched->msgs; pairs with xe_sched_msg_unlock().
 * Reuses the DRM scheduler's job_list_lock rather than a dedicated spinlock.
 */
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
	spin_lock(&sched->base.job_list_lock);
}
/* Release the message-queue lock taken by xe_sched_msg_lock(). */
static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
{
	spin_unlock(&sched->base.job_list_lock);
}
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched) static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{ {
drm_sched_stop(&sched->base, NULL); drm_sched_stop(&sched->base, NULL);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment