Commit 69879b30 authored by Philip Yang, committed by Alex Deucher

drm/amdkfd: fix svm_bo release invalid wait context warning

Add svm_range_bo_unref_async() to schedule work that waits for the
svm_bo eviction work to finish and then frees the svm_bo.
svm_migrate_page_free() runs in atomic context (__do_munmap() ->
put_page()), so it must use svm_range_bo_unref_async() to avoid an
invalid-wait-context warning. Other, non-atomic contexts keep calling
svm_range_bo_unref().
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 5c1e6fa4
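The fix applies a common kernel pattern: a kref release callback that may be
invoked from atomic context must not sleep, so instead of doing the blocking
teardown directly it only schedules a work item, and the work handler performs
the sleeping cleanup and the final kfree(). What follows is a minimal sketch of
that pattern as kernel-module C; every name in it (demo_obj, demo_put_async,
and so on) is hypothetical, an illustration of the shape the diff below
introduces rather than the driver's actual code.

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch only, with hypothetical names: deferred release of a
 * kref-counted object whose final put may happen in atomic context.
 */
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct kref kref;
	struct work_struct cleanup_work;	/* long-running, may sleep */
	struct work_struct release_work;	/* defers the final release */
};

/* Final teardown; may sleep, so it must not run in atomic context. */
static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	cancel_work_sync(&obj->cleanup_work);	/* may sleep */
	kfree(obj);
}

/* Work handler: runs in process context, where sleeping is allowed. */
static void demo_wq_release(struct work_struct *work)
{
	struct demo_obj *obj = container_of(work, struct demo_obj,
					    release_work);

	demo_release(&obj->kref);
}

/* Release callback safe in atomic context: it only queues work. */
static void demo_release_async(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	INIT_WORK(&obj->release_work, demo_wq_release);
	schedule_work(&obj->release_work);
}

/* Drop a reference from atomic context (e.g. under a spinlock). */
static void demo_put_async(struct demo_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, demo_release_async);
}

/* Drop a reference from process context, freeing synchronously. */
static void demo_put(struct demo_obj *obj)
{
	if (obj)
		kref_put(&obj->kref, demo_release);
}

The trade-off is that the free becomes asynchronous: after demo_put_async()
returns, the object may briefly still be alive, so callers must not assume its
memory has been reclaimed. The patch mirrors this split exactly:
svm_range_bo_unref_async() for the atomic put in svm_migrate_page_free(), and
the synchronous svm_range_bo_unref(), now static, for non-atomic callers
inside kfd_svm.c.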
@@ -549,7 +549,7 @@ static void svm_migrate_page_free(struct page *page)
 
 	if (svm_bo) {
 		pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
-		svm_range_bo_unref(svm_bo);
+		svm_range_bo_unref_async(svm_bo);
 	}
 }
 
...
@@ -332,6 +332,8 @@ static void svm_range_bo_release(struct kref *kref)
 	struct svm_range_bo *svm_bo;
 
 	svm_bo = container_of(kref, struct svm_range_bo, kref);
+	pr_debug("svm_bo 0x%p\n", svm_bo);
+
 	spin_lock(&svm_bo->list_lock);
 	while (!list_empty(&svm_bo->range_list)) {
 		struct svm_range *prange =
@@ -365,12 +367,33 @@ static void svm_range_bo_release(struct kref *kref)
 	kfree(svm_bo);
 }
 
-void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+static void svm_range_bo_wq_release(struct work_struct *work)
 {
-	if (!svm_bo)
-		return;
+	struct svm_range_bo *svm_bo;
+
+	svm_bo = container_of(work, struct svm_range_bo, release_work);
+	svm_range_bo_release(&svm_bo->kref);
+}
+
+static void svm_range_bo_release_async(struct kref *kref)
+{
+	struct svm_range_bo *svm_bo;
+
+	svm_bo = container_of(kref, struct svm_range_bo, kref);
+	pr_debug("svm_bo 0x%p\n", svm_bo);
+	INIT_WORK(&svm_bo->release_work, svm_range_bo_wq_release);
+	schedule_work(&svm_bo->release_work);
+}
 
-	kref_put(&svm_bo->kref, svm_range_bo_release);
+void svm_range_bo_unref_async(struct svm_range_bo *svm_bo)
+{
+	kref_put(&svm_bo->kref, svm_range_bo_release_async);
+}
+
+static void svm_range_bo_unref(struct svm_range_bo *svm_bo)
+{
+	if (svm_bo)
+		kref_put(&svm_bo->kref, svm_range_bo_release);
 }
 
 static bool
...
@@ -48,6 +48,7 @@ struct svm_range_bo {
 	struct work_struct		eviction_work;
 	struct svm_range_list		*svms;
 	uint32_t			evicting;
+	struct work_struct		release_work;
 };
 
 enum svm_work_list_ops {
@@ -195,7 +196,7 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
  */
 #define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0)
 
-void svm_range_bo_unref(struct svm_range_bo *svm_bo);
+void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
 #else
 
 struct kfd_process;
...