Commit 281b4223 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: add reference for **fence

Fix the case where the fence is sometimes already released when it is passed back through **fence;
add a reference for it.
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 1ffd2652
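
A minimal, self-contained sketch of the pattern this commit applies, assuming a toy fence type with a plain int refcount and a hypothetical submit_job() helper (neither is the kernel's struct fence nor any real amdgpu function): whoever returns a fence through a **fence out-parameter takes an extra reference with fence_get() for the caller, and every holder drops its own reference with fence_put() once it is done, so the caller's pointer stays valid even after the producer releases its reference.

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted fence for illustration only. */
struct fence {
	int refcount;
};

static struct fence *fence_get(struct fence *f)
{
	if (f)
		f->refcount++;
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && --f->refcount == 0) {
		printf("fence freed\n");
		free(f);
	}
}

/*
 * Hypothetical producer: hands its result back through **f.  Without the
 * fence_get() below, dropping the job's own reference at the end could free
 * the fence while the caller still points at it, which is the bug the
 * commit message describes.
 */
static int submit_job(struct fence **f)
{
	struct fence *job_fence = malloc(sizeof(*job_fence));

	if (!job_fence)
		return -1;
	job_fence->refcount = 1;	/* reference owned by the job itself */
	*f = fence_get(job_fence);	/* extra reference handed to the caller */

	/* the job completes and drops its own reference */
	fence_put(job_fence);
	return 0;
}

int main(void)
{
	struct fence *f = NULL;

	if (submit_job(&f))
		return 1;
	/* the caller's reference is still valid here */
	fence_put(f);
	return 0;
}

Each hunk below follows the same pattern: either take the extra reference with fence_get() before storing into *f or *fence, or add the matching fence_put() once the local reference is no longer needed.
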
@@ -136,6 +136,7 @@ static void amdgpu_job_work_func(struct work_struct *work)
sched_job->free_job(sched_job);
mutex_unlock(&sched_job->job_lock);
/* after processing job, free memory */
fence_put(&sched_job->s_fence->base);
kfree(sched_job);
}
struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
@@ -133,13 +133,13 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
return r;
}
ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
*f = &sched_job->s_fence->base;
*f = fence_get(&sched_job->s_fence->base);
mutex_unlock(&sched_job->job_lock);
} else {
r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
if (r)
return r;
*f = &ibs[num_ibs - 1].fence->base;
*f = fence_get(&ibs[num_ibs - 1].fence->base);
}
return 0;
}
@@ -877,7 +877,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
if (fence)
*fence = fence_get(f);
amdgpu_bo_unref(&bo);
fence_put(f);
if (amdgpu_enable_scheduler)
return 0;
@@ -415,6 +415,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
goto err;
if (fence)
*fence = fence_get(f);
fence_put(f);
if (amdgpu_enable_scheduler)
return 0;
err:
@@ -481,6 +482,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
goto err;
if (fence)
*fence = fence_get(f);
fence_put(f);
if (amdgpu_enable_scheduler)
return 0;
err:
@@ -366,6 +366,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
&fence);
if (!r)
amdgpu_bo_fence(bo, fence, true);
fence_put(fence);
if (amdgpu_enable_scheduler) {
amdgpu_bo_unreserve(bo);
return 0;
@@ -495,6 +496,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
goto error_free;
amdgpu_bo_fence(pd, fence, true);
fence_put(fence);
}
if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
@@ -812,6 +814,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
fence_put(*fence);
*fence = fence_get(f);
}
fence_put(f);
if (!amdgpu_enable_scheduler) {
amdgpu_ib_free(adev, ib);
kfree(ib);
@@ -669,6 +669,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
}
err1:
fence_put(f);
amdgpu_ib_free(adev, &ib);
err0:
amdgpu_wb_free(adev, index);
@@ -2698,6 +2698,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
}
err2:
fence_put(f);
amdgpu_ib_free(adev, &ib);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
@@ -659,6 +659,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
r = -EINVAL;
}
err2:
fence_put(f);
amdgpu_ib_free(adev, &ib);
err1:
amdgpu_gfx_scratch_free(adev, scratch);
@@ -733,6 +733,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
}
err1:
fence_put(f);
amdgpu_ib_free(adev, &ib);
err0:
amdgpu_wb_free(adev, index);
@@ -853,6 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
r = -EINVAL;
}
err1:
fence_put(f);
amdgpu_ib_free(adev, &ib);
err0:
amdgpu_wb_free(adev, index);
@@ -313,6 +313,7 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
kfree(job);
return -EINVAL;
}
fence_get(&(*fence)->base);
job->s_fence = *fence;
while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
&c_entity->queue_lock) != sizeof(void *)) {