Commit 2e8f9fbe authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix and cleanup shadow handling

Set the shadow flag on the shadow BO and not on the parent, always bind shadow
BOs during allocation instead of binding them manually, and use the
reservation_object wrappers to take the lock.

This fixes a couple of issues with binding the shadow BOs and makes sure they
are correctly evicted when memory becomes tight.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent db63042b
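For readers unfamiliar with the wrappers the commit message refers to:
reservation_object_lock()/reservation_object_unlock() are inline helpers from
<linux/reservation.h> around the ww_mutex embedded in the reservation object.
A minimal sketch of the pattern this patch switches to; the helper function
name is hypothetical and only for illustration:

#include <linux/bug.h>
#include <linux/reservation.h>

/* illustrative_lock_bo_resv: hypothetical helper showing the wrapper
 * style this patch adopts instead of open-coding the ww_mutex. */
static void illustrative_lock_bo_resv(struct reservation_object *resv)
{
        /* old style: r = ww_mutex_lock(&resv->lock, NULL); WARN_ON(r != 0); */
        WARN_ON(reservation_object_lock(resv, NULL));

        /* ... touch the BO under the reservation lock ... */

        reservation_object_unlock(resv);
}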
@@ -2622,12 +2622,6 @@ static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
 		goto err;
 	}
 
-	r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
-	if (r) {
-		DRM_ERROR("%p bind failed\n", bo->shadow);
-		goto err;
-	}
-
 	r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
 					  NULL, fence, true);
 	if (r) {
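Note on this hunk: the explicit bind before the copy becomes redundant
because, with the placement change below, a shadow BO can only ever be placed
inside the GART aperture and so is already bound by the time recovery runs.
The remaining flow, restated with a comment (assumption labeled, not a
literal excerpt):

/* Assumption, for illustration: the shadow is bound at allocation,
 * so the recovery path can go straight to the copy. */
r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, fence, true);
if (r)
        goto err;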
@@ -91,7 +91,10 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 		places[c].fpfn = 0;
-		places[c].lpfn = 0;
+		if (flags & AMDGPU_GEM_CREATE_SHADOW)
+			places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+		else
+			places[c].lpfn = 0;
 		places[c].flags = TTM_PL_FLAG_TT;
 		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 			places[c].flags |= TTM_PL_FLAG_WC |
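This is the key change. With lpfn left at 0 the GTT placement is unbounded
and, as I understand the GTT manager of this era, a buffer can be validated
without receiving a real GART slot; clamping lpfn to the GART size forces
every shadow BO into the GPU-visible window. An annotated restatement using
the hunk's own variables (a sketch, not a literal excerpt):

/* Number of page frames the GART aperture covers. */
unsigned long gart_pages = adev->mc.gart_size >> PAGE_SHIFT;

places[c].fpfn = 0;                     /* may start at the first GART page */
if (flags & AMDGPU_GEM_CREATE_SHADOW)
        places[c].lpfn = gart_pages;    /* must end inside the GART */
else
        places[c].lpfn = 0;             /* 0 == no upper bound */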
@@ -446,17 +449,16 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	if (bo->shadow)
 		return 0;
 
-	bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
-	memset(&placements, 0,
-	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
-	amdgpu_ttm_placement_init(adev, &placement,
-				  placements, AMDGPU_GEM_DOMAIN_GTT,
-				  AMDGPU_GEM_CREATE_CPU_GTT_USWC);
+	memset(&placements, 0, sizeof(placements));
+	amdgpu_ttm_placement_init(adev, &placement, placements,
+				  AMDGPU_GEM_DOMAIN_GTT,
+				  AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+				  AMDGPU_GEM_CREATE_SHADOW);
 
 	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
 					AMDGPU_GEM_DOMAIN_GTT,
-					AMDGPU_GEM_CREATE_CPU_GTT_USWC,
+					AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+					AMDGPU_GEM_CREATE_SHADOW,
 					NULL, &placement,
 					bo->tbo.resv,
 					0,
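Side cleanup in this and the next hunk: placements is a true array here, so
sizeof(placements) already yields the total byte count and cannot go stale if
the element count changes. For example:

struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

/* before: memset(&placements, 0,
 *                (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place)); */
memset(&placements, 0, sizeof(placements));     /* same bytes, simpler */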
@@ -484,30 +486,28 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 {
 	struct ttm_placement placement = {0};
 	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
 	int r;
 
-	memset(&placements, 0,
-	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
-	amdgpu_ttm_placement_init(adev, &placement,
-				  placements, domain, flags);
-
-	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
-					domain, flags, sg, &placement,
-					resv, init_value, bo_ptr);
+	memset(&placements, 0, sizeof(placements));
+	amdgpu_ttm_placement_init(adev, &placement, placements,
+				  domain, parent_flags);
+	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
+					parent_flags, sg, &placement, resv,
+					init_value, bo_ptr);
 	if (r)
 		return r;
 
-	if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
-		if (!resv) {
-			r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
-			WARN_ON(r != 0);
-		}
+	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+		if (!resv)
+			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+							NULL));
 
 		r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
 		if (!resv)
-			ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);
+			reservation_object_unlock((*bo_ptr)->tbo.resv);
 
 		if (r)
 			amdgpu_bo_unref(bo_ptr);
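How the shadow flag is routed after this patch, sketched with the hunk's own
names: the caller's request keeps AMDGPU_GEM_CREATE_SHADOW, the parent BO is
created with it masked out, and only the shadow BO created afterwards carries
the flag, which is what lets amdgpu_ttm_placement_init() tell the two apart:

/* Sketch: splitting the caller's flags between parent and shadow. */
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;

/* parent BO: never marked as a shadow */
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
                                parent_flags, sg, &placement, resv,
                                init_value, bo_ptr);

/* shadow BO: created with AMDGPU_GEM_CREATE_SHADOW set (inside
 * amdgpu_bo_create_shadow()), so its GTT placement gets the GART
 * clamp from the first amdgpu_object.c hunk above. */
if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev))
        r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));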
@@ -165,14 +165,6 @@ static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
 	unsigned i;
 	int r;
 
-	if (parent->bo->shadow) {
-		struct amdgpu_bo *shadow = parent->bo->shadow;
-
-		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
-		if (r)
-			return r;
-	}
-
 	if (use_cpu_for_update) {
 		r = amdgpu_bo_kmap(parent->bo, NULL);
 		if (r)