Commit e83dfe4d authored by Christian König, committed by Alex Deucher

drm/amdgpu: remove amdgpu_bo_list_entry.robj (v2)

We can get that just by casting tv.bo.

v2: squash in kfd fix (Alex)
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 03651735
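
The removed robj pointer is redundant because struct amdgpu_bo embeds its struct ttm_buffer_object as the tbo member, so an entry's tv.bo pointer can always be mapped back to the containing BO. A minimal sketch of the helper the diff relies on, assuming the usual container_of() wrapper from amdgpu_object.h:

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	/* tv.bo points at bo->tbo, so container_of() recovers the amdgpu_bo */
	return container_of(tbo, struct amdgpu_bo, tbo);
}
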
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -675,7 +675,6 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	if (!ctx->vm_pd)
 		return -ENOMEM;
 
-	ctx->kfd_bo.robj = bo;
 	ctx->kfd_bo.priority = 0;
 	ctx->kfd_bo.tv.bo = &bo->tbo;
 	ctx->kfd_bo.tv.shared = true;
@@ -740,7 +739,6 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 		return -ENOMEM;
 	}
 
-	ctx->kfd_bo.robj = bo;
 	ctx->kfd_bo.priority = 0;
 	ctx->kfd_bo.tv.bo = &bo->tbo;
 	ctx->kfd_bo.tv.shared = true;
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -49,8 +49,11 @@ static void amdgpu_bo_list_free(struct kref *ref)
 						   refcount);
 	struct amdgpu_bo_list_entry *e;
 
-	amdgpu_bo_list_for_each_entry(e, list)
-		amdgpu_bo_unref(&e->robj);
+	amdgpu_bo_list_for_each_entry(e, list) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
+		amdgpu_bo_unref(&bo);
+	}
 
 	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
 }
@@ -112,21 +115,20 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 			entry = &array[last_entry++];
 		}
 
-		entry->robj = bo;
 		entry->priority = min(info[i].bo_priority,
 				      AMDGPU_BO_LIST_MAX_PRIORITY);
-		entry->tv.bo = &entry->robj->tbo;
-		entry->tv.shared = !entry->robj->prime_shared_count;
-
-		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
-			list->gds_obj = entry->robj;
-		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
-			list->gws_obj = entry->robj;
-		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
-			list->oa_obj = entry->robj;
-
-		total_size += amdgpu_bo_size(entry->robj);
-		trace_amdgpu_bo_list_set(list, entry->robj);
+		entry->tv.bo = &bo->tbo;
+		entry->tv.shared = !bo->prime_shared_count;
+
+		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
+			list->gds_obj = bo;
+		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
+			list->gws_obj = bo;
+		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
+			list->oa_obj = bo;
+
+		total_size += amdgpu_bo_size(bo);
+		trace_amdgpu_bo_list_set(list, bo);
 	}
 
 	list->first_userptr = first_userptr;
@@ -138,8 +140,11 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
 
 	return 0;
 
 error_free:
-	while (i--)
-		amdgpu_bo_unref(&array[i].robj);
+	while (i--) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+		amdgpu_bo_unref(&bo);
+	}
 	kvfree(list);
 	return r;
@@ -191,9 +196,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 	 * with the same priority, i.e. it must be stable.
 	 */
 	amdgpu_bo_list_for_each_entry(e, list) {
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 		unsigned priority = e->priority;
 
-		if (!e->robj->parent)
+		if (!bo->parent)
 			list_add_tail(&e->tv.head, &bucket[priority]);
 
 		e->user_pages = NULL;
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -32,7 +32,6 @@ struct amdgpu_bo_va;
 struct amdgpu_fpriv;
 
 struct amdgpu_bo_list_entry {
-	struct amdgpu_bo		*robj;
 	struct ttm_validate_buffer	tv;
 	struct amdgpu_bo_va		*bo_va;
 	uint32_t			priority;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 				      uint32_t *offset)
 {
 	struct drm_gem_object *gobj;
+	struct amdgpu_bo *bo;
 	unsigned long size;
 	int r;
 
@@ -46,21 +47,21 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	if (gobj == NULL)
 		return -EINVAL;
 
-	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf_entry.priority = 0;
-	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+	p->uf_entry.tv.bo = &bo->tbo;
 	p->uf_entry.tv.shared = true;
 	p->uf_entry.user_pages = NULL;
 
 	drm_gem_object_put_unlocked(gobj);
 
-	size = amdgpu_bo_size(p->uf_entry.robj);
+	size = amdgpu_bo_size(bo);
 	if (size != PAGE_SIZE || (data->offset + 8) > size) {
 		r = -EINVAL;
 		goto error_unref;
 	}
 
-	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
 		r = -EINVAL;
 		goto error_unref;
 	}
@@ -70,7 +71,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	return 0;
 
 error_unref:
-	amdgpu_bo_unref(&p->uf_entry.robj);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
@@ -229,7 +230,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
 		goto free_all_kdata;
 	}
 
-	if (p->uf_entry.robj)
+	if (p->uf_entry.tv.bo)
 		p->job->uf_addr = uf_offset;
 	kfree(chunk_array);
@@ -458,13 +459,13 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 	     p->evictable = list_prev_entry(p->evictable, tv.head)) {
 
 		struct amdgpu_bo_list_entry *candidate = p->evictable;
-		struct amdgpu_bo *bo = candidate->robj;
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo);
 		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 		bool update_bytes_moved_vis;
 		uint32_t other;
 
 		/* If we reached our current BO we can forget it */
-		if (candidate->robj == validated)
+		if (bo == validated)
 			break;
 
 		/* We can't move pinned BOs here */
@@ -529,7 +530,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 	int r;
 
 	list_for_each_entry(lobj, validated, tv.head) {
-		struct amdgpu_bo *bo = lobj->robj;
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
 		bool binding_userptr = false;
 		struct mm_struct *usermm;
 
@@ -604,7 +605,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
+	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
 	while (1) {
@@ -620,7 +621,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		INIT_LIST_HEAD(&need_pages);
 		amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-			struct amdgpu_bo *bo = e->robj;
+			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
 			if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
 				 &e->user_invalidated) && e->user_pages) {
@@ -639,7 +640,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 				list_del(&e->tv.head);
 				list_add(&e->tv.head, &need_pages);
 
-				amdgpu_bo_unreserve(e->robj);
+				amdgpu_bo_unreserve(bo);
 			}
 		}
@@ -658,7 +659,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		/* Fill the page arrays for all userptrs. */
 		list_for_each_entry(e, &need_pages, tv.head) {
-			struct ttm_tt *ttm = e->robj->tbo.ttm;
+			struct ttm_tt *ttm = e->tv.bo->ttm;
 
 			e->user_pages = kvmalloc_array(ttm->num_pages,
 							 sizeof(struct page*),
@@ -717,7 +718,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		oa = p->bo_list->oa_obj;
 
 	amdgpu_bo_list_for_each_entry(e, p->bo_list)
-		e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
+		e->bo_va = amdgpu_vm_bo_find(vm, ttm_to_amdgpu_bo(e->tv.bo));
 
 	if (gds) {
 		p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -732,8 +733,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		p->job->oa_size = amdgpu_bo_size(oa);
 	}
 
-	if (!r && p->uf_entry.robj) {
-		struct amdgpu_bo *uf = p->uf_entry.robj;
+	if (!r && p->uf_entry.tv.bo) {
+		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);
 
 		r = amdgpu_ttm_alloc_gart(&uf->tbo);
 		p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
@@ -749,8 +750,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		if (!e->user_pages)
 			continue;
 
-		release_pages(e->user_pages,
-			      e->robj->tbo.ttm->num_pages);
+		release_pages(e->user_pages, e->tv.bo->ttm->num_pages);
 		kvfree(e->user_pages);
 	}
@@ -763,9 +763,11 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 	int r;
 
 	list_for_each_entry(e, &p->validated, tv.head) {
-		struct reservation_object *resv = e->robj->tbo.resv;
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+		struct reservation_object *resv = bo->tbo.resv;
+
 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
-				     amdgpu_bo_explicit_sync(e->robj));
+				     amdgpu_bo_explicit_sync(bo));
 
 		if (r)
 			return r;
@@ -808,7 +810,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
 	kfree(parser->chunks);
 	if (parser->job)
 		amdgpu_job_free(parser->job);
-	amdgpu_bo_unref(&parser->uf_entry.robj);
+	if (parser->uf_entry.tv.bo) {
+		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
+
+		amdgpu_bo_unref(&uf);
+	}
 }
 
 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
@@ -919,7 +925,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		struct dma_fence *f;
 
 		/* ignore duplicates */
-		bo = e->robj;
+		bo = ttm_to_amdgpu_bo(e->tv.bo);
 		if (!bo)
 			continue;
 
@@ -958,11 +964,13 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (amdgpu_vm_debug) {
 		/* Invalidate all BOs to test for userspace bugs */
 		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
+
 			/* ignore duplicates */
-			if (!e->robj)
+			if (!bo)
 				continue;
 
-			amdgpu_vm_bo_invalidate(adev, e->robj, false);
+			amdgpu_vm_bo_invalidate(adev, bo, false);
 		}
 	}
@@ -1211,7 +1219,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	/* No memory allocation is allowed while holding the mn lock */
 	amdgpu_mn_lock(p->mn);
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
-		struct amdgpu_bo *bo = e->robj;
+		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
 		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
 			r = -ERESTARTSYS;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -593,9 +593,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry)
 {
-	entry->robj = vm->root.base.bo;
 	entry->priority = 0;
-	entry->tv.bo = &entry->robj->tbo;
+	entry->tv.bo = &vm->root.base.bo->tbo;
 	entry->tv.shared = true;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);