Commit 6d2f2944 authored by Christian König

drm/radeon: use normal BOs for the page tables v4

No need to make it more complicated than necessary;
just allocate the page tables as normal BOs and
flush whenever their addresses change.

v2: update comments and function name
v3: squash bug fixes, page directory and tables patch
v4: rebased on Marek's changes
Signed-off-by: Christian König <christian.koenig@amd.com>
parent fa688343
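
For context, here is a minimal, self-contained C sketch of the bookkeeping this patch introduces: each page table becomes an ordinary BO tracked in a radeon_vm_pt-style array, and a page directory entry is rewritten (and a flush requested) only when its table's address has actually changed. All sketch_* names below are made up for illustration; this is not the kernel API, just the idea under simplified types.

/* Illustration only: sketch_* types and helpers are hypothetical,
 * loosely mirroring struct radeon_vm_pt and the page directory update. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_bo {			/* stands in for struct radeon_bo */
	uint64_t gpu_addr;		/* where the BO currently lives */
};

struct sketch_vm_pt {			/* mirrors struct radeon_vm_pt */
	struct sketch_bo *bo;		/* page table BO, may be NULL */
	uint64_t addr;			/* address last written into the PDE */
};

struct sketch_vm {			/* the fields the patch adds/changes */
	struct sketch_bo *page_directory;
	unsigned max_pde_used;
	struct sketch_vm_pt *page_tables;
};

/* Rewrite only the page directory entries whose table moved. */
static bool sketch_update_page_directory(struct sketch_vm *vm)
{
	bool flush_needed = false;
	unsigned i;

	for (i = 0; i <= vm->max_pde_used; i++) {
		struct sketch_vm_pt *pt = &vm->page_tables[i];
		uint64_t cur = pt->bo ? pt->bo->gpu_addr : 0;

		if (pt->addr == cur)
			continue;		/* PDE still valid, nothing to do */

		pt->addr = cur;			/* remember the new location */
		printf("write PDE %u -> 0x%" PRIx64 "\n", i, cur);
		flush_needed = true;		/* flush the VM once we are done */
	}
	return flush_needed;
}

int main(void)
{
	struct sketch_bo pd  = { .gpu_addr = 0x100000 };
	struct sketch_bo pt0 = { .gpu_addr = 0x200000 };
	struct sketch_vm_pt tables[1] = { { .bo = &pt0, .addr = 0 } };
	struct sketch_vm vm = {
		.page_directory = &pd,
		.max_pde_used   = 0,
		.page_tables    = tables,
	};

	sketch_update_page_directory(&vm);	/* first call: PDE 0 written, flush */
	pt0.gpu_addr = 0x300000;		/* table BO relocated, e.g. after eviction */
	sketch_update_page_directory(&vm);	/* address changed: PDE rewritten again */
	return 0;
}

The addr field is what makes the "flush whenever their addresses change" part of the commit message cheap: a PDE write and VM flush only happen for tables that were actually (re)located.
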
@@ -857,17 +857,22 @@ struct radeon_mec {
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
struct radeon_vm_pt {
struct radeon_bo *bo;
uint64_t addr;
};
struct radeon_vm {
struct list_head list;
struct list_head va;
unsigned id;
/* contains the page directory */
struct radeon_sa_bo *page_directory;
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
unsigned max_pde_used;
/* array of page tables, one for each page directory entry */
struct radeon_sa_bo **page_tables;
struct radeon_vm_pt *page_tables;
struct mutex mutex;
/* last fence for cs using this vm */
@@ -880,9 +885,7 @@ struct radeon_vm {
struct radeon_vm_manager {
struct mutex lock;
struct list_head lru_vm;
struct radeon_fence *active[RADEON_NUM_VM];
struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
@@ -1011,6 +1014,7 @@ struct radeon_cs_parser {
unsigned nrelocs;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
@@ -2798,10 +2802,11 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev,
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
@@ -2811,6 +2816,8 @@ void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
......
@@ -168,6 +168,10 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
radeon_cs_buckets_get_list(&buckets, &p->validated);
if (p->cs_flags & RADEON_CS_USE_VM)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
}
@@ -401,6 +405,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
kfree(parser->track);
kfree(parser->relocs);
kfree(parser->relocs_ptr);
kfree(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
@@ -440,24 +445,32 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = parser->rdev;
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
int r;
struct radeon_device *rdev = p->rdev;
int i, r;
r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
if (r) {
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
}
list_for_each_entry(lobj, &parser->validated, tv.head) {
bo = lobj->bo;
r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
if (r) {
r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
for (i = 0; i < p->nrelocs; i++) {
struct radeon_bo *bo;
/* ignore duplicates */
if (p->relocs_ptr[i] != &p->relocs[i])
continue;
bo = p->relocs[i].robj;
r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
if (r)
return r;
}
}
return 0;
}
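
The p->relocs_ptr[i] != &p->relocs[i] test above works because the relocation setup points each entry's relocs_ptr at its own relocs slot and, for a repeated handle, back at the first occurrence. A small stand-alone sketch of that idiom (the fake_* names are made up, not the radeon structures):

/* Hypothetical demo of the duplicate test; not the radeon code itself. */
#include <stdio.h>

struct fake_reloc { unsigned handle; };

int main(void)
{
	struct fake_reloc relocs[3] = { { 1 }, { 2 }, { 1 } };
	struct fake_reloc *relocs_ptr[3];
	int i, j;

	/* First pass: point duplicates back at their first occurrence. */
	for (i = 0; i < 3; i++) {
		relocs_ptr[i] = &relocs[i];
		for (j = 0; j < i; j++) {
			if (relocs[j].handle == relocs[i].handle) {
				relocs_ptr[i] = &relocs[j];
				break;
			}
		}
	}

	/* Second pass: the test used in radeon_bo_vm_update_pte() above. */
	for (i = 0; i < 3; i++) {
		if (relocs_ptr[i] != &relocs[i]) {
			printf("slot %d is a duplicate of handle %u\n",
			       i, relocs[i].handle);
			continue;
		}
		printf("slot %d: update page tables for handle %u\n",
		       i, relocs[i].handle);
	}
	return 0;
}
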
@@ -491,10 +504,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_alloc_pt(rdev, vm);
if (r) {
goto out;
}
r = radeon_bo_vm_update_pte(parser, vm);
if (r) {
goto out;
@@ -512,7 +521,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
}
out:
radeon_vm_add_to_lru(rdev, vm);
mutex_unlock(&vm->mutex);
mutex_unlock(&rdev->vm_manager.lock);
return r;
......
@@ -1198,7 +1198,6 @@ int radeon_device_init(struct radeon_device *rdev,
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
......
@@ -559,7 +559,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
radeon_vm_init(rdev, &fpriv->vm);
r = radeon_vm_init(rdev, &fpriv->vm);
if (r)
return r;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r)
......