Commit 49b02b18 authored by Chunming Zhou, committed by Alex Deucher

drm/amdgpu: reserve/unreserve objects out of map/unmap operations
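
amdgpu_vm_bo_map() and amdgpu_vm_bo_unmap() used to drop the BO
reservation themselves on every exit path. Move the reserve/unreserve
out into the caller: amdgpu_gem_va_ioctl() now reserves the BO, and for
map operations also the VM page directory, via ttm_eu_reserve_buffers(),
and releases everything with ttm_eu_backoff_reservation() once the
operation is done. With the page directory reserved by the caller, the
page table allocation in amdgpu_vm_bo_map() no longer needs to take the
PD's reservation lock by hand.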

Change-Id: Id6514f2fb6e002437fdbe99353d5d35f4ac736c7
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent ef9f0a83
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -515,6 +515,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *rbo;
 	struct amdgpu_bo_va *bo_va;
+	struct ttm_validate_buffer tv, tv_pd;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -552,7 +555,18 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
-	r = amdgpu_bo_reserve(rbo, false);
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	tv.bo = &rbo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	if (args->operation == AMDGPU_VA_OP_MAP) {
+		tv_pd.bo = &fpriv->vm.page_directory->tbo;
+		tv_pd.shared = true;
+		list_add(&tv_pd.head, &list);
+	}
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
@@ -561,7 +575,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		amdgpu_bo_unreserve(rbo);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		drm_gem_object_unreference_unlocked(gobj);
 		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
@@ -584,7 +599,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	default:
 		break;
 	}
-
+	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 	mutex_unlock(&fpriv->vm.mutex);
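
Pulling the hunks above together: the ioctl now builds a small
ttm_validate_buffer list and reserves everything through the TTM execbuf
utilities (declared in <drm/ttm/ttm_execbuf_util.h>). The following is a
minimal consolidation of the new flow inside amdgpu_gem_va_ioctl(), with
explanatory comments added by the editor; the code itself is taken from
the patch, not new:

	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	/* the BO whose VA range is being mapped or unmapped */
	tv.bo = &rbo->tbo;
	tv.shared = true;	/* a shared (read-side) reservation suffices */
	list_add(&tv.head, &list);

	/*
	 * Mapping may allocate page tables against the page directory's
	 * reservation object, so the PD has to be locked as well.
	 */
	if (args->operation == AMDGPU_VA_OP_MAP) {
		tv_pd.bo = &fpriv->vm.page_directory->tbo;
		tv_pd.shared = true;
		list_add(&tv_pd.head, &list);
	}

	/* one ww_acquire ticket reserves all BOs on the list deadlock-free */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;

	/* ... amdgpu_vm_bo_map() / amdgpu_vm_bo_unmap() ... */

	/* drop every reservation taken above, on success and on error */
	ttm_eu_backoff_reservation(&ticket, &list);

Reserving both buffers under a single ww_acquire_ctx is what makes the
pair of locks safe against other tasks taking them in a different order.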
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -985,7 +985,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
  * Add a mapping of the BO at the specefied addr into the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		     struct amdgpu_bo_va *bo_va,
@@ -1001,23 +1001,18 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 	/* validate the parameters */
 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
 		return -EINVAL;
-	}
 
 	/* make sure object fit at this offset */
 	eaddr = saddr + size;
-	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
 		return -EINVAL;
-	}
 
 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
 	if (last_pfn > adev->vm_manager.max_pfn) {
 		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
 			last_pfn, adev->vm_manager.max_pfn);
-		amdgpu_bo_unreserve(bo_va->bo);
 		return -EINVAL;
 	}
 
@@ -1034,14 +1029,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
 			tmp->it.start, tmp->it.last + 1);
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -EINVAL;
 		goto error;
 	}
 
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
 	if (!mapping) {
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -ENOMEM;
 		goto error;
 	}
@@ -1067,8 +1060,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	if (eaddr > vm->max_pde_used)
 		vm->max_pde_used = eaddr;
 
-	amdgpu_bo_unreserve(bo_va->bo);
-
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,18 +1068,15 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (vm->page_tables[pt_idx].bo)
 			continue;
 
-		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 				     NULL, resv, &pt);
-		if (r) {
-			ww_mutex_unlock(&resv->lock);
+		if (r)
 			goto error_free;
-		}
+
 		r = amdgpu_vm_clear_bo(adev, pt);
-		ww_mutex_unlock(&resv->lock);
 		if (r) {
 			amdgpu_bo_unref(&pt);
 			goto error_free;
@@ -1122,7 +1110,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
  * Remove a mapping of the BO at the specefied addr from the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
 */
 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       struct amdgpu_bo_va *bo_va,
@@ -1147,11 +1135,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 			break;
 		}
 
-		if (&mapping->list == &bo_va->invalids) {
-			amdgpu_bo_unreserve(bo_va->bo);
+		if (&mapping->list == &bo_va->invalids)
 			return -ENOENT;
-		}
 	}
 
 	list_del(&mapping->list);
 	spin_lock(&vm->it_lock);
@@ -1163,7 +1149,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		list_add(&mapping->list, &vm->freed);
 	else
 		kfree(mapping);
-	amdgpu_bo_unreserve(bo_va->bo);
 
 	return 0;
 }
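
The net effect on the API contract: amdgpu_vm_bo_map() and
amdgpu_vm_bo_unmap() now leave the reservation untouched on every path,
including errors, so a caller always pairs one reserve with one backoff.
A minimal sketch of the resulting calling convention, assuming the
reservation list was built as in amdgpu_gem_va_ioctl() above (the bare
fragment shape and the early return are illustrative; the surrounding
mutex and GEM-reference error handling are omitted for brevity):

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		return r;	/* nothing reserved, nothing to back off */

	r = amdgpu_vm_bo_map(adev, bo_va, saddr, offset, size, flags);

	/*
	 * Unconditional backoff: amdgpu_vm_bo_map() no longer drops the
	 * reservation on any path, so this is correct for success and
	 * for every error return alike.
	 */
	ttm_eu_backoff_reservation(&ticket, &list);
	return r;

This is also what lets the page-table allocation inside
amdgpu_vm_bo_map() pass the page directory's reservation_object straight
to amdgpu_bo_create(): the PD is already reserved by the caller, so the
old manual ww_mutex_lock()/ww_mutex_unlock() pair around the allocation
becomes unnecessary.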