Commit 1e256e27 authored by Rex Zhu, committed by Alex Deucher

drm/amdgpu: Refine CSA related functions

There are no functional changes.
Use function arguments for the SRIOV-specific variables that
were hardcoded in those functions, so we can share those
functions with bare metal.
Reviewed-by: Monk Liu <Monk.Liu@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 20bedfe0
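
With the BO, memory domain, virtual address, and size hoisted into arguments, a caller can now drive the CSA helpers end to end. Below is a minimal sketch of such a caller, assuming the SRIOV values used in the diff; the wrapper function example_csa_setup is hypothetical and not part of this commit:

static int example_csa_setup(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm,
			     struct amdgpu_bo_va **csa_va)
{
	/* Same virtual address the SRIOV path uses in the diff below. */
	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
	int r;

	/* BO, domain and size are now caller-supplied instead of hardcoded. */
	r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
				       AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_CSA_SIZE);
	if (r)
		return r;

	/* Map the CSA BO into this VM at the caller-chosen address. */
	return amdgpu_map_static_csa(adev, vm, adev->virt.csa_obj,
				     csa_va, csa_addr, AMDGPU_CSA_SIZE);
}

A bare-metal caller could pass its own amdgpu_bo pointer and a different domain without touching the helpers, which is the point of the refactor.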
@@ -1656,7 +1656,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 			/* right after GMC hw init, we create CSA */
 			if (amdgpu_sriov_vf(adev)) {
-				r = amdgpu_allocate_static_csa(adev);
+				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+								AMDGPU_GEM_DOMAIN_VRAM,
+								AMDGPU_CSA_SIZE);
 				if (r) {
 					DRM_ERROR("allocate CSA failed %d\n", r);
 					return r;
@@ -1890,7 +1892,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			amdgpu_ucode_free_bo(adev);
-			amdgpu_free_static_csa(adev);
+			amdgpu_free_static_csa(&adev->virt.csa_obj);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
 		}
...
@@ -978,7 +978,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
+		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+					&fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
 		if (r)
 			goto error_vm;
 	}
...
@@ -41,25 +41,25 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
 }
 
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size)
 {
 	int r;
 	void *ptr;
 
-	r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
-				AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
+	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				domain, bo,
 				NULL, &ptr);
-	if (r)
-		return r;
+	if (!bo)
+		return -ENOMEM;
 
-	memset(ptr, 0, AMDGPU_CSA_SIZE);
+	memset(ptr, 0, size);
 	return 0;
 }
 
-void amdgpu_free_static_csa(struct amdgpu_device *adev) {
-	amdgpu_bo_free_kernel(&adev->virt.csa_obj,
-			NULL,
-			NULL);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+	amdgpu_bo_free_kernel(bo, NULL, NULL);
 }
 
 /*
@@ -69,9 +69,9 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
  * package to support SRIOV gfx preemption.
  */
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va)
+			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+			  uint64_t csa_addr, uint32_t size)
 {
-	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&csa_tv.head);
-	csa_tv.bo = &adev->virt.csa_obj->tbo;
+	csa_tv.bo = &bo->tbo;
 	csa_tv.shared = true;
 
 	list_add(&csa_tv.head, &list);
@@ -92,7 +92,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 	if (!*bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		DRM_ERROR("failed to create bo_va for static CSA\n");
@@ -100,7 +100,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	}
 
 	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
-				AMDGPU_CSA_SIZE);
+				size);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
 		amdgpu_vm_bo_rmv(adev, *bo_va);
@@ -108,7 +108,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
...
@@ -280,10 +280,12 @@ struct amdgpu_vm;
 uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+				u32 domain, uint32_t size);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
+			  struct amdgpu_bo *bo,
+			  struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
...
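
For completeness, a sketch of the matching teardown under the new interface; the wrapper name example_csa_teardown is hypothetical. Taking a struct amdgpu_bo ** lets amdgpu_bo_free_kernel() drop the reference and clear the caller's pointer, so the helper works for any CSA BO, not just adev->virt.csa_obj:

static void example_csa_teardown(struct amdgpu_device *adev)
{
	/* Frees the BO; adev->virt.csa_obj is NULL afterwards. */
	amdgpu_free_static_csa(&adev->virt.csa_obj);
}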