Commit f92d5c61 authored by Jack Xiao, committed by Alex Deucher

drm/amdgpu: enable the static csa when mcbp enabled

CSA is the Context Save Area used for preemption. With MCBP (mid-command-buffer preemption) enabled, the static CSA must be allocated and mapped just as it is for SR-IOV VFs.
Acked-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b239c017
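
The change itself is mechanical: every site that previously gated static-CSA handling on amdgpu_sriov_vf(adev) now also fires when the amdgpu_mcbp module parameter is set. Below is a minimal user-space sketch of that guard pattern; the names dev, sriov_vf, and needs_static_csa are hypothetical stand-ins for illustration, not the driver's real types or API.

#include <stdbool.h>
#include <stdio.h>

static int amdgpu_mcbp; /* stand-in for the driver's mcbp module parameter */

struct dev { bool is_sriov_vf; }; /* hypothetical device descriptor */

static bool sriov_vf(const struct dev *d)
{
	return d->is_sriov_vf;
}

/* Mirrors the condition this commit adds at every CSA site. */
static bool needs_static_csa(const struct dev *d)
{
	return amdgpu_mcbp || sriov_vf(d);
}

int main(void)
{
	struct dev bare_metal = { .is_sriov_vf = false };

	printf("CSA needed without mcbp: %d\n", needs_static_csa(&bare_metal)); /* 0 */
	amdgpu_mcbp = 1; /* e.g. booting with amdgpu.mcbp=1 */
	printf("CSA needed with mcbp:    %d\n", needs_static_csa(&bare_metal)); /* 1 */
	return 0;
}
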
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -872,7 +872,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	if (amdgpu_sriov_vf(adev)) {
+	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		struct dma_fence *f;
 
 		bo_va = fpriv->csa_va;
@@ -961,7 +961,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
 			continue;
 
-		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
+		if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+		    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
 			if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
 				if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
 					ce_preempt++;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1722,7 +1722,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 			adev->ip_blocks[i].status.hw = true;
 
 			/* right after GMC hw init, we create CSA */
-			if (amdgpu_sriov_vf(adev)) {
+			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
 							       AMDGPU_GEM_DOMAIN_VRAM,
 							       AMDGPU_CSA_SIZE);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -217,9 +217,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		/* drop preamble IBs if we don't have a context switch */
 		if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
 		    skip_preamble &&
 		    !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
-		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
+		    !amdgpu_mcbp &&
+		    !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
 			continue;
 
 		amdgpu_ring_emit_ib(ring, job, ib, status);
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -709,7 +709,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
-		if (amdgpu_sriov_vf(adev))
+		if (amdgpu_mcbp || amdgpu_sriov_vf(adev))
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
@@ -1003,7 +1003,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		goto error_vm;
 	}
 
-	if (amdgpu_sriov_vf(adev)) {
+	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 
 		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
@@ -1066,7 +1066,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 		amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
 
-	if (amdgpu_sriov_vf(adev)) {
+	if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
 		/* TODO: how to handle reserve failure */
 		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
 		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
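
Taken together, the hunks cover the full static-CSA lifecycle: the buffer is allocated once right after GMC hw init (amdgpu_device_ip_init), advertised to user space via AMDGPU_IDS_FLAGS_PREEMPTION in the info ioctl, mapped into each client's VM on open (amdgpu_driver_open_kms), validated during command submission (amdgpu_cs_vm_handling), and unmapped on close (amdgpu_driver_postclose_kms). The amdgpu_ib_schedule change keeps the preamble CE IB in the stream whenever preemption is possible, since a preempted context must be able to restore CE state when it resumes.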