Commit 2a85e816 authored by Alex Deucher

drm/amdgpu/sdma4: APUs do not have a page queue

Don't use the paging queue on APUs; it is not present there. Gate all setup, use, and teardown of the page queue behind a new has_page_queue flag.
Tested-by: Tom St Denis <tom.stdenis@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 161d0711
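
For orientation before the diff: the whole change is one pattern, a has_page_queue flag decided once at early-init time from the ASIC type, then checked before every touch of the SDMA page queue (stop, resume, ring setup and teardown, and scheduler selection). Below is a minimal standalone C sketch of that pattern; the struct and function names are simplified stand-ins rather than the real amdgpu API, and only the CHIP_RAVEN check and the has_page_queue flag mirror the actual patch.

#include <stdbool.h>
#include <stdio.h>

enum chip_type { CHIP_RAVEN, CHIP_VEGA10 };

struct sdma_state {
	int num_instances;
	bool has_page_queue;	/* decided once at early init */
};

/* early init: APUs (Raven) have no page queue */
static void sdma_early_init(struct sdma_state *s, enum chip_type chip)
{
	if (chip == CHIP_RAVEN) {
		s->num_instances = 1;
		s->has_page_queue = false;
	} else {
		s->num_instances = 2;
		s->has_page_queue = true;
	}
}

/* every later touch of the page queue is gated on the flag */
static void sdma_stop(const struct sdma_state *s)
{
	printf("stopping gfx and rlc queues\n");
	if (s->has_page_queue)
		printf("stopping page queue\n");
}

int main(void)
{
	struct sdma_state apu;

	sdma_early_init(&apu, CHIP_RAVEN);
	sdma_stop(&apu);	/* the page-queue stop is skipped on Raven */
	return 0;
}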
...
@@ -51,6 +51,7 @@ struct amdgpu_sdma {
 	struct amdgpu_irq_src	illegal_inst_irq;
 	int			num_instances;
 	uint32_t		srbm_soft_reset;
+	bool			has_page_queue;
 };
 
 /*
...
@@ -746,7 +746,8 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
 	if (enable == false) {
 		sdma_v4_0_gfx_stop(adev);
 		sdma_v4_0_rlc_stop(adev);
-		sdma_v4_0_page_stop(adev);
+		if (adev->sdma.has_page_queue)
+			sdma_v4_0_page_stop(adev);
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1115,7 +1116,8 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
 		WREG32_SDMA(i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL, 0);
 		sdma_v4_0_gfx_resume(adev, i);
-		sdma_v4_0_page_resume(adev, i);
+		if (adev->sdma.has_page_queue)
+			sdma_v4_0_page_resume(adev, i);
 
 		/* set utc l1 enable flag always to 1 */
 		temp = RREG32_SDMA(i, mmSDMA0_CNTL);
@@ -1457,10 +1459,13 @@ static int sdma_v4_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->asic_type == CHIP_RAVEN)
+	if (adev->asic_type == CHIP_RAVEN) {
 		adev->sdma.num_instances = 1;
-	else
+		adev->sdma.has_page_queue = false;
+	} else {
 		adev->sdma.num_instances = 2;
+		adev->sdma.has_page_queue = true;
+	}
 
 	sdma_v4_0_set_ring_funcs(adev);
 	sdma_v4_0_set_buffer_funcs(adev);
@@ -1522,18 +1527,20 @@ static int sdma_v4_0_sw_init(void *handle)
 		if (r)
 			return r;
 
-		ring = &adev->sdma.instance[i].page;
-		ring->ring_obj = NULL;
-		ring->use_doorbell = false;
-
-		sprintf(ring->name, "page%d", i);
-		r = amdgpu_ring_init(adev, ring, 1024,
-				     &adev->sdma.trap_irq,
-				     (i == 0) ?
-				     AMDGPU_SDMA_IRQ_TRAP0 :
-				     AMDGPU_SDMA_IRQ_TRAP1);
-		if (r)
-			return r;
+		if (adev->sdma.has_page_queue) {
+			ring = &adev->sdma.instance[i].page;
+			ring->ring_obj = NULL;
+			ring->use_doorbell = false;
+
+			sprintf(ring->name, "page%d", i);
+			r = amdgpu_ring_init(adev, ring, 1024,
+					     &adev->sdma.trap_irq,
+					     (i == 0) ?
+					     AMDGPU_SDMA_IRQ_TRAP0 :
+					     AMDGPU_SDMA_IRQ_TRAP1);
+			if (r)
+				return r;
+		}
 	}
 
 	return r;
@@ -1546,7 +1553,8 @@ static int sdma_v4_0_sw_fini(void *handle)
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
-		amdgpu_ring_fini(&adev->sdma.instance[i].page);
+		if (adev->sdma.has_page_queue)
+			amdgpu_ring_fini(&adev->sdma.instance[i].page);
 	}
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -1955,8 +1963,10 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		adev->sdma.instance[i].ring.funcs = &sdma_v4_0_ring_funcs;
 		adev->sdma.instance[i].ring.me = i;
-		adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
-		adev->sdma.instance[i].page.me = i;
+		if (adev->sdma.has_page_queue) {
+			adev->sdma.instance[i].page.funcs = &sdma_v4_0_page_ring_funcs;
+			adev->sdma.instance[i].page.me = i;
+		}
 	}
 }
@@ -2056,7 +2066,10 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
 	adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
 	for (i = 0; i < adev->sdma.num_instances; i++) {
-		sched = &adev->sdma.instance[i].page.sched;
+		if (adev->sdma.has_page_queue)
+			sched = &adev->sdma.instance[i].page.sched;
+		else
+			sched = &adev->sdma.instance[i].ring.sched;
 		adev->vm_manager.vm_pte_rqs[i] =
 			&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
 	}
...
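
The last hunk is the one with a behavioral fallback rather than a plain skip: VM page-table updates normally run on the page queue's scheduler, so when that queue does not exist they must fall back to the gfx ring's scheduler. A minimal sketch of that selection, again with simplified stand-in types rather than the real amdgpu structures:

#include <stdbool.h>
#include <stdio.h>

/* simplified stand-ins; only the fallback logic mirrors the patch */
struct sched { const char *name; };

struct sdma_instance {
	struct sched ring_sched;	/* gfx queue scheduler */
	struct sched page_sched;	/* page queue scheduler */
};

/* pick the scheduler that VM page-table updates should use */
static struct sched *pte_sched(struct sdma_instance *inst, bool has_page_queue)
{
	/* prefer the page queue; fall back to the gfx ring on APUs */
	return has_page_queue ? &inst->page_sched : &inst->ring_sched;
}

int main(void)
{
	struct sdma_instance inst = {
		.ring_sched = { "sdma0 gfx" },
		.page_sched = { "sdma0 page" },
	};

	/* on Raven (no page queue) PTE updates land on the gfx ring */
	printf("%s\n", pte_sched(&inst, false)->name);
	return 0;
}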