Commit 66daccde authored by Le Ma, committed by Alex Deucher

drm/amdgpu: add master/slave check in init phase

Skip KCQ setup on slave xcc as there's no use case.
Signed-off-by: Le Ma <le.ma@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 224d3df9
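
The helper this patch adds treats an XCC as the "master" of its compute partition when its id is a multiple of num_xcc_per_xcp, and every KCQ-related init step below is gated on that predicate. A minimal user-space sketch of the same logic, for illustration only — the fake_gfx struct, the is_master_xcc wrapper, and the num_xcc_per_xcp value of 4 are stand-ins, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant field of adev->gfx; illustration only. */
struct fake_gfx {
	int num_xcc_per_xcp;	/* XCCs per compute partition (XCP) */
};

/* Same predicate as the new amdgpu_gfx_is_master_xcc(): xcc_id is a
 * master iff it is a multiple of num_xcc_per_xcp; the ternary guards
 * against a zero divisor by falling back to 1. */
static bool is_master_xcc(const struct fake_gfx *gfx, int xcc_id)
{
	return !(xcc_id % (gfx->num_xcc_per_xcp ? gfx->num_xcc_per_xcp : 1));
}

int main(void)
{
	struct fake_gfx gfx = { .num_xcc_per_xcp = 4 };	/* illustrative */
	int xcc_id;

	for (xcc_id = 0; xcc_id < 8; xcc_id++)
		printf("xcc %d: %s\n", xcc_id,
		       is_master_xcc(&gfx, xcc_id) ? "master" : "slave");
	/* xcc 0 and xcc 4 print "master"; all others print "slave". */
	return 0;
}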
@@ -489,16 +489,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
 		return -EINVAL;
 
 	spin_lock(&kiq->ring_lock);
-	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
-					adev->gfx.num_compute_rings)) {
-		spin_unlock(&adev->gfx.kiq[0].ring_lock);
-		return -ENOMEM;
-	}
+	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+		if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+						adev->gfx.num_compute_rings)) {
+			spin_unlock(&kiq->ring_lock);
+			return -ENOMEM;
+		}
 
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		j = i + xcc_id * adev->gfx.num_compute_rings;
-		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
-					   RESET_QUEUES, 0, 0);
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_compute_rings;
+			kiq->pmf->kiq_unmap_queues(kiq_ring,
+						   &adev->gfx.compute_ring[i],
+						   RESET_QUEUES, 0, 0);
+		}
 	}
 
 	if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
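
Note that besides gating slave XCCs, this hunk changes the -ENOMEM path to release kiq->ring_lock, the per-XCC lock actually taken a few lines up, instead of unconditionally unlocking adev->gfx.kiq[0].ring_lock, fixing a lock/unlock mismatch for xcc_id != 0.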
@@ -549,22 +552,26 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
 		 kiq_ring->queue);
 	spin_lock(&kiq->ring_lock);
-	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
-					adev->gfx.num_compute_rings +
-					kiq->pmf->set_resources_size);
-	if (r) {
-		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
-		spin_unlock(&adev->gfx.kiq[0].ring_lock);
-		return r;
-	}
+	/* No need to map kcq on the slave */
+	if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+		r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+						adev->gfx.num_compute_rings +
+						kiq->pmf->set_resources_size);
+		if (r) {
+			DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+			spin_unlock(&adev->gfx.kiq[0].ring_lock);
+			return r;
+		}
 
-	if (adev->enable_mes)
-		queue_mask = ~0ULL;
+		if (adev->enable_mes)
+			queue_mask = ~0ULL;
 
-	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
-	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
-		j = i + xcc_id * adev->gfx.num_compute_rings;
-		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);
+		kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+			j = i + xcc_id * adev->gfx.num_compute_rings;
+			kiq->pmf->kiq_map_queues(kiq_ring,
+						 &adev->gfx.compute_ring[i]);
+		}
 	}
 
 	r = amdgpu_ring_test_helper(kiq_ring);
@@ -1078,3 +1085,9 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
 		adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
 	}
 }
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
+{
+	return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
+			adev->gfx.num_xcc_per_xcp : 1));
+}
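
With the illustrative value from the sketch above (num_xcc_per_xcp = 4), xcc_id 0 and 4 are masters while 1-3 and 5-7 are slaves. The ternary guards against num_xcc_per_xcp being 0 — presumably before partitioning is configured or on single-XCC parts — in which case the divisor falls back to 1 and every XCC counts as a master, preserving the existing single-XCC behaviour.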
@@ -462,4 +462,6 @@ void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id)
 int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
 int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
 						struct amdgpu_iv_entry *entry);
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
 #endif
@@ -406,6 +406,11 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		else
 			tmo = tmo_gfx;
 
+		/* skip ib test on the slave kcq */
+		if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+		    !amdgpu_gfx_is_master_xcc(adev, ring->xcc_id))
+			continue;
+
 		r = amdgpu_ring_test_ib(ring, tmo);
 		if (!r) {
 			DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
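
Since slave XCCs never have their KCQs mapped by the amdgpu_gfx.c hunks above, an IB test on one of their compute rings would submit to a queue that was never set up, so amdgpu_ib_ring_tests() now skips those rings entirely.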
@@ -1885,9 +1885,13 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 		if (r)
 			return r;
 
-		for (j = 0; j < adev->gfx.num_compute_rings; j++) {
-			ring = &adev->gfx.compute_ring[j + i * adev->gfx.num_compute_rings];
-			amdgpu_ring_test_helper(ring);
+		/* skip ring test on slave kcq */
+		if (amdgpu_gfx_is_master_xcc(adev, i)) {
+			for (j = 0; j < adev->gfx.num_compute_rings; j++) {
+				ring = &adev->gfx.compute_ring[j +
+					i * adev->gfx.num_compute_rings];
+				amdgpu_ring_test_helper(ring);
+			}
 		}
 
 		gfx_v9_4_3_enable_gui_idle_interrupt(adev, true, i);