Commit 0f83227b authored by Sunil Khatri, committed by Alex Deucher

drm/amdgpu: Add cp queues support for gfx10 in ipdump

Add support for dumping the CP queue registers for
all queue instances of gfx10 to devcoredump.
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 74feef56
@@ -436,6 +436,7 @@ struct amdgpu_gfx {
/* IP reg dump */
uint32_t *ip_dump_core;
uint32_t *ip_dump_cp_queues;
};
struct amdgpu_gfx_ras_reg_entry {
...
@@ -381,6 +381,49 @@ static const struct amdgpu_hwip_reg_entry gc_reg_list_10_1[] = {
SOC15_REG_ENTRY_STR(GC, 0, mmGRBM_STATUS_SE3)
};
static const struct amdgpu_hwip_reg_entry gc_cp_reg_list_10[] = {
/* compute registers */
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_VMID),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PERSISTENT_STATE),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PIPE_PRIORITY),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUEUE_PRIORITY),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_QUANTUM),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_BASE_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_RPTR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_RPTR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_IB_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_REQUEST),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_RPTR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_EVENTS),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_BASE_ADDR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_CONTROL),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CNTL_STACK_SIZE),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_WG_STATE_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_CTX_SAVE_SIZE),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_GDS_RESOURCE_STATE),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_ERROR),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_EOP_WPTR_MEM),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_LO),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_PQ_WPTR_HI),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_CNTL_STACK_DW_CNT),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_SUSPEND_WG_STATE_OFFSET),
SOC15_REG_ENTRY_STR(GC, 0, mmCP_HQD_DEQUEUE_STATUS)
};
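Note: these are the per-queue HQD (hardware queue descriptor) registers, i.e. the state the CP keeps for each compute queue: VMID and priority/quantum settings, the PQ ring (base, rptr, wptr, doorbell, control), indirect-buffer state, the EOP buffer, the context-save area and the dequeue/suspend status. They are banked per mec/pipe/queue, which is why the dump code below has to walk every instance via GRBM selection.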
static const struct soc15_reg_golden golden_settings_gc_10_1[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
@@ -4595,10 +4638,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
hw_prio, NULL);
}
-static void gfx_v10_0_alloc_dump_mem(struct amdgpu_device *adev)
+static void gfx_v10_0_alloc_ip_dump(struct amdgpu_device *adev)
{
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
uint32_t *ptr;
uint32_t inst;
ptr = kcalloc(reg_count, sizeof(uint32_t), GFP_KERNEL);
if (ptr == NULL) {
@@ -4607,6 +4651,19 @@ static void gfx_v10_0_alloc_dump_mem(struct amdgpu_device *adev)
} else {
adev->gfx.ip_dump_core = ptr;
}
/* Allocate memory for compute queue registers for all the instances */
reg_count = ARRAY_SIZE(gc_cp_reg_list_10);
inst = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
ptr = kcalloc(reg_count * inst, sizeof(uint32_t), GFP_KERNEL);
if (ptr == NULL) {
DRM_ERROR("Failed to allocate memory for GFX CP IP Dump\n");
adev->gfx.ip_dump_cp_queues = NULL;
} else {
adev->gfx.ip_dump_cp_queues = ptr;
}
}
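For scale: the buffer allocated above holds one copy of gc_cp_reg_list_10 (39 entries) per compute queue instance. Assuming the common gfx10 configuration of 2 MECs, 4 pipes per MEC and 8 queues per pipe, that is 64 instances x 39 registers = 2496 dwords, i.e. a bit under 10 KiB.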
static int gfx_v10_0_sw_init(void *handle)
@@ -4761,7 +4818,7 @@ static int gfx_v10_0_sw_init(void *handle)
gfx_v10_0_gpu_early_init(adev);
-gfx_v10_0_alloc_dump_mem(adev);
+gfx_v10_0_alloc_ip_dump(adev);
return 0;
}
@@ -4816,6 +4873,7 @@ static int gfx_v10_0_sw_fini(void *handle)
gfx_v10_0_free_microcode(adev);
kfree(adev->gfx.ip_dump_core);
kfree(adev->gfx.ip_dump_cp_queues);
return 0;
}
@@ -9289,7 +9347,7 @@ static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-uint32_t i;
+uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
if (!adev->gfx.ip_dump_core)
@@ -9299,12 +9357,36 @@ static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
drm_printf(p, "%-50s \t 0x%08x\n",
gc_reg_list_10_1[i].reg_name,
adev->gfx.ip_dump_core[i]);
/* print compute queue registers for all instances */
if (!adev->gfx.ip_dump_cp_queues)
return;
reg_count = ARRAY_SIZE(gc_cp_reg_list_10);
drm_printf(p, "\nnum_mec: %d num_pipe: %d num_queue: %d\n",
adev->gfx.mec.num_mec,
adev->gfx.mec.num_pipe_per_mec,
adev->gfx.mec.num_queue_per_pipe);
for (i = 0; i < adev->gfx.mec.num_mec; i++) {
for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
drm_printf(p, "\nmec %d, pipe %d, queue %d\n", i, j, k);
for (reg = 0; reg < reg_count; reg++) {
drm_printf(p, "%-50s \t 0x%08x\n",
gc_cp_reg_list_10[reg].reg_name,
adev->gfx.ip_dump_cp_queues[index + reg]);
}
index += reg_count;
}
}
}
}
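The flat layout of ip_dump_cp_queues follows the traversal above: one block of ARRAY_SIZE(gc_cp_reg_list_10) dwords per queue, ordered mec-major, then pipe, then queue. A minimal lookup sketch (hypothetical helper, not part of this patch; it only restates the indexing the loops use):

static uint32_t gfx_v10_0_cp_dump_value(struct amdgpu_device *adev,
					uint32_t mec, uint32_t pipe,
					uint32_t queue, uint32_t reg)
{
	uint32_t reg_count = ARRAY_SIZE(gc_cp_reg_list_10);
	uint32_t inst = (mec * adev->gfx.mec.num_pipe_per_mec + pipe) *
			adev->gfx.mec.num_queue_per_pipe + queue;

	/* one block of reg_count dwords per queue instance */
	return adev->gfx.ip_dump_cp_queues[inst * reg_count + reg];
}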
static void gfx_v10_ip_dump(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-uint32_t i;
+uint32_t i, j, k, reg, index = 0;
uint32_t reg_count = ARRAY_SIZE(gc_reg_list_10_1);
if (!adev->gfx.ip_dump_core)
@@ -9314,6 +9396,32 @@ static void gfx_v10_ip_dump(void *handle)
for (i = 0; i < reg_count; i++)
adev->gfx.ip_dump_core[i] = RREG32(SOC15_REG_ENTRY_OFFSET(gc_reg_list_10_1[i]));
amdgpu_gfx_off_ctrl(adev, true);
/* dump compute queue registers for all instances */
if (!adev->gfx.ip_dump_cp_queues)
return;
reg_count = ARRAY_SIZE(gc_cp_reg_list_10);
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->srbm_mutex);
for (i = 0; i < adev->gfx.mec.num_mec; i++) {
for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++) {
for (k = 0; k < adev->gfx.mec.num_queue_per_pipe; k++) {
/* ME0 is for GFX so start from 1 for CP */
nv_grbm_select(adev, 1 + i, j, k, 0);
for (reg = 0; reg < reg_count; reg++) {
adev->gfx.ip_dump_cp_queues[index + reg] =
RREG32(SOC15_REG_ENTRY_OFFSET(
gc_cp_reg_list_10[reg]));
}
index += reg_count;
}
}
}
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
amdgpu_gfx_off_ctrl(adev, true);
}
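A note on the bracketing above: GC registers generally cannot be read over MMIO while GFXOFF is engaged, hence the amdgpu_gfx_off_ctrl(adev, false)/true pair around the walk. nv_grbm_select() redirects the subsequent CP_HQD_* reads to the chosen me/pipe/queue, so srbm_mutex is held for the whole loop and the selection is restored to (0, 0, 0, 0) before the mutex is released. ME0 hosts the GFX queues, which is why the compute walk selects ME 1 + i.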
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
...