Commit 0dd9c514 authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.5-2023-07-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.5-2023-07-26:

amdgpu:
- gfxhub partition fix
- Fix error handling in psp_sw_init()
- SMU13 fix
- DCN 3.1 fix
- DCN 3.2 fix
- Fix for display PHY programming sequence
- DP MST error handling fix
- GFX 9.4.3 fix

amdkfd:
- GFX11 trap handling fix
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230726184936.7812-1-alexander.deucher@amd.com
parents 75da46c1 bc1688fc
@@ -498,11 +498,11 @@ static int psp_sw_init(void *handle)
 	return 0;
 
 failed2:
-	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
-			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
-failed1:
 	amdgpu_bo_free_kernel(&psp->fence_buf_bo,
 			      &psp->fence_buf_mc_addr, &psp->fence_buf);
+failed1:
+	amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+			      &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
 	return ret;
 }

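The hunk above swaps the bodies of the failed1/failed2 labels so that psp_sw_init() unwinds its allocations in reverse order: each label frees only what was successfully allocated before the failing step. A minimal standalone sketch of that goto-unwind pattern, using hypothetical buffer names and a fail_step switch rather than the amdgpu API:

#include <stdio.h>
#include <stdlib.h>

/*
 * Minimal sketch of goto-based error unwinding in the style of the fixed
 * psp_sw_init(): each label frees only what was allocated before the
 * failing step, in reverse allocation order. The buffer names and the
 * fail_step switch are hypothetical, not the amdgpu API.
 */
static int setup_buffers(int fail_step)
{
	char *fw_pri = NULL, *fence = NULL;
	int ret = -12;                                  /* stand-in for -ENOMEM */

	fw_pri = (fail_step == 1) ? NULL : malloc(64);  /* allocation #1 */
	if (!fw_pri)
		goto failed0;                           /* nothing allocated yet */

	fence = (fail_step == 2) ? NULL : malloc(64);   /* allocation #2 */
	if (!fence)
		goto failed1;                           /* only #1 needs freeing */

	if (fail_step == 3)
		goto failed2;                           /* #2 and #1 need freeing */

	free(fence);
	free(fw_pri);
	return 0;

failed2:
	free(fence);                                    /* undo allocation #2 first ... */
failed1:
	free(fw_pri);                                   /* ... then allocation #1 */
failed0:
	return ret;
}

int main(void)
{
	printf("fail at step 3 -> %d\n", setup_buffers(3));
	printf("no failure     -> %d\n", setup_buffers(0));
	return 0;
}

Jumping to the label that matches the failure point then never touches a buffer that was not allocated, which is exactly what the reordering above restores.
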
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/gc_9_4_3_rlc.bin");
 #define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
 #define GOLDEN_GB_ADDR_CONFIG 0x2a114042
+#define CP_HQD_PERSISTENT_STATE_DEFAULT 0xbe05301
 
 struct amdgpu_gfx_ras gfx_v9_4_3_ras;

@@ -1736,7 +1737,7 @@ static int gfx_v9_4_3_xcc_q_fini_register(struct amdgpu_ring *ring,
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IQ_TIMER, 0);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_IB_CONTROL, 0);
-	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, 0);
+	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PERSISTENT_STATE, CP_HQD_PERSISTENT_STATE_DEFAULT);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0x40000000);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_DOORBELL_CONTROL, 0);
 	WREG32_SOC15_RLC(GC, GET_INST(GC, xcc_id), regCP_HQD_PQ_RPTR, 0);

@@ -402,18 +402,15 @@ static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
 static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
 				       uint32_t xcc_mask)
 {
-	uint32_t tmp_mask;
 	int i;
 
-	tmp_mask = xcc_mask;
 	/*
 	 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
 	 * VF copy registers so vbios post doesn't program them, for
 	 * SRIOV driver need to program them
 	 */
 	if (amdgpu_sriov_vf(adev)) {
-		for_each_inst(i, tmp_mask) {
-			i = ffs(tmp_mask) - 1;
+		for_each_inst(i, xcc_mask) {
 			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE,
 					 adev->gmc.vram_start >> 24);
 			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP,

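The hunk above drops the tmp_mask copy and the manual ffs() recomputation inside the loop, relying on for_each_inst() to walk the set bits of xcc_mask. A rough standalone approximation of that kind of set-bit iteration, written in plain C with __builtin_ffs(); the actual kernel macro may be implemented differently:

#include <stdio.h>
#include <stdint.h>

/*
 * Standalone approximation of iterating the set bits of an instance mask,
 * roughly what a helper such as for_each_inst() provides. Illustration
 * only; the real amdgpu macro is not reproduced here.
 */
static void program_instances(uint32_t xcc_mask)
{
	uint32_t remaining = xcc_mask;
	int i;

	while (remaining) {
		i = __builtin_ffs(remaining) - 1;	/* lowest set bit */
		printf("programming GC instance %d\n", i);
		remaining &= ~(1u << i);		/* clear it and continue */
	}
}

int main(void)
{
	program_instances(0x0000000d);	/* instances 0, 2 and 3 */
	return 0;
}

Since the iterator already hands back each instance index in turn, overwriting it with ffs() of an unmodified copy of the mask would keep selecting the same first instance, which is what the removed lines did.
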
@@ -302,8 +302,7 @@ static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable)
 	if (!q)
 		return 0;
 
-	if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
-	    KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0))
+	if (!kfd_dbg_has_cwsr_workaround(q->device))
 		return 0;
 
 	if (enable && q->properties.is_user_cu_masked)

@@ -349,7 +348,7 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd)
 {
 	uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode;
 	uint32_t flags = pdd->process->dbg_flags;
-	bool sq_trap_en = !!spi_dbg_cntl;
+	bool sq_trap_en = !!spi_dbg_cntl || !kfd_dbg_has_cwsr_workaround(pdd->dev);
 
 	if (!kfd_dbg_is_per_vmid_supported(pdd->dev))
 		return 0;

@@ -100,6 +100,12 @@ static inline bool kfd_dbg_is_rlc_restore_supported(struct kfd_node *dev)
 		 KFD_GC_VERSION(dev) == IP_VERSION(10, 1, 1));
 }
 
+static inline bool kfd_dbg_has_cwsr_workaround(struct kfd_node *dev)
+{
+	return KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0) &&
+	       KFD_GC_VERSION(dev) <= IP_VERSION(11, 0, 3);
+}
+
 static inline bool kfd_dbg_has_gws_support(struct kfd_node *dev)
 {
 	if ((KFD_GC_VERSION(dev) == IP_VERSION(9, 0, 1)

@@ -226,8 +226,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
 	queue_input.paging = false;
 	queue_input.tba_addr = qpd->tba_addr;
 	queue_input.tma_addr = qpd->tma_addr;
-	queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) ||
-			      KFD_GC_VERSION(q->device) > IP_VERSION(11, 0, 3);
+	queue_input.trap_en = !kfd_dbg_has_cwsr_workaround(q->device);
 	queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled;
 
 	queue_type = convert_to_mes_queue_type(q->properties.type);

@@ -1806,8 +1805,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 	 */
 	q->properties.is_evicted = !!qpd->evicted;
 	q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled &&
-				  KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) &&
-				  KFD_GC_VERSION(q->device) <= IP_VERSION(11, 0, 3);
+				  kfd_dbg_has_cwsr_workaround(q->device);
 
 	if (qd)
 		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,

@@ -706,7 +706,7 @@ void dm_handle_mst_sideband_msg_ready_event(
 
 		if (retry == 3) {
 			DRM_ERROR("Failed to ack MST event.\n");
-			return;
+			break;
 		}
 
 		drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);

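Changing return to break means the handler keeps executing after the retry logic even when the ack ultimately fails, instead of bailing out of the function early. A generic sketch of that control-flow difference, with hypothetical helpers standing in for the DRM MST calls:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for "ack the event" and the follow-up work that
 * must still run whether or not the ack succeeded. Not the DRM MST API. */
static bool try_ack(int attempt)
{
	(void)attempt;
	return false;			/* simulate a persistently failing ack */
}

static void send_new_request(void)
{
	printf("follow-up request sent\n");
}

static void handle_event(void)
{
	int retry;

	for (retry = 0; retry < 3; retry++) {
		if (try_ack(retry))
			break;
	}
	if (retry == 3)
		fprintf(stderr, "failed to ack event\n");

	/* Because the failure path breaks (or falls through) rather than
	 * returning, this follow-up still runs on failure. */
	send_new_request();
}

int main(void)
{
	handle_event();
	return 0;
}
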
@@ -1792,10 +1792,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
 			hws->funcs.edp_backlight_control(edp_link_with_sink, false);
 		}
 		/*resume from S3, no vbios posting, no need to power down again*/
+		clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+
 		power_down_all_hw_blocks(dc);
 		disable_vga_and_power_gate_all_controllers(dc);
 		if (edp_link_with_sink && !keep_edp_vdd_on)
 			dc->hwss.edp_power_control(edp_link_with_sink, false);
+		clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
 	}
 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 1);
 }

@@ -84,7 +84,8 @@ static enum phyd32clk_clock_source get_phy_mux_symclk(
 		struct dcn_dccg *dccg_dcn,
 		enum phyd32clk_clock_source src)
 {
-	if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+	if (dccg_dcn->base.ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+	    dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
 		if (src == PHYD32CLKC)
 			src = PHYD32CLKF;
 		if (src == PHYD32CLKD)

@@ -49,6 +49,9 @@ static void dccg32_trigger_dio_fifo_resync(
 	uint32_t dispclk_rdivider_value = 0;
 
 	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
-	REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+
+	/* Not valid for the WDIVIDER to be set to 0 */
+	if (dispclk_rdivider_value != 0)
+		REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
 }

@@ -1734,7 +1734,7 @@ static ssize_t smu_v13_0_0_get_gpu_metrics(struct smu_context *smu,
 	gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
 	gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
 
-	gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+	gpu_metrics->current_gfxclk = gpu_metrics->average_gfxclk_frequency;
 	gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
 	gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
 	gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];