Commit 141bb6bc authored by Dave Airlie

Merge tag 'amd-drm-fixes-6.11-2024-09-05' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.11-2024-09-05:

amdgpu:
- IPS workaround
- Fix compatibility with older MES firmware
- Fix CPU spikes when clearing VRAM
- Backlight fix
- PMO fix
- Revert SWSMU change to fix regression
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240905190533.854116-1-alexander.deucher@amd.com
parents ca10367a 1a8d8454
@@ -348,6 +348,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	/* always clear VRAM */
+	flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 				AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
...
@@ -657,7 +657,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
 	uint64_t queue_mask = 0;
 	int r, i, j;
 
-	if (adev->enable_mes)
+	if (adev->mes.enable_legacy_queue_map)
 		return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
 
 	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
@@ -719,7 +719,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
 
 	amdgpu_device_flush_hdp(adev, NULL);
 
-	if (adev->enable_mes) {
+	if (adev->mes.enable_legacy_queue_map) {
 		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 			j = i + xcc_id * adev->gfx.num_gfx_rings;
 			r = amdgpu_mes_map_legacy_queue(adev,
...
@@ -75,6 +75,7 @@ struct amdgpu_mes {
 
 	uint32_t			sched_version;
 	uint32_t			kiq_version;
+	bool				enable_legacy_queue_map;
 
 	uint32_t			total_max_queue;
 	uint32_t			max_doorbell_slices;
...
@@ -693,6 +693,28 @@ static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
 			      (void **)&adev->mes.ucode_fw_ptr[pipe]);
 }
 
+static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
+{
+	int pipe;
+
+	/* get MES scheduler/KIQ versions */
+	mutex_lock(&adev->srbm_mutex);
+
+	for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+		soc21_grbm_select(adev, 3, pipe, 0, 0);
+
+		if (pipe == AMDGPU_MES_SCHED_PIPE)
+			adev->mes.sched_version =
+				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+		else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+			adev->mes.kiq_version =
+				RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+	}
+
+	soc21_grbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
 static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
 {
 	uint64_t ucode_addr;
@@ -1062,18 +1084,6 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
 		mes_v11_0_queue_init_register(ring);
 	}
 
-	/* get MES scheduler/KIQ versions */
-	mutex_lock(&adev->srbm_mutex);
-	soc21_grbm_select(adev, 3, pipe, 0, 0);
-
-	if (pipe == AMDGPU_MES_SCHED_PIPE)
-		adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-	else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
-		adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-
-	soc21_grbm_select(adev, 0, 0, 0, 0);
-	mutex_unlock(&adev->srbm_mutex);
-
 	return 0;
 }
@@ -1320,15 +1330,24 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
 
 	mes_v11_0_enable(adev, true);
 
+	mes_v11_0_get_fw_version(adev);
+
 	mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
 
 	r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
 	if (r)
 		goto failure;
 
-	r = mes_v11_0_hw_init(adev);
-	if (r)
-		goto failure;
+	if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
+		adev->mes.enable_legacy_queue_map = true;
+	else
+		adev->mes.enable_legacy_queue_map = false;
+
+	if (adev->mes.enable_legacy_queue_map) {
+		r = mes_v11_0_hw_init(adev);
+		if (r)
+			goto failure;
+	}
 
 	return r;
...
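Review note: the gate above keys legacy queue mapping off the MES scheduler firmware revision, which is why older MES firmware now falls back to the KIQ path in amdgpu_gfx_enable_kcq()/kgq(). A minimal standalone sketch of the check, assuming AMDGPU_MES_VERSION_MASK is 0x00000fff as defined in amdgpu_mes.h (the low bits of sched_version carry the firmware revision; API and feature versions live in higher bits):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed to match amdgpu_mes.h: low 12 bits = firmware revision. */
#define AMDGPU_MES_VERSION_MASK 0x00000fff

/* MES firmware 0x47 and newer can map legacy (kernel-managed) queues
 * through MES; older firmware must keep using the KIQ path, which is
 * what enable_legacy_queue_map selects in the hunks above. */
static bool mes_supports_legacy_queue_map(uint32_t sched_version)
{
	return (sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47;
}

int main(void)
{
	printf("0x46: %d\n", mes_supports_legacy_queue_map(0x46)); /* 0 */
	printf("0x47: %d\n", mes_supports_legacy_queue_map(0x47)); /* 1 */
	return 0;
}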
@@ -1266,6 +1266,7 @@ static int mes_v12_0_sw_init(void *handle)
 
 	adev->mes.funcs = &mes_v12_0_funcs;
 	adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
 	adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+	adev->mes.enable_legacy_queue_map = true;
 
 	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
@@ -1422,9 +1423,11 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
 		mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
 	}
 
-	r = mes_v12_0_hw_init(adev);
-	if (r)
-		goto failure;
+	if (adev->mes.enable_legacy_queue_map) {
+		r = mes_v12_0_hw_init(adev);
+		if (r)
+			goto failure;
+	}
 
 	return r;
...
@@ -1752,6 +1752,30 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
 	return bb;
 }
 
+static enum dmub_ips_disable_type dm_get_default_ips_mode(
+	struct amdgpu_device *adev)
+{
+	/*
+	 * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+	 * cause a hard hang. A fix exists for newer PMFW.
+	 *
+	 * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+	 * IPS state in all cases, except for s0ix and all displays off (DPMS),
+	 * where IPS2 is allowed.
+	 *
+	 * When checking pmfw version, use the major and minor only.
+	 */
+	if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
+	    (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+		return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+		return DMUB_IPS_ENABLE;
+
+	/* ASICs older than DCN35 do not have IPSs */
+	return DMUB_IPS_DISABLE_ALL;
+}
+
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1863,7 +1887,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
 		init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
 	else
-		init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+		init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
 
 	init_data.flags.disable_ips_in_vpb = 0;
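Review note: the masked comparison in dm_get_default_ips_mode() is easy to misread, so here is a self-contained sketch of just that arithmetic, under the assumption that adev->pm.fw_version packs the PMFW version as one byte each of major, minor and patch (0x00MMmmpp). Masking with 0x00FFFF00 discards the patch byte, so 0x005D6300 reads as "major 0x5D (93), minor 0x63 (99), any patch level":

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumption: fw_version is laid out as 0x00MMmmpp (major/minor/patch). */
static bool pmfw_needs_ips_workaround(uint32_t fw_version)
{
	/* Compare major.minor only; the patch byte is masked away. */
	return (fw_version & 0x00FFFF00) < 0x005D6300;
}

int main(void)
{
	printf("%d\n", pmfw_needs_ips_workaround(0x005D62FFu)); /* 93.98.x -> 1 */
	printf("%d\n", pmfw_needs_ips_workaround(0x005D6300u)); /* 93.99.0 -> 0 */
	printf("%d\n", pmfw_needs_ips_workaround(0x005E0000u)); /* 94.0.0  -> 0 */
	return 0;
}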
@@ -4492,7 +4516,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 	struct amdgpu_dm_backlight_caps caps;
 	struct dc_link *link;
 	u32 brightness;
-	bool rc;
+	bool rc, reallow_idle = false;
 
 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
 	caps = dm->backlight_caps[bl_idx];
@@ -4505,6 +4529,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 	link = (struct dc_link *)dm->backlight_link[bl_idx];
 
 	/* Change brightness based on AUX property */
+	mutex_lock(&dm->dc_lock);
+	if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
+		dc_allow_idle_optimizations(dm->dc, false);
+		reallow_idle = true;
+	}
+
 	if (caps.aux_support) {
 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -4516,6 +4546,11 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
 	}
 
+	if (dm->dc->caps.ips_support && reallow_idle)
+		dc_allow_idle_optimizations(dm->dc, true);
+
+	mutex_unlock(&dm->dc_lock);
+
 	if (rc)
 		dm->actual_brightness[bl_idx] = user_brightness;
 }
...
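Review note: the backlight hunks implement a pause/resume pattern: if IPS idle optimizations are currently allowed, drop them for the duration of the brightness write, then re-allow them only if this call was the one that paused them; in the real code the whole sequence sits under dm->dc_lock. A minimal sketch of the pattern with stand-in types (fake_dc is a placeholder, not the real DC structure):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder for the DC state the diff touches (dc->caps.ips_support,
 * dmub_srv->idle_allowed); not the real driver structures. */
struct fake_dc {
	bool ips_support;
	bool idle_allowed;
};

static void allow_idle_optimizations(struct fake_dc *dc, bool allow)
{
	dc->idle_allowed = allow;
}

static void set_backlight_level(struct fake_dc *dc)
{
	bool reallow_idle = false;

	/* Pause IPS only if it is actually active right now... */
	if (dc->ips_support && dc->idle_allowed) {
		allow_idle_optimizations(dc, false);
		reallow_idle = true;
	}

	/* ... hardware/AUX programming would happen here ... */

	/* ... and resume it only if this call paused it. */
	if (dc->ips_support && reallow_idle)
		allow_idle_optimizations(dc, true);
}

int main(void)
{
	struct fake_dc dc = { .ips_support = true, .idle_allowed = true };

	set_backlight_level(&dc);
	printf("idle_allowed restored: %d\n", dc.idle_allowed); /* 1 */
	return 0;
}

The reallow_idle flag ensures a caller that found IPS already paused does not re-enable it behind the pauser's back.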
@@ -811,7 +811,8 @@ static void build_synchronized_timing_groups(
 		for (j = i + 1; j < display_config->display_config.num_streams; j++) {
 			if (memcmp(master_timing,
 				&display_config->display_config.stream_descriptors[j].timing,
-				sizeof(struct dml2_timing_cfg)) == 0) {
+				sizeof(struct dml2_timing_cfg)) == 0 &&
+				display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) {
 				set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
 				set_bit_in_bitfield(&stream_mapped_mask, j);
 			}
...
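Review note: the PMO fix tightens the grouping predicate: two streams now join the same synchronized timing group only when their timings match byte-for-byte and they drive the same output encoder type. A toy version of the predicate (struct names here are illustrative stand-ins for dml2_timing_cfg and the stream descriptor, not the real DML types):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for dml2_timing_cfg and the stream descriptor. */
struct toy_timing {
	int h_total, v_total, pixel_clock_khz;
};

enum toy_encoder { ENC_DP, ENC_HDMI };

struct toy_stream {
	struct toy_timing timing;
	enum toy_encoder output_encoder;
};

/* After the fix: identical timing alone is no longer enough, the
 * output encoder must match too. */
static bool same_timing_group(const struct toy_stream *a,
			      const struct toy_stream *b)
{
	return memcmp(&a->timing, &b->timing, sizeof(struct toy_timing)) == 0 &&
	       a->output_encoder == b->output_encoder;
}

int main(void)
{
	struct toy_stream dp   = { { 2200, 1125, 148500 }, ENC_DP };
	struct toy_stream hdmi = { { 2200, 1125, 148500 }, ENC_HDMI };

	/* Same timing, different encoders: not grouped anymore. */
	printf("%d\n", same_timing_group(&dp, &hdmi)); /* 0 */
	return 0;
}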
@@ -2266,7 +2266,8 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		smu_dpm_ctx->dpm_level = level;
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
 		index = fls(smu->workload_mask);
 		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 		workload[0] = smu->workload_setting[index];
@@ -2345,7 +2346,8 @@ static int smu_switch_power_profile(void *handle,
 		workload[0] = smu->workload_setting[index];
 	}
 
-	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
+	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+	    smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
 		smu_bump_power_profile_mode(smu, workload, 0);
 
 	return 0;
...
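Review note: the SWSMU revert restores one extra condition in both call sites: workload-profile bumps are skipped for the manual performance level as well as perf-determinism, so a user-forced state is not disturbed. Sketch of just that predicate (the enum is a trimmed stand-in for the kernel's amd_dpm_forced_level):

#include <stdbool.h>
#include <stdio.h>

/* Trimmed stand-in for the kernel's amd_dpm_forced_level values. */
enum dpm_forced_level {
	DPM_LEVEL_AUTO,
	DPM_LEVEL_MANUAL,
	DPM_LEVEL_PERF_DETERMINISM,
};

/* After the revert, both user-forced modes leave the workload
 * profile alone. */
static bool may_bump_power_profile(enum dpm_forced_level level)
{
	return level != DPM_LEVEL_MANUAL &&
	       level != DPM_LEVEL_PERF_DETERMINISM;
}

int main(void)
{
	printf("auto:   %d\n", may_bump_power_profile(DPM_LEVEL_AUTO));   /* 1 */
	printf("manual: %d\n", may_bump_power_profile(DPM_LEVEL_MANUAL)); /* 0 */
	return 0;
}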