Commit c2a801af authored by Jack Zhang, committed by Alex Deucher

amd/amdgpu/sriov enable onevf mode for ARCTURUS VF

Before, initialization of the smu ip block would be skipped
for sriov ASICs. But if there is only one VF being used,
the guest driver should be able to dump some HW info such as
clks, temperature, etc.

To solve this, now after onevf mode is enabled, host
driver will notify guest. If it's onevf mode, guest will
do smu hw_init and skip some steps in normal smu hw_init
flow because host driver has already done it for smu.

With this fix, guest app can talk with smu and dump hw
information from smu.

v2: refine the logic for pm_enabled. Skip hw_init by not
changing pm_enabled.
v3: refine is_support_sw_smu and fix some indentation
issues.
Signed-off-by: Jack Zhang <Jack.Zhang1@amd.com>
Acked-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6a876844
...@@ -1319,7 +1319,8 @@ static int psp_np_fw_load(struct psp_context *psp) ...@@ -1319,7 +1319,8 @@ static int psp_np_fw_load(struct psp_context *psp)
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
|| ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
|| ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
/*skip ucode loading in SRIOV VF */ /*skip ucode loading in SRIOV VF */
continue; continue;
......
...@@ -828,8 +828,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) ...@@ -828,8 +828,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
......
...@@ -560,7 +560,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) ...@@ -560,7 +560,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20) if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false; return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS) { else if (adev->asic_type >= CHIP_ARCTURUS) {
if (amdgpu_sriov_vf(adev)) if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return false; return false;
else else
return true; return true;
...@@ -1090,28 +1090,27 @@ static int smu_smc_table_hw_init(struct smu_context *smu, ...@@ -1090,28 +1090,27 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
} }
/* smu_dump_pptable(smu); */ /* smu_dump_pptable(smu); */
if (!amdgpu_sriov_vf(adev)) {
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
*/
ret = smu_write_pptable(smu);
if (ret)
return ret;
/* /* issue Run*Btc msg */
* Copy pptable bo in the vram to smc with SMU MSGs such as ret = smu_run_btc(smu);
* SetDriverDramAddr and TransferTableDram2Smu. if (ret)
*/ return ret;
ret = smu_write_pptable(smu); ret = smu_feature_set_allowed_mask(smu);
if (ret) if (ret)
return ret; return ret;
/* issue Run*Btc msg */
ret = smu_run_btc(smu);
if (ret)
return ret;
ret = smu_feature_set_allowed_mask(smu);
if (ret)
return ret;
ret = smu_system_features_control(smu, true);
if (ret)
return ret;
ret = smu_system_features_control(smu, true);
if (ret)
return ret;
}
if (adev->asic_type != CHIP_ARCTURUS) { if (adev->asic_type != CHIP_ARCTURUS) {
ret = smu_notify_display_change(smu); ret = smu_notify_display_change(smu);
if (ret) if (ret)
...@@ -1164,8 +1163,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu, ...@@ -1164,8 +1163,9 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
/* /*
* Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
*/ */
ret = smu_set_tool_table_location(smu); if (!amdgpu_sriov_vf(adev)) {
ret = smu_set_tool_table_location(smu);
}
if (!smu_is_dpm_running(smu)) if (!smu_is_dpm_running(smu))
pr_info("dpm has been disabled\n"); pr_info("dpm has been disabled\n");
...@@ -1277,6 +1277,9 @@ static int smu_hw_init(void *handle) ...@@ -1277,6 +1277,9 @@ static int smu_hw_init(void *handle)
smu_set_gfx_cgpg(&adev->smu, true); smu_set_gfx_cgpg(&adev->smu, true);
} }
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
if (!smu->pm_enabled) if (!smu->pm_enabled)
return 0; return 0;
...@@ -1329,37 +1332,42 @@ static int smu_hw_fini(void *handle) ...@@ -1329,37 +1332,42 @@ static int smu_hw_fini(void *handle)
struct smu_table_context *table_context = &smu->smu_table; struct smu_table_context *table_context = &smu->smu_table;
int ret = 0; int ret = 0;
if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
if (smu->is_apu) { if (smu->is_apu) {
smu_powergate_sdma(&adev->smu, true); smu_powergate_sdma(&adev->smu, true);
smu_powergate_vcn(&adev->smu, true); smu_powergate_vcn(&adev->smu, true);
smu_powergate_jpeg(&adev->smu, true); smu_powergate_jpeg(&adev->smu, true);
} }
ret = smu_stop_thermal_control(smu); if (!amdgpu_sriov_vf(adev)){
if (ret) { ret = smu_stop_thermal_control(smu);
pr_warn("Fail to stop thermal control!\n");
return ret;
}
/*
* For custom pptable uploading, skip the DPM features
* disable process on Navi1x ASICs.
* - As the gfx related features are under control of
* RLC on those ASICs. RLC reinitialization will be
* needed to reenable them. That will cost much more
* efforts.
*
* - SMU firmware can handle the DPM reenablement
* properly.
*/
if (!smu->uploading_custom_pp_table ||
!((adev->asic_type >= CHIP_NAVI10) &&
(adev->asic_type <= CHIP_NAVI12))) {
ret = smu_stop_dpms(smu);
if (ret) { if (ret) {
pr_warn("Fail to stop Dpms!\n"); pr_warn("Fail to stop thermal control!\n");
return ret; return ret;
} }
/*
* For custom pptable uploading, skip the DPM features
* disable process on Navi1x ASICs.
* - As the gfx related features are under control of
* RLC on those ASICs. RLC reinitialization will be
* needed to reenable them. That will cost much more
* efforts.
*
* - SMU firmware can handle the DPM reenablement
* properly.
*/
if (!smu->uploading_custom_pp_table ||
!((adev->asic_type >= CHIP_NAVI10) &&
(adev->asic_type <= CHIP_NAVI12))) {
ret = smu_stop_dpms(smu);
if (ret) {
pr_warn("Fail to stop Dpms!\n");
return ret;
}
}
} }
kfree(table_context->driver_pptable); kfree(table_context->driver_pptable);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment