Commit c004d44e authored by Mukul Joshi's avatar Mukul Joshi Committed by Alex Deucher

drm/amdgpu: Enable KFD with MES enabled

Enable KFD initialization with MES enabled.
Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Acked-by: Oak Zeng <Oak.Zeng@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 9c12f5cd
...@@ -2171,7 +2171,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) ...@@ -2171,7 +2171,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->has_pr3 = parent ? pci_pr3_present(parent) : false; adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
} }
if (!adev->enable_mes)
amdgpu_amdkfd_device_probe(adev); amdgpu_amdkfd_device_probe(adev);
adev->pm.pp_feature = amdgpu_pp_feature_mask; adev->pm.pp_feature = amdgpu_pp_feature_mask;
...@@ -2500,8 +2499,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) ...@@ -2500,8 +2499,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
goto init_failed; goto init_failed;
/* Don't init kfd if whole hive need to be reset during init */ /* Don't init kfd if whole hive need to be reset during init */
if (!adev->gmc.xgmi.pending_reset && if (!adev->gmc.xgmi.pending_reset)
!adev->enable_mes)
amdgpu_amdkfd_device_init(adev); amdgpu_amdkfd_device_init(adev);
amdgpu_fru_get_product_info(adev); amdgpu_fru_get_product_info(adev);
...@@ -2864,7 +2862,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) ...@@ -2864,7 +2862,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1) if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev); amdgpu_xgmi_remove_device(adev);
if (!adev->enable_mes)
amdgpu_amdkfd_device_fini_sw(adev); amdgpu_amdkfd_device_fini_sw(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) { for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
...@@ -4126,7 +4123,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) ...@@ -4126,7 +4123,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_device_ip_suspend_phase1(adev); amdgpu_device_ip_suspend_phase1(adev);
if (!adev->in_s0ix && !adev->enable_mes) if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm); amdgpu_amdkfd_suspend(adev, adev->in_runpm);
amdgpu_device_evict_resources(adev); amdgpu_device_evict_resources(adev);
...@@ -4180,7 +4177,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) ...@@ -4180,7 +4177,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
queue_delayed_work(system_wq, &adev->delayed_init_work, queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS)); msecs_to_jiffies(AMDGPU_RESUME_MS));
if (!adev->in_s0ix && !adev->enable_mes) { if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm); r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r) if (r)
return r; return r;
...@@ -4463,7 +4460,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, ...@@ -4463,7 +4460,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
int retry_limit = 0; int retry_limit = 0;
retry: retry:
if (!adev->enable_mes)
amdgpu_amdkfd_pre_reset(adev); amdgpu_amdkfd_pre_reset(adev);
amdgpu_amdkfd_pre_reset(adev); amdgpu_amdkfd_pre_reset(adev);
...@@ -4503,7 +4499,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, ...@@ -4503,7 +4499,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev); amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev); r = amdgpu_ib_ring_tests(adev);
if (!adev->enable_mes)
amdgpu_amdkfd_post_reset(adev); amdgpu_amdkfd_post_reset(adev);
} }
...@@ -5149,7 +5144,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev, ...@@ -5149,7 +5144,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
cancel_delayed_work_sync(&tmp_adev->delayed_init_work); cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
if (!amdgpu_sriov_vf(tmp_adev) && !adev->enable_mes) if (!amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_pre_reset(tmp_adev); amdgpu_amdkfd_pre_reset(tmp_adev);
/* /*
...@@ -5272,8 +5267,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev, ...@@ -5272,8 +5267,7 @@ int amdgpu_device_gpu_recover_imp(struct amdgpu_device *adev,
skip_sched_resume: skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) { list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* unlock kfd: SRIOV would do it separately */ /* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev) && if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
!adev->enable_mes)
amdgpu_amdkfd_post_reset(tmp_adev); amdgpu_amdkfd_post_reset(tmp_adev);
/* kfd_post_reset will do nothing if kfd device is not initialized, /* kfd_post_reset will do nothing if kfd device is not initialized,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment