Commit cace4bff authored by Hawking Zhang, committed by Alex Deucher

drm/amdgpu: check df_funcs and its callback pointers

in case they are not available in the early phase
Signed-off-by: Hawking Zhang <Hawking.Zhang@amd.com>
Reviewed-by: Le Ma <Le.Ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4ac955ba
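Every hunk in this commit applies the same defensive pattern: adev->df.funcs may still be NULL when these paths run early in device bring-up, and even once the table is installed a given callback may be left unimplemented for a particular DF version, so each call site checks both the table pointer and the specific callback before dereferencing them. The standalone C sketch below illustrates the pattern outside the driver; the names (df_funcs_demo, device_demo, demo_pmc_start) are invented for illustration and are not amdgpu definitions.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for a per-IP callback table such as adev->df.funcs. */
struct df_funcs_demo {
        int (*pmc_start)(void *dev, int counter);
        int (*pmc_stop)(void *dev, int counter);
};

struct device_demo {
        /* Stays NULL until the owning IP block installs its callback table. */
        const struct df_funcs_demo *df_funcs;
};

/* Guarded call: bail out if the table or the specific callback is missing,
 * mirroring the checks this commit adds in front of every df.funcs use. */
static int demo_pmc_start(struct device_demo *dev, int counter)
{
        if (!dev->df_funcs || !dev->df_funcs->pmc_start)
                return -EINVAL;

        return dev->df_funcs->pmc_start(dev, counter);
}

int main(void)
{
        struct device_demo dev = { .df_funcs = NULL };

        /* Early phase: the table is not registered yet, so the guard turns
         * what would be a NULL-pointer dereference into a clean error. */
        if (demo_pmc_start(&dev, 0) == -EINVAL)
                printf("pmc_start skipped: df_funcs not available yet\n");

        return 0;
}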
@@ -233,6 +233,10 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
         if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                 return;
 
+        if ((!pe->adev->df.funcs) ||
+            (!pe->adev->df.funcs->pmc_start))
+                return;
+
         WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
         hwc->state = 0;
@@ -268,6 +272,10 @@ static void amdgpu_perf_read(struct perf_event *event)
                                                   pmu);
         u64 count, prev;
 
+        if ((!pe->adev->df.funcs) ||
+            (!pe->adev->df.funcs->pmc_get_count))
+                return;
+
         do {
                 prev = local64_read(&hwc->prev_count);
@@ -297,6 +305,10 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
         if (hwc->state & PERF_HES_UPTODATE)
                 return;
 
+        if ((!pe->adev->df.funcs) ||
+            (!pe->adev->df.funcs->pmc_stop))
+                return;
+
         switch (hwc->config_base) {
         case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
         case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
@@ -326,6 +338,10 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
                                                   struct amdgpu_pmu_entry,
                                                   pmu);
 
+        if ((!pe->adev->df.funcs) ||
+            (!pe->adev->df.funcs->pmc_start))
+                return -EINVAL;
+
         switch (pe->pmu_perf_type) {
         case AMDGPU_PMU_PERF_TYPE_DF:
                 hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
@@ -371,6 +387,9 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
         struct amdgpu_pmu_entry *pe = container_of(event->pmu,
                                                   struct amdgpu_pmu_entry,
                                                   pmu);
+        if ((!pe->adev->df.funcs) ||
+            (!pe->adev->df.funcs->pmc_stop))
+                return;
 
         amdgpu_perf_stop(event, PERF_EF_UPDATE);
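One detail worth noting in the PMU hunks above: amdgpu_perf_add() bails out with -EINVAL because the perf core's struct pmu .add hook returns an int, while the start/stop/read/del hooks are void, so their guards can only return silently. A toy C sketch of that split follows; toy_backend_ops, toy_add and toy_start are made-up names, not perf or amdgpu API.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for a PMU backend: the "add" path can report failure,
 * the "start" path cannot. */
struct toy_backend_ops {
        int  (*counter_enable)(int counter);
        void (*counter_start)(int counter);
};

static const struct toy_backend_ops *backend; /* may be NULL early on */

/* int-returning path: propagate -EINVAL when the backend is missing,
 * like the guard added to amdgpu_perf_add(). */
static int toy_add(int counter)
{
        if (!backend || !backend->counter_enable)
                return -EINVAL;
        return backend->counter_enable(counter);
}

/* void path: nothing to propagate, so just return early, like the guards
 * in amdgpu_perf_start(), amdgpu_perf_stop(), amdgpu_perf_read() and
 * amdgpu_perf_del(). */
static void toy_start(int counter)
{
        if (!backend || !backend->counter_start)
                return;
        backend->counter_start(counter);
}

int main(void)
{
        if (toy_add(0) == -EINVAL)
                printf("add rejected: backend not registered\n");
        toy_start(0); /* silently a no-op for the same reason */
        return 0;
}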
@@ -265,6 +265,11 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
         ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
         ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
 
+        if ((!adev->df.funcs) ||
+            (!adev->df.funcs->get_fica) ||
+            (!adev->df.funcs->set_fica))
+                return -EINVAL;
+
         fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
         if (fica_out != 0x1f)
                 pr_err("xGMI error counters not enabled!\n");
@@ -1294,7 +1294,8 @@ static int gmc_v9_0_late_init(void *handle)
         if (!amdgpu_sriov_vf(adev) &&
             (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
                 if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
-                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+                        if (adev->df.funcs &&
+                            adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                 }
         }
@@ -1505,9 +1506,11 @@ static int gmc_v9_0_sw_init(void *handle)
                         chansize = 64;
                 else
                         chansize = 128;
-
-                numchan = adev->df.funcs->get_hbm_channel_number(adev);
-                adev->gmc.vram_width = numchan * chansize;
+                if (adev->df.funcs &&
+                    adev->df.funcs->get_hbm_channel_number) {
+                        numchan = adev->df.funcs->get_hbm_channel_number(adev);
+                        adev->gmc.vram_width = numchan * chansize;
+                }
         }
 
         adev->gmc.vram_type = vram_type;
@@ -1238,7 +1238,9 @@ static int soc15_common_sw_init(void *handle)
         if (amdgpu_sriov_vf(adev))
                 xgpu_ai_mailbox_add_irq_id(adev);
 
-        adev->df.funcs->sw_init(adev);
+        if (adev->df.funcs &&
+            adev->df.funcs->sw_init)
+                adev->df.funcs->sw_init(adev);
 
         return 0;
 }
@@ -1250,7 +1252,10 @@ static int soc15_common_sw_fini(void *handle)
         if (adev->nbio.ras_funcs &&
             adev->nbio.ras_funcs->ras_fini)
                 adev->nbio.ras_funcs->ras_fini(adev);
-        adev->df.funcs->sw_fini(adev);
+
+        if (adev->df.funcs &&
+            adev->df.funcs->sw_fini)
+                adev->df.funcs->sw_fini(adev);
 
         return 0;
 }