Commit 8ab0d6f0 authored by Luben Tuikov, committed by Alex Deucher

drm/amdgpu: Rename to ras_*_enabled

Rename,
  ras_hw_supported --> ras_hw_enabled, and
  ras_features     --> ras_enabled,
to show that ras_enabled is a subset of
ras_hw_enabled, which itself is a subset
of the ASIC capability.
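
The invariant behind the new names is established in amdgpu_ras_check_supported() (see the @@ -2123 hunk below). A minimal sketch of the resulting relationship, using only names that appear in this diff:

/* After amdgpu_ras_check_supported():
 *
 *   adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
 *   adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
 *           adev->ras_hw_enabled & amdgpu_ras_mask;
 *
 * Every bit set in ras_enabled is therefore also set in ras_hw_enabled,
 * and ras_hw_enabled only carries blocks the ASIC/vBIOS actually reports.
 */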

Cc: Alexander Deucher <Alexander.Deucher@amd.com>
Cc: John Clements <john.clements@amd.com>
Cc: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Luben Tuikov <luben.tuikov@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: John Clements <John.Clements@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e509965e
@@ -1073,8 +1073,8 @@ struct amdgpu_device {
         atomic_t throttling_logging_enabled;
         struct ratelimit_state throttling_logging_rs;
-        uint32_t ras_hw_supported;
-        uint32_t ras_features;
+        uint32_t ras_hw_enabled;
+        uint32_t ras_enabled;
 
         bool in_pci_err_recovery;
         struct pci_saved_state *pci_state;
...
@@ -5108,7 +5108,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
         if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
                 return -ENOTSUPP;
 
-        if (ras && adev->ras_features &&
+        if (ras && adev->ras_enabled &&
             adev->nbio.funcs->enable_doorbell_interrupt)
                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
@@ -5128,7 +5128,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
         if (ret)
                 return ret;
 
-        if (ras && adev->ras_features &&
+        if (ras && adev->ras_enabled &&
             adev->nbio.funcs->enable_doorbell_interrupt)
                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
...
@@ -986,7 +986,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 if (!ras)
                         return -EINVAL;
-                ras_mask = (uint64_t)adev->ras_features << 32 | ras->features;
+                ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;
 
                 return copy_to_user(out, &ras_mask,
                                 min_t(u64, size, sizeof(ras_mask))) ?
...
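
For reference, a hedged sketch of consuming the packed value above from userspace; the bit layout comes from this hunk, while the surrounding query path (the AMDGPU_INFO ioctl) is assumed context rather than part of the diff:

#include <stdint.h>

/* Sketch only: split the 64-bit mask built in amdgpu_info_ioctl().
 * High 32 bits mirror adev->ras_enabled, low 32 bits ras->features. */
static inline void split_ras_mask(uint64_t ras_mask,
                                  uint32_t *enabled, uint32_t *features)
{
        *enabled  = (uint32_t)(ras_mask >> 32); /* adev->ras_enabled */
        *features = (uint32_t)ras_mask;         /* ras->features */
}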
@@ -2146,7 +2146,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
                 return 0;
 
         if ((amdgpu_in_reset(adev) &&
-             ras && adev->ras_features &&
+             ras && adev->ras_enabled &&
              (adev->asic_type == CHIP_ARCTURUS ||
               adev->asic_type == CHIP_VEGA20)) ||
             (adev->in_runpm &&
...
@@ -532,7 +532,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
         struct ras_manager *obj;
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return NULL;
 
         if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
@@ -559,7 +559,7 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
         struct ras_manager *obj;
         int i;
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return NULL;
 
         if (head) {
@@ -613,7 +613,7 @@ static void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
                 struct ras_common_if *head)
 {
-        return adev->ras_hw_supported & BIT(head->block);
+        return adev->ras_hw_enabled & BIT(head->block);
 }
 
 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
@@ -767,7 +767,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
                         ret = amdgpu_ras_feature_enable(adev, head, 0);
 
                         /* clean gfx block ras features flag */
-                        if (adev->ras_features && head->block == AMDGPU_RAS_BLOCK__GFX)
+                        if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
                                 con->features &= ~BIT(head->block);
                 }
         } else
@@ -1072,7 +1072,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
         struct ras_manager *obj;
         struct ras_err_data data = {0, 0};
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return 0;
 
         list_for_each_entry(obj, &con->head, node) {
@@ -1595,7 +1595,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
         struct ras_manager *obj;
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return;
 
         list_for_each_entry(obj, &con->head, node) {
@@ -1645,7 +1645,7 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
         struct ras_manager *obj;
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return;
 
         list_for_each_entry(obj, &con->head, node) {
@@ -1959,7 +1959,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
         bool exc_err_limit = false;
         int ret;
 
-        if (adev->ras_features && con)
+        if (adev->ras_enabled && con)
                 data = &con->eh_data;
         else
                 return 0;
@@ -2076,7 +2076,7 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
 
         if (strnstr(ctx->vbios_version, "D16406",
                     sizeof(ctx->vbios_version)))
-                adev->ras_hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX);
+                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
 }
 
 /*
@@ -2090,7 +2090,7 @@ static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
  */
 static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
 {
-        adev->ras_hw_supported = adev->ras_features = 0;
+        adev->ras_hw_enabled = adev->ras_enabled = 0;
 
         if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
             !amdgpu_ras_asic_supported(adev))
@@ -2099,7 +2099,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
         if (!adev->gmc.xgmi.connected_to_cpu) {
                 if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
                         dev_info(adev->dev, "MEM ECC is active.\n");
-                        adev->ras_hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+                        adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
                                                    1 << AMDGPU_RAS_BLOCK__DF);
                 } else {
                         dev_info(adev->dev, "MEM ECC is not presented.\n");
@@ -2107,7 +2107,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
         if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
                 dev_info(adev->dev, "SRAM ECC is active.\n");
-                adev->ras_hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+                adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
                                             1 << AMDGPU_RAS_BLOCK__DF);
         } else {
                 dev_info(adev->dev, "SRAM ECC is not presented.\n");
@@ -2115,7 +2115,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
         } else {
                 /* driver only manages a few IP blocks RAS feature
                  * when GPU is connected cpu through XGMI */
-                adev->ras_hw_supported |= (1 << AMDGPU_RAS_BLOCK__GFX |
+                adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
                                            1 << AMDGPU_RAS_BLOCK__SDMA |
                                            1 << AMDGPU_RAS_BLOCK__MMHUB);
         }
@@ -2123,10 +2123,10 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
         amdgpu_ras_get_quirks(adev);
 
         /* hw_supported needs to be aligned with RAS block mask. */
-        adev->ras_hw_supported &= AMDGPU_RAS_BLOCK_MASK;
+        adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
 
-        adev->ras_features = amdgpu_ras_enable == 0 ? 0 :
-                adev->ras_hw_supported & amdgpu_ras_mask;
+        adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
+                adev->ras_hw_enabled & amdgpu_ras_mask;
 }
 
 int amdgpu_ras_init(struct amdgpu_device *adev)
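
As a worked example of the derivation above (the values are illustrative, not taken from the commit): if the vBIOS reports UMC, DF and GFX support but the amdgpu_ras_mask module parameter keeps only the UMC bit, the final ras_enabled reduces to UMC:

        /* Illustrative values only; bit positions per enum amdgpu_ras_block. */
        uint32_t hw  = BIT(AMDGPU_RAS_BLOCK__UMC) | BIT(AMDGPU_RAS_BLOCK__DF) |
                       BIT(AMDGPU_RAS_BLOCK__GFX);            /* ras_hw_enabled */
        uint32_t msk = BIT(AMDGPU_RAS_BLOCK__UMC);            /* amdgpu_ras_mask */
        uint32_t en  = amdgpu_ras_enable == 0 ? 0 : (hw & msk); /* UMC bit only */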
@@ -2149,11 +2149,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
 
         amdgpu_ras_check_supported(adev);
 
-        if (!adev->ras_hw_supported || adev->asic_type == CHIP_VEGA10) {
+        if (!adev->ras_hw_enabled || adev->asic_type == CHIP_VEGA10) {
                 /* set gfx block ras context feature for VEGA20 Gaming
                  * send ras disable cmd to ras ta during ras late init.
                  */
-                if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) {
+                if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
                         con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
 
                         return 0;
@@ -2204,7 +2204,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
         dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
                  "hardware ability[%x] ras_mask[%x]\n",
-                 adev->ras_hw_supported, adev->ras_features);
+                 adev->ras_hw_enabled, adev->ras_enabled);
 
         return 0;
 release_con:
@@ -2319,7 +2319,7 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
         struct ras_manager *obj, *tmp;
 
-        if (!adev->ras_features || !con) {
+        if (!adev->ras_enabled || !con) {
                 /* clean ras context for VEGA20 Gaming after send ras disable cmd */
                 amdgpu_release_ras_context(adev);
@@ -2365,7 +2365,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
 {
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return;
 
         amdgpu_ras_disable_all_features(adev, 0);
@@ -2379,7 +2379,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
 {
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return 0;
 
         /* Need disable ras on all IPs here before ip [hw/sw]fini */
@@ -2392,7 +2392,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 {
         struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-        if (!adev->ras_features || !con)
+        if (!adev->ras_enabled || !con)
                 return 0;
 
         amdgpu_ras_fs_fini(adev);
@@ -2412,7 +2412,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
 void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
 {
         amdgpu_ras_check_supported(adev);
-        if (!adev->ras_hw_supported)
+        if (!adev->ras_hw_enabled)
                 return;
 
         if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
@@ -2441,7 +2441,7 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev)
         if (!con)
                 return;
 
-        if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
+        if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
                 con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
                 amdgpu_ras_set_context(adev, NULL);
                 kfree(con);
...
@@ -475,7 +475,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
         if (block >= AMDGPU_RAS_BLOCK_COUNT)
                 return 0;
-        return ras && (adev->ras_features & (1 << block));
+        return ras && (adev->ras_enabled & (1 << block));
 }
 
 int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
...
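
A hedged usage sketch for the renamed helper; guards of this shape exist throughout the driver, but this exact snippet is illustrative rather than quoted from the diff:

        /* Illustrative guard: skip RAS-specific setup when the block's bit
         * did not survive into adev->ras_enabled (masked off or unsupported). */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
                return 0;
        /* ... proceed with UMC RAS init ... */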
@@ -1262,7 +1262,7 @@ static int gmc_v9_0_late_init(void *handle)
          * writes, while disables HBM ECC for vega10.
          */
         if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
-                if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
+                if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                         if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                 adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
                 }
...
@@ -655,7 +655,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
         int ret = 0;
 
         /* avoid NBIF got stuck when do RAS recovery in BACO reset */
-        if (ras && adev->ras_features)
+        if (ras && adev->ras_enabled)
                 adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
         ret = amdgpu_dpm_baco_reset(adev);
@@ -663,7 +663,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
                 return ret;
 
         /* re-enable doorbell interrupt after BACO exit */
-        if (ras && adev->ras_features)
+        if (ras && adev->ras_enabled)
                 adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
         return 0;
@@ -710,7 +710,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
                  * 1. PMFW version > 0x284300: all cases use baco
                  * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
                  */
-                if (ras && adev->ras_features &&
+                if (ras && adev->ras_enabled &&
                     adev->pm.fw_version <= 0x283400)
                         baco_reset = false;
                 break;
...
@@ -1430,13 +1430,13 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
                 adev = (struct amdgpu_device *)(dev->gpu->kgd);
                 /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
                 dev->node_props.capability |=
-                        ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
+                        ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
                         HSA_CAP_SRAM_EDCSUPPORTED : 0;
-                dev->node_props.capability |= ((adev->ras_features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+                dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
                         HSA_CAP_MEM_EDCSUPPORTED : 0;
 
                 if (adev->asic_type != CHIP_VEGA10)
-                        dev->node_props.capability |= (adev->ras_features != 0) ?
+                        dev->node_props.capability |= (adev->ras_enabled != 0) ?
                                 HSA_CAP_RASEVENTNOTIFY : 0;
 
                 /* SVM API and HMM page migration work together, device memory type
...
@@ -85,7 +85,7 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
                 return 0;
 
         if (state == BACO_STATE_IN) {
-                if (!ras || !adev->ras_features) {
+                if (!ras || !adev->ras_enabled) {
                         data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
                         data |= 0x80000000;
                         WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
...
@@ -1531,7 +1531,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
                                               NULL);
                 break;
         default:
-                if (!ras || !adev->ras_features ||
+                if (!ras || !adev->ras_enabled ||
                     adev->gmc.xgmi.pending_reset) {
                         if (adev->asic_type == CHIP_ARCTURUS) {
                                 data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
...