Commit 4b0cb230 authored by Tao Zhou, committed by Alex Deucher

drm/amdgpu: retire UMC v12 mca_addr_to_pa

The RAS TA will handle the MCA address conversion; the driver-side function is no longer needed.
Signed-off-by: Tao Zhou <tao.zhou1@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f6ac0842
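
For context: after this change, umc_v12_0_convert_error_address() relies entirely on the PSP RAS TA for the MCA-to-physical-address translation. A minimal sketch of that query path, distilled from the call sites visible in the diff below; the helper name and return code are illustrative, not driver code:

```c
/* Sketch: translate an MCA error address to a SoC physical address by
 * asking the RAS TA, mirroring the call in umc_v12_0_convert_error_address().
 */
static int query_soc_pa_from_ta(struct amdgpu_device *adev,
				uint64_t err_addr, uint32_t ch_inst,
				uint32_t umc_inst, uint32_t node_inst,
				uint64_t *soc_pa)
{
	struct ta_ras_query_address_input addr_in = { 0 };
	struct ta_ras_query_address_output addr_out = { 0 };

	addr_in.addr_type = TA_RAS_MCA_TO_PA;
	addr_in.ma.err_addr = err_addr;
	addr_in.ma.ch_inst = ch_inst;
	addr_in.ma.umc_inst = umc_inst;
	addr_in.ma.node_inst = node_inst;

	/* The TA owns the translation; there is no software fallback. */
	if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out))
		return -EINVAL;

	*soc_pa = addr_out.pa.pa;
	return 0;
}
```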
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@@ -1457,7 +1457,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
 		adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET;
 		adev->umc.active_mask = adev->aid_mask;
 		adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL;
-		adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0];
 		if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
 			adev->umc.ras = &umc_v12_0_ras;
 		break;
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c

@@ -28,28 +28,6 @@
 #include "umc/umc_12_0_0_sh_mask.h"
 #include "mp/mp_13_0_6_sh_mask.h"
 
-const uint32_t
-	umc_v12_0_channel_idx_tbl[]
-		[UMC_V12_0_UMC_INSTANCE_NUM]
-		[UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
-	{{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
-	 {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
-	{{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
-	 {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
-	{{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
-	 {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
-	{{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
-	 {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
-};
-
-/* mapping of MCA error address to normalized address */
-static const uint32_t umc_v12_0_ma2na_mapping[] = {
-	0,  5,  6,  8,  9,  14, 12, 13,
-	10, 11, 15, 16, 17, 18, 19, 20,
-	21, 22, 23, 24, 25, 26, 27, 28,
-	24, 7,  29, 30,
-};
-
 static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev,
 					uint32_t node_inst,
 					uint32_t umc_inst,
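
The retired umc_v12_0_ma2na_mapping above is a bit-permutation table: entry i names the normalized-address bit that receives bit i of the (bank-corrected) MCA address. A standalone sketch of how such a table is applied, matching the loop removed further down (generic helper, not driver code):

```c
#include <stddef.h>
#include <stdint.h>

/* Scatter bits of src into a new word: bit i of src lands at position
 * map[i] in the result. The retired UMC v12 loop started at i = 1,
 * deliberately dropping bit 0, so this does the same.
 */
static uint64_t apply_bit_map(uint64_t src, const uint32_t *map, size_t n)
{
	uint64_t out = 0;
	size_t i;

	for (i = 1; i < n; i++)
		out |= ((src >> i) & 0x1ULL) << map[i];

	return out;
}
```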
@@ -192,79 +170,6 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev,
 	umc_v12_0_reset_error_count(adev);
 }
 
-static bool umc_v12_0_bit_wise_xor(uint32_t val)
-{
-	bool result = 0;
-	int i;
-
-	for (i = 0; i < 32; i++)
-		result = result ^ ((val >> i) & 0x1);
-
-	return result;
-}
-
-static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev,
-			uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst,
-			uint32_t node_inst,
-			struct ta_ras_query_address_output *addr_out)
-{
-	uint32_t channel_index, i;
-	uint64_t na, soc_pa;
-	uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
-	uint32_t bank0, bank1, bank2, bank3, bank;
-
-	bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
-	bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
-	bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
-	bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
-	col = (err_addr >> 1) & 0x1fULL;
-	row = (err_addr >> 10) & 0x3fffULL;
-
-	/* apply bank hash algorithm */
-	bank0 =
-		bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
-		(umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
-		(umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
-	bank1 =
-		bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
-		(umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
-		(umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
-	bank2 =
-		bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
-		(umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
-		(umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
-	bank3 =
-		bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
-		(umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
-		(umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
-	bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
-
-	err_addr &= ~0x3c0ULL;
-	err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
-
-	na = 0x0;
-	/* convert mca error address to normalized address */
-	for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
-		na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
-
-	channel_index =
-		adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num *
-			adev->umc.channel_inst_num +
-			umc_inst * adev->umc.channel_inst_num +
-			ch_inst];
-	/* translate umc channel address to soc pa, 3 parts are included */
-	soc_pa = ADDR_OF_32KB_BLOCK(na) |
-		ADDR_OF_256B_BLOCK(channel_index) |
-		OFFSET_IN_256B_BLOCK(na);
-
-	/* the umc channel bits are not original values, they are hashed */
-	UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
-
-	addr_out->pa.pa = soc_pa;
-	addr_out->pa.bank = bank;
-	addr_out->pa.channel_idx = channel_index;
-}
-
 static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
 					struct ras_err_data *err_data,
 					struct ta_ras_query_address_input *addr_in)
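
Two notes on the block removed above. umc_v12_0_bit_wise_xor() is a parity fold: it XORs all 32 bits of its argument into a single bit. Each bank bit is then the recorded bank-hash bit corrected by the parity of masked column/row bits, gated by a per-bit enable. A compact standalone equivalent (the helper names are mine; __builtin_parity is a GCC/Clang builtin used for brevity):

```c
#include <stdbool.h>
#include <stdint.h>

/* Parity of a 32-bit word: true if an odd number of bits are set.
 * Equivalent to the loop in the retired umc_v12_0_bit_wise_xor().
 */
static bool parity32(uint32_t val)
{
	return __builtin_parity(val);
}

/* One bank bit of the retired hash: the recorded bank-hash bit is
 * XORed with the parity of the masked column and row bits, and the
 * correction is applied only when xor_en is 1.
 */
static uint32_t bank_bit(uint32_t bank_hash, uint32_t col, uint32_t row,
			 uint32_t xor_en, uint32_t col_xor, uint32_t row_xor)
{
	return bank_hash ^ (xor_en & (parity32(col & col_xor) ^
				      parity32(row & row_xor)));
}
```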
@@ -275,10 +180,12 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev,
 	err_addr = addr_in->ma.err_addr;
 	addr_in->addr_type = TA_RAS_MCA_TO_PA;
 
-	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out))
-		/* fallback to old path if fail to get pa from psp */
-		umc_v12_0_mca_addr_to_pa(adev, err_addr, addr_in->ma.ch_inst,
-				addr_in->ma.umc_inst, addr_in->ma.node_inst, &addr_out);
+	if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) {
+		dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx",
+			err_addr);
+
+		return;
+	}
 
 	soc_pa = addr_out.pa.pa;
 	bank = addr_out.pa.bank;
drivers/gpu/drm/amd/amdgpu/umc_v12_0.h

@@ -55,67 +55,12 @@
 #define UMC_V12_0_NA_MAP_PA_NUM	8
 /* R13 bit shift should be considered, double the number */
 #define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
 
-/* bank bits in MCA error address */
-#define UMC_V12_0_MCA_B0_BIT 6
-#define UMC_V12_0_MCA_B1_BIT 7
-#define UMC_V12_0_MCA_B2_BIT 8
-#define UMC_V12_0_MCA_B3_BIT 9
-
 /* column bits in SOC physical address */
 #define UMC_V12_0_PA_C2_BIT 15
 #define UMC_V12_0_PA_C4_BIT 21
 /* row bits in SOC physical address */
 #define UMC_V12_0_PA_R13_BIT 35
-/* channel index bits in SOC physical address */
-#define UMC_V12_0_PA_CH4_BIT 12
-#define UMC_V12_0_PA_CH5_BIT 13
-#define UMC_V12_0_PA_CH6_BIT 14
-
-/* bank hash settings */
-#define UMC_V12_0_XOR_EN0 1
-#define UMC_V12_0_XOR_EN1 1
-#define UMC_V12_0_XOR_EN2 1
-#define UMC_V12_0_XOR_EN3 1
-#define UMC_V12_0_COL_XOR0 0x0
-#define UMC_V12_0_COL_XOR1 0x0
-#define UMC_V12_0_COL_XOR2 0x800
-#define UMC_V12_0_COL_XOR3 0x1000
-#define UMC_V12_0_ROW_XOR0 0x11111
-#define UMC_V12_0_ROW_XOR1 0x22222
-#define UMC_V12_0_ROW_XOR2 0x4444
-#define UMC_V12_0_ROW_XOR3 0x8888
-
-/* channel hash settings */
-#define UMC_V12_0_HASH_4K 0
-#define UMC_V12_0_HASH_64K 1
-#define UMC_V12_0_HASH_2M 1
-#define UMC_V12_0_HASH_1G 1
-#define UMC_V12_0_HASH_1T 1
-
-/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA),
- * hash bit is only effective when related setting is enabled
- */
-#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
-			(((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
-			(((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
-			(((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
-			(((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
-#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
-			(((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
-			(((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
-			(((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
-			(((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
-#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
-			(((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
-			(((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
-			(((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
-			(((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
-			(((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_4K))
-#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
-		(pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
-		(pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
-		(pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
-		(pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
-	} while (0)
 
 #define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \
 					(((_ipid_lo) >> 12) & 0xF))
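
The retired UMC_V12_0_SET_CHANNEL_HASH above re-derives the channel-select bits CH4..CH6 (PA bits 12..14) by XOR-folding channel-index bits against higher PA bits taken at 64K/2M/1G/1T strides, each term gated by a per-granularity enable. A plain-function sketch of the same idea with the enables already folded in (UMC_V12_0_HASH_4K is 0, so the 4K term for CH6 drops out); illustrative, not the driver implementation:

```c
#include <stdint.h>

/* XOR-fold selected high PA bits into the three channel bits and
 * write them back into PA bits 12..14, mirroring the bit positions
 * of the removed UMC_V12_0_CHANNEL_HASH_CH4/5/6 macros.
 */
static uint64_t set_channel_hash(uint32_t channel_idx, uint64_t pa)
{
	uint64_t ch4, ch5, ch6;

	ch4 = ((channel_idx >> 5) & 0x1) ^ ((pa >> 20) & 0x1) ^
	      ((pa >> 27) & 0x1) ^ ((pa >> 34) & 0x1) ^ ((pa >> 41) & 0x1);
	ch5 = ((channel_idx >> 6) & 0x1) ^ ((pa >> 21) & 0x1) ^
	      ((pa >> 28) & 0x1) ^ ((pa >> 35) & 0x1) ^ ((pa >> 42) & 0x1);
	ch6 = ((channel_idx >> 4) & 0x1) ^ ((pa >> 19) & 0x1) ^
	      ((pa >> 26) & 0x1) ^ ((pa >> 33) & 0x1) ^ ((pa >> 40) & 0x1);

	pa &= ~(0x7ULL << 12);		/* clear CH4..CH6 */
	pa |= (ch4 << 12) | (ch5 << 13) | (ch6 << 14);

	return pa;
}
```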
@@ -127,11 +72,6 @@ bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_
 typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status);
 
-extern const uint32_t
-	umc_v12_0_channel_idx_tbl[]
-		[UMC_V12_0_UMC_INSTANCE_NUM]
-		[UMC_V12_0_CHANNEL_INSTANCE_NUM];
-
 extern struct amdgpu_umc_ras umc_v12_0_ras;
 
 #endif