Commit 9b008fb7 authored by Rex Zhu, committed by Alex Deucher

drm/amdgpu: Remove FW_LOAD_DIRECT type support on VI

AMDGPU_FW_LOAD_DIRECT was only used for bring-up.
It no longer works, so remove the support.

v2: Add a warning message if the user selects
    AMDGPU_FW_LOAD_DIRECT/AMDGPU_FW_LOAD_PSP on VI.
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 825da4d9
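For reference: the load-type selection hunk below is in amdgpu_ucode.c, and the gfx_v8_0_*/sdma_v3_0_* hunks are in gfx_v8_0.c and sdma_v3_0.c. The firmware load types are an enum in amdgpu_ucode.h; the sketch below reflects the definition of that era (AMDGPU_FW_LOAD_DIRECT is value 0, which is why the old VI branch could simply test load_type == 0), and the requested type normally comes from the amdgpu.fw_load_type module parameter (-1 = auto):

	enum amdgpu_firmware_load_type {
		AMDGPU_FW_LOAD_DIRECT = 0,	/* CPU writes ucode through the IP's UCODE_ADDR/DATA registers */
		AMDGPU_FW_LOAD_SMU,		/* SMU loads ucode from a GPU buffer ("front door") */
		AMDGPU_FW_LOAD_PSP,		/* PSP-managed loading (SOC15 parts) */
	};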
@@ -297,10 +297,9 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
 	case CHIP_VEGAM:
-		if (!load_type)
-			return AMDGPU_FW_LOAD_DIRECT;
-		else
-			return AMDGPU_FW_LOAD_SMU;
+		if (load_type != AMDGPU_FW_LOAD_SMU)
+			pr_warning("%d is not supported on VI\n", load_type);
+		return AMDGPU_FW_LOAD_SMU;
 	case CHIP_VEGA10:
 	case CHIP_RAVEN:
 	case CHIP_VEGA12:
...
@@ -1173,64 +1173,61 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	}
 	}
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
-		info->fw = adev->gfx.pfp_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_PFP;
+	info->fw = adev->gfx.pfp_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
-		info->fw = adev->gfx.me_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_ME;
+	info->fw = adev->gfx.me_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
-		info->fw = adev->gfx.ce_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_CE;
+	info->fw = adev->gfx.ce_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
-		info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
-		info->fw = adev->gfx.rlc_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+	info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+	info->fw = adev->gfx.rlc_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
-		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
-		info->fw = adev->gfx.mec_fw;
-		header = (const struct common_firmware_header *)info->fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
+	info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
+	info->fw = adev->gfx.mec_fw;
+	header = (const struct common_firmware_header *)info->fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 
-		/* we need account JT in */
-		cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-		adev->firmware.fw_size +=
-			ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
+	/* we need account JT in */
+	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+	adev->firmware.fw_size +=
+		ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
 
-		if (amdgpu_sriov_vf(adev)) {
-			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
-			info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
-			info->fw = adev->gfx.mec_fw;
-			adev->firmware.fw_size +=
-				ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
-		}
+	if (amdgpu_sriov_vf(adev)) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE];
+		info->ucode_id = AMDGPU_UCODE_ID_STORAGE;
+		info->fw = adev->gfx.mec_fw;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE);
+	}
 
-		if (adev->gfx.mec2_fw) {
-			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
-			info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
-			info->fw = adev->gfx.mec2_fw;
-			header = (const struct common_firmware_header *)info->fw->data;
-			adev->firmware.fw_size +=
-				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-		}
-
-	}
+	if (adev->gfx.mec2_fw) {
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2];
+		info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+		info->fw = adev->gfx.mec2_fw;
+		header = (const struct common_firmware_header *)info->fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+	}
 
 out:
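All of the per-firmware blocks above follow one pattern, which this hunk simply makes unconditional now that SMU loading is the only path on VI: point an amdgpu_firmware_info slot at the blob and add its page-aligned size to adev->firmware.fw_size. A hedged sketch of that pattern as a helper, assuming the existing amdgpu headers are in scope (the helper name is illustrative; the patch keeps the open-coded form):

	/* Illustrative only -- not part of the patch. */
	static void vi_fw_info_fill(struct amdgpu_device *adev,
				    enum AMDGPU_UCODE_ID id,
				    const struct firmware *fw)
	{
		struct amdgpu_firmware_info *info = &adev->firmware.ucode[id];
		const struct common_firmware_header *header =
			(const struct common_firmware_header *)fw->data;

		info->ucode_id = id;
		info->fw = fw;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}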
@@ -4176,45 +4173,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
 	udelay(50);
 }
 
-static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
-{
-	const struct rlc_firmware_header_v2_0 *hdr;
-	const __le32 *fw_data;
-	unsigned i, fw_size;
-
-	if (!adev->gfx.rlc_fw)
-		return -EINVAL;
-
-	hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
-	amdgpu_ucode_print_rlc_hdr(&hdr->header);
-
-	fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
-			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-
-	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);
-
-	return 0;
-}
-
 static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
 {
-	int r;
-
 	gfx_v8_0_rlc_stop(adev);
 	gfx_v8_0_rlc_reset(adev);
 	gfx_v8_0_init_pg(adev);
-
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		/* legacy rlc firmware loading */
-		r = gfx_v8_0_rlc_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
 	gfx_v8_0_rlc_start(adev);
 
 	return 0;
@@ -4240,63 +4203,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
 	udelay(50);
 }
 
-static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
-{
-	const struct gfx_firmware_header_v1_0 *pfp_hdr;
-	const struct gfx_firmware_header_v1_0 *ce_hdr;
-	const struct gfx_firmware_header_v1_0 *me_hdr;
-	const __le32 *fw_data;
-	unsigned i, fw_size;
-
-	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
-		return -EINVAL;
-
-	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)
-		adev->gfx.pfp_fw->data;
-	ce_hdr = (const struct gfx_firmware_header_v1_0 *)
-		adev->gfx.ce_fw->data;
-	me_hdr = (const struct gfx_firmware_header_v1_0 *)
-		adev->gfx.me_fw->data;
-
-	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
-	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
-	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
-
-	gfx_v8_0_cp_gfx_enable(adev, false);
-
-	/* PFP */
-	fw_data = (const __le32 *)
-		(adev->gfx.pfp_fw->data +
-		le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
-	WREG32(mmCP_PFP_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);
-
-	/* CE */
-	fw_data = (const __le32 *)
-		(adev->gfx.ce_fw->data +
-		le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
-	WREG32(mmCP_CE_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);
-
-	/* ME */
-	fw_data = (const __le32 *)
-		(adev->gfx.me_fw->data +
-		le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
-	WREG32(mmCP_ME_RAM_WADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
-	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);
-
-	return 0;
-}
-
 static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev)
 {
 	u32 count = 0;
@@ -4496,52 +4402,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
 	udelay(50);
 }
 
-static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
-{
-	const struct gfx_firmware_header_v1_0 *mec_hdr;
-	const __le32 *fw_data;
-	unsigned i, fw_size;
-
-	if (!adev->gfx.mec_fw)
-		return -EINVAL;
-
-	gfx_v8_0_cp_compute_enable(adev, false);
-
-	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
-
-	fw_data = (const __le32 *)
-		(adev->gfx.mec_fw->data +
-		le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
-	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
-
-	/* MEC1 */
-	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
-	for (i = 0; i < fw_size; i++)
-		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i));
-	WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version);
-
-	/* Loading MEC2 firmware is only necessary if MEC2 should run different
-	 * microcode than MEC1. */
-	if (adev->gfx.mec2_fw) {
-		const struct gfx_firmware_header_v1_0 *mec2_hdr;
-
-		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
-		fw_data = (const __le32 *)
-			(adev->gfx.mec2_fw->data +
-			le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
-		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
-		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
-		for (i = 0; i < fw_size; i++)
-			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i));
-		WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version);
-	}
-
-	return 0;
-}
-
 /* KIQ functions */
 static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring)
 {
@@ -4975,17 +4835,6 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
 	if (!(adev->flags & AMD_IS_APU))
 		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		/* legacy firmware loading */
-		r = gfx_v8_0_cp_gfx_load_microcode(adev);
-		if (r)
-			return r;
-
-		r = gfx_v8_0_cp_compute_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
 	r = gfx_v8_0_kiq_resume(adev);
 	if (r)
 		return r;
...
@@ -318,14 +318,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 		if (adev->sdma.instance[i].feature_version >= 20)
 			adev->sdma.instance[i].burst_nop = true;
 
-		if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) {
-			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
-			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-			info->fw = adev->sdma.instance[i].fw;
-			header = (const struct common_firmware_header *)info->fw->data;
-			adev->firmware.fw_size +=
-				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
-		}
+		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+		info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+		info->fw = adev->sdma.instance[i].fw;
+		header = (const struct common_firmware_header *)info->fw->data;
+		adev->firmware.fw_size +=
+			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
 	}
 
 out:
 	if (err) {
@@ -777,42 +776,6 @@ static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
-/**
- * sdma_v3_0_load_microcode - load the sDMA ME ucode
- *
- * @adev: amdgpu_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
-{
-	const struct sdma_firmware_header_v1_0 *hdr;
-	const __le32 *fw_data;
-	u32 fw_size;
-	int i, j;
-
-	/* halt the MEs */
-	sdma_v3_0_enable(adev, false);
-
-	for (i = 0; i < adev->sdma.num_instances; i++) {
-		if (!adev->sdma.instance[i].fw)
-			return -EINVAL;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
-		amdgpu_ucode_print_sdma_hdr(&hdr->header);
-		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-		fw_data = (const __le32 *)
-			(adev->sdma.instance[i].fw->data +
-			le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
-		for (j = 0; j < fw_size; j++)
-			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
-	}
-
-	return 0;
-}
-
 /**
  * sdma_v3_0_start - setup and start the async dma engines
  *
@@ -825,12 +788,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
-		r = sdma_v3_0_load_microcode(adev);
-		if (r)
-			return r;
-	}
-
 	/* disable sdma engine before programing it */
 	sdma_v3_0_ctx_switch_enable(adev, false);
 	sdma_v3_0_enable(adev, false);
......
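Every loader removed above implements the same register-level sequence, which is what "direct" (CPU back-door) loading meant on VI: with the engine halted, rewind the block's UCODE_ADDR register, stream the ucode dwords through UCODE_DATA, then write the firmware version back to UCODE_ADDR. A condensed sketch of that shared pattern, for reference only (the helper and its parameters are illustrative, not code from the patch; WREG32 picks up the local adev as usual in amdgpu):

	/* Illustrative condensation of the removed direct-load routines. */
	static void vi_direct_load(struct amdgpu_device *adev,
				   u32 addr_reg, u32 data_reg,
				   const __le32 *fw_data, u32 fw_size_dw,
				   u32 fw_version)
	{
		u32 i;

		WREG32(addr_reg, 0);		/* rewind the ucode write pointer */
		for (i = 0; i < fw_size_dw; i++)
			WREG32(data_reg, le32_to_cpup(fw_data++));
		WREG32(addr_reg, fw_version);	/* record which version was loaded */
	}

With AMDGPU_FW_LOAD_DIRECT gone, VI parts always take the SMU front-door path instead.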