Commit 106c7d61 authored by Likun Gao, committed by Alex Deucher

drm/amdgpu: abstract the function of enter/exit safe mode for RLC

Abstract the functions amdgpu_gfx_rlc_enter/exit_safe_mode and part of
rlc_init into common helpers to improve the reusability of the RLC code.
Signed-off-by: Likun Gao <Likun.Gao@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 88dfc9a3
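The core of the refactor is that one common pair of wrappers now owns the in_safe_mode bookkeeping and drives three small per-ASIC callbacks instead of every ASIC duplicating the whole enter/exit sequence. Below is a minimal standalone sketch of that pattern, not the kernel code itself: the stub device and callbacks are hypothetical, and the cg_flags check from the real helpers is omitted for brevity.

```c
/*
 * Standalone sketch of the callback split this commit introduces.
 * A common helper drives is_rlc_enabled / set_safe_mode /
 * unset_safe_mode and tracks in_safe_mode in one place.
 */
#include <stdbool.h>
#include <stdio.h>

struct dev;

struct rlc_funcs {
	bool (*is_rlc_enabled)(struct dev *d);
	void (*set_safe_mode)(struct dev *d);
	void (*unset_safe_mode)(struct dev *d);
};

struct dev {
	const struct rlc_funcs *funcs;
	bool in_safe_mode;
};

/* common helper: per-ASIC code no longer touches in_safe_mode */
static void rlc_enter_safe_mode(struct dev *d)
{
	if (d->in_safe_mode)
		return;
	if (!d->funcs->is_rlc_enabled(d))
		return;
	d->funcs->set_safe_mode(d);
	d->in_safe_mode = true;
}

static void rlc_exit_safe_mode(struct dev *d)
{
	if (!d->in_safe_mode)
		return;
	if (!d->funcs->is_rlc_enabled(d))
		return;
	d->funcs->unset_safe_mode(d);
	d->in_safe_mode = false;
}

/* hypothetical per-ASIC implementation, standing in for e.g. gfx_v9_0 */
static bool stub_enabled(struct dev *d) { return true; }
static void stub_set(struct dev *d)     { printf("enter safe mode\n"); }
static void stub_unset(struct dev *d)   { printf("exit safe mode\n"); }

static const struct rlc_funcs stub_funcs = {
	.is_rlc_enabled  = stub_enabled,
	.set_safe_mode   = stub_set,
	.unset_safe_mode = stub_unset,
};

int main(void)
{
	struct dev d = { .funcs = &stub_funcs, .in_safe_mode = false };

	rlc_enter_safe_mode(&d);
	rlc_enter_safe_mode(&d);	/* no-op: already in safe mode */
	rlc_exit_safe_mode(&d);
	return 0;
}
```

The payoff is visible throughout the diff below: call sites shrink to amdgpu_gfx_rlc_enter/exit_safe_mode() and a new ASIC only has to supply the three callbacks.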
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
...
@@ -23,11 +22,237 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"

/**
 * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Put RLC into safe mode if RLC is enabled and not already in safe mode.
 */
void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev)
{
    if (adev->gfx.rlc.in_safe_mode)
        return;

    /* if RLC is not enabled, do nothing */
    if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
        return;

    if (adev->cg_flags &
        (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
         AMD_CG_SUPPORT_GFX_3D_CGCG)) {
        adev->gfx.rlc.funcs->set_safe_mode(adev);
        adev->gfx.rlc.in_safe_mode = true;
    }
}

/**
 * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
 *
 * @adev: amdgpu_device pointer
 *
 * Take RLC out of safe mode if RLC is enabled and has entered safe mode.
 */
void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev)
{
    if (!(adev->gfx.rlc.in_safe_mode))
        return;

    /* if RLC is not enabled, do nothing */
    if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
        return;

    if (adev->cg_flags &
        (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
         AMD_CG_SUPPORT_GFX_3D_CGCG)) {
        adev->gfx.rlc.funcs->unset_safe_mode(adev);
        adev->gfx.rlc.in_safe_mode = false;
    }
}

/**
 * amdgpu_gfx_rlc_init_sr - Init save restore block
 *
 * @adev: amdgpu_device pointer
 * @dws: the size of save restore block
 *
 * Allocate and set up the save restore block of RLC.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
    const u32 *src_ptr;
    volatile u32 *dst_ptr;
    u32 i;
    int r;

    /* allocate save restore block */
    r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &adev->gfx.rlc.save_restore_obj,
                                  &adev->gfx.rlc.save_restore_gpu_addr,
                                  (void **)&adev->gfx.rlc.sr_ptr);
    if (r) {
        dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
        amdgpu_gfx_rlc_fini(adev);
        return r;
    }

    /* write the sr buffer */
    src_ptr = adev->gfx.rlc.reg_list;
    dst_ptr = adev->gfx.rlc.sr_ptr;
    for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
        dst_ptr[i] = cpu_to_le32(src_ptr[i]);
    amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
    amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);

    return 0;
}

/**
 * amdgpu_gfx_rlc_init_csb - Init clear state block
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and set up the clear state block of RLC.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
{
    volatile u32 *dst_ptr;
    u32 dws;
    int r;

    /* allocate clear state block */
    adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
    r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &adev->gfx.rlc.clear_state_obj,
                                  &adev->gfx.rlc.clear_state_gpu_addr,
                                  (void **)&adev->gfx.rlc.cs_ptr);
    if (r) {
        dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
        amdgpu_gfx_rlc_fini(adev);
        return r;
    }

    /* set up the cs buffer */
    dst_ptr = adev->gfx.rlc.cs_ptr;
    adev->gfx.rlc.funcs->get_csb_buffer(adev, dst_ptr);
    amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
    amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
    amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);

    return 0;
}

/**
 * amdgpu_gfx_rlc_init_cpt - Init cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate and set up the cp table of RLC.
 * Returns 0 on success or a negative error code if allocation failed.
 */
int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
{
    int r;

    r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
                                  PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
                                  &adev->gfx.rlc.cp_table_obj,
                                  &adev->gfx.rlc.cp_table_gpu_addr,
                                  (void **)&adev->gfx.rlc.cp_table_ptr);
    if (r) {
        dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
        amdgpu_gfx_rlc_fini(adev);
        return r;
    }

    /* set up the cp table */
    amdgpu_gfx_rlc_setup_cp_table(adev);
    amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
    amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);

    return 0;
}

/**
 * amdgpu_gfx_rlc_setup_cp_table - setup the buffer of cp table
 *
 * @adev: amdgpu_device pointer
 *
 * Write cp firmware data into cp table.
 */
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
    const __le32 *fw_data;
    volatile u32 *dst_ptr;
    int me, i, max_me;
    u32 bo_offset = 0;
    u32 table_offset, table_size;

    max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);

    /* write the cp table buffer */
    dst_ptr = adev->gfx.rlc.cp_table_ptr;
    for (me = 0; me < max_me; me++) {
        if (me == 0) {
            const struct gfx_firmware_header_v1_0 *hdr =
                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
            fw_data = (const __le32 *)
                (adev->gfx.ce_fw->data +
                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            table_offset = le32_to_cpu(hdr->jt_offset);
            table_size = le32_to_cpu(hdr->jt_size);
        } else if (me == 1) {
            const struct gfx_firmware_header_v1_0 *hdr =
                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
            fw_data = (const __le32 *)
                (adev->gfx.pfp_fw->data +
                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            table_offset = le32_to_cpu(hdr->jt_offset);
            table_size = le32_to_cpu(hdr->jt_size);
        } else if (me == 2) {
            const struct gfx_firmware_header_v1_0 *hdr =
                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
            fw_data = (const __le32 *)
                (adev->gfx.me_fw->data +
                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            table_offset = le32_to_cpu(hdr->jt_offset);
            table_size = le32_to_cpu(hdr->jt_size);
        } else if (me == 3) {
            const struct gfx_firmware_header_v1_0 *hdr =
                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
            fw_data = (const __le32 *)
                (adev->gfx.mec_fw->data +
                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            table_offset = le32_to_cpu(hdr->jt_offset);
            table_size = le32_to_cpu(hdr->jt_size);
        } else if (me == 4) {
            const struct gfx_firmware_header_v1_0 *hdr =
                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
            fw_data = (const __le32 *)
                (adev->gfx.mec2_fw->data +
                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
            table_offset = le32_to_cpu(hdr->jt_offset);
            table_size = le32_to_cpu(hdr->jt_size);
        }

        for (i = 0; i < table_size; i++) {
            dst_ptr[bo_offset + i] =
                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
        }

        bo_offset += table_size;
    }
}

/**
 * amdgpu_gfx_rlc_fini - Free BO which used for RLC
 *
...
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
...
@@ -28,9 +27,13 @@
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
-    void (*enter_safe_mode)(struct amdgpu_device *adev);
-    void (*exit_safe_mode)(struct amdgpu_device *adev);
+    bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+    void (*set_safe_mode)(struct amdgpu_device *adev);
+    void (*unset_safe_mode)(struct amdgpu_device *adev);
    int (*init)(struct amdgpu_device *adev);
+    u32 (*get_csb_size)(struct amdgpu_device *adev);
+    void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+    int (*get_cp_table_num)(struct amdgpu_device *adev);
    int (*resume)(struct amdgpu_device *adev);
    void (*stop)(struct amdgpu_device *adev);
    void (*reset)(struct amdgpu_device *adev);
@@ -84,6 +87,12 @@ struct amdgpu_rlc {
    bool is_rlc_v2_1;
};

+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);

#endif
...
@@ -743,19 +743,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
    if (pi->caps_sq_ramping || pi->caps_db_ramping ||
        pi->caps_td_ramping || pi->caps_tcp_ramping) {
-        adev->gfx.rlc.funcs->enter_safe_mode(adev);
+        amdgpu_gfx_rlc_enter_safe_mode(adev);

        if (enable) {
            ret = ci_program_pt_config_registers(adev, didt_config_ci);
            if (ret) {
-                adev->gfx.rlc.funcs->exit_safe_mode(adev);
+                amdgpu_gfx_rlc_exit_safe_mode(adev);
                return ret;
            }
        }

        ci_do_enable_didt(adev, enable);

-        adev->gfx.rlc.funcs->exit_safe_mode(adev);
+        amdgpu_gfx_rlc_exit_safe_mode(adev);
    }

    return 0;
...
@@ -2355,7 +2355,7 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
    const u32 *src_ptr;
    volatile u32 *dst_ptr;
-    u32 dws, i;
+    u32 dws;
    u64 reg_list_mc_addr;
    const struct cs_section_def *cs_data;
    int r;
@@ -2370,28 +2370,12 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
    cs_data = adev->gfx.rlc.cs_data;

    if (src_ptr) {
-        /* save restore block */
-        r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.save_restore_obj,
-                                      &adev->gfx.rlc.save_restore_gpu_addr,
-                                      (void **)&adev->gfx.rlc.sr_ptr);
-        if (r) {
-            dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
-                     r);
-            amdgpu_gfx_rlc_fini(adev);
+        /* init save restore block */
+        r = amdgpu_gfx_rlc_init_sr(adev, dws);
+        if (r)
            return r;
-        }
-
-        /* write the sr buffer */
-        dst_ptr = adev->gfx.rlc.sr_ptr;
-        for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-            dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-        amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
    }

    if (cs_data) {
        /* clear state block */
        adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
...
@@ -882,7 +882,6 @@ static const u32 kalindi_rlc_save_restore_register_list[] =
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -3255,8 +3254,7 @@ static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
    const u32 *src_ptr;
-    volatile u32 *dst_ptr;
-    u32 dws, i;
+    u32 dws;
    const struct cs_section_def *cs_data;
    int r;
@@ -3283,68 +3281,25 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
    cs_data = adev->gfx.rlc.cs_data;

    if (src_ptr) {
-        /* save restore block */
-        r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.save_restore_obj,
-                                      &adev->gfx.rlc.save_restore_gpu_addr,
-                                      (void **)&adev->gfx.rlc.sr_ptr);
-        if (r) {
-            dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
-            amdgpu_gfx_rlc_fini(adev);
+        /* init save restore block */
+        r = amdgpu_gfx_rlc_init_sr(adev, dws);
+        if (r)
            return r;
-        }
-
-        /* write the sr buffer */
-        dst_ptr = adev->gfx.rlc.sr_ptr;
-        for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
-            dst_ptr[i] = cpu_to_le32(src_ptr[i]);
-        amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
    }

    if (cs_data) {
-        /* clear state block */
-        adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
-
-        r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.clear_state_obj,
-                                      &adev->gfx.rlc.clear_state_gpu_addr,
-                                      (void **)&adev->gfx.rlc.cs_ptr);
-        if (r) {
-            dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-            amdgpu_gfx_rlc_fini(adev);
+        /* init clear state block */
+        r = amdgpu_gfx_rlc_init_csb(adev);
+        if (r)
            return r;
-        }
-
-        /* set up the cs buffer */
-        dst_ptr = adev->gfx.rlc.cs_ptr;
-        gfx_v7_0_get_csb_buffer(adev, dst_ptr);
-        amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
    }

    if (adev->gfx.rlc.cp_table_size) {
-
-        r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.cp_table_obj,
-                                      &adev->gfx.rlc.cp_table_gpu_addr,
-                                      (void **)&adev->gfx.rlc.cp_table_ptr);
-        if (r) {
-            dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-            amdgpu_gfx_rlc_fini(adev);
+        r = amdgpu_gfx_rlc_init_cpt(adev);
+        if (r)
            return r;
-        }
-
-        gfx_v7_0_init_cp_pg_table(adev);
-        amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
    }

    return 0;
}
@@ -3423,7 +3378,12 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
    return orig;
}

-static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v7_0_is_rlc_enabled(struct amdgpu_device *adev)
+{
+    return true;
+}
+
+static void gfx_v7_0_set_safe_mode(struct amdgpu_device *adev)
{
    u32 tmp, i, mask;
@@ -3445,7 +3405,7 @@ static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
    }
}

-static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v7_0_unset_safe_mode(struct amdgpu_device *adev)
{
    u32 tmp;
@@ -3761,72 +3721,12 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
    WREG32(mmRLC_PG_CNTL, data);
}

-static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
+static int gfx_v7_0_cp_pg_table_num(struct amdgpu_device *adev)
{
-    const __le32 *fw_data;
-    volatile u32 *dst_ptr;
-    int me, i, max_me = 4;
-    u32 bo_offset = 0;
-    u32 table_offset, table_size;
-
    if (adev->asic_type == CHIP_KAVERI)
-        max_me = 5;
-
-    if (adev->gfx.rlc.cp_table_ptr == NULL)
-        return;
-
-    /* write the cp table buffer */
-    dst_ptr = adev->gfx.rlc.cp_table_ptr;
-    for (me = 0; me < max_me; me++) {
-        if (me == 0) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.ce_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 1) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.pfp_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 2) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.me_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 3) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.mec_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.mec2_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        }
-
-        for (i = 0; i < table_size; i ++) {
-            dst_ptr[bo_offset + i] =
-                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-        }
-
-        bo_offset += table_size;
-    }
+        return 5;
+    else
+        return 4;
}
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
...
@@ -4265,9 +4165,13 @@ static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
};

static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
-    .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
-    .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode,
+    .is_rlc_enabled = gfx_v7_0_is_rlc_enabled,
+    .set_safe_mode = gfx_v7_0_set_safe_mode,
+    .unset_safe_mode = gfx_v7_0_unset_safe_mode,
    .init = gfx_v7_0_rlc_init,
+    .get_csb_size = gfx_v7_0_get_csb_size,
+    .get_csb_buffer = gfx_v7_0_get_csb_buffer,
+    .get_cp_table_num = gfx_v7_0_cp_pg_table_num,
    .resume = gfx_v7_0_rlc_resume,
    .stop = gfx_v7_0_rlc_stop,
    .reset = gfx_v7_0_rlc_reset,
...
@@ -1283,75 +1283,16 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
    buffer[count++] = cpu_to_le32(0);
}

-static void cz_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v8_0_cp_jump_table_num(struct amdgpu_device *adev)
{
-    const __le32 *fw_data;
-    volatile u32 *dst_ptr;
-    int me, i, max_me = 4;
-    u32 bo_offset = 0;
-    u32 table_offset, table_size;
-
    if (adev->asic_type == CHIP_CARRIZO)
-        max_me = 5;
-
-    /* write the cp table buffer */
-    dst_ptr = adev->gfx.rlc.cp_table_ptr;
-    for (me = 0; me < max_me; me++) {
-        if (me == 0) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.ce_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 1) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.pfp_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 2) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.me_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 3) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.mec_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 4) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.mec2_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        }
-
-        for (i = 0; i < table_size; i ++) {
-            dst_ptr[bo_offset + i] =
-                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-        }
-
-        bo_offset += table_size;
-    }
+        return 5;
+    else
+        return 4;
}
static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
{
-    volatile u32 *dst_ptr;
-    u32 dws;
    const struct cs_section_def *cs_data;
    int r;
@@ -1360,46 +1301,20 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
    cs_data = adev->gfx.rlc.cs_data;

    if (cs_data) {
-        /* clear state block */
-        adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
-
-        r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.clear_state_obj,
-                                      &adev->gfx.rlc.clear_state_gpu_addr,
-                                      (void **)&adev->gfx.rlc.cs_ptr);
-        if (r) {
-            dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-            amdgpu_gfx_rlc_fini(adev);
+        /* init clear state block */
+        r = amdgpu_gfx_rlc_init_csb(adev);
+        if (r)
            return r;
-        }
-
-        /* set up the cs buffer */
-        dst_ptr = adev->gfx.rlc.cs_ptr;
-        gfx_v8_0_get_csb_buffer(adev, dst_ptr);
-        amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
    }

    if ((adev->asic_type == CHIP_CARRIZO) ||
        (adev->asic_type == CHIP_STONEY)) {
        adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-        r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.cp_table_obj,
-                                      &adev->gfx.rlc.cp_table_gpu_addr,
-                                      (void **)&adev->gfx.rlc.cp_table_ptr);
-        if (r) {
-            dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
+        r = amdgpu_gfx_rlc_init_cpt(adev);
+        if (r)
            return r;
-        }
-
-        cz_init_cp_jump_table(adev);
-        amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
    }

    return 0;
}
@@ -4945,7 +4860,7 @@ static int gfx_v8_0_hw_fini(void *handle)
        pr_debug("For SRIOV client, shouldn't do anything.\n");
        return 0;
    }
-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);
    if (!gfx_v8_0_wait_for_idle(adev))
        gfx_v8_0_cp_enable(adev, false);
    else
...
@@ -4954,7 +4869,7 @@ static int gfx_v8_0_hw_fini(void *handle)
        adev->gfx.rlc.funcs->stop(adev);
    else
        pr_err("rlc is busy, skip halt rlc\n");
-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
    return 0;
}
@@ -5417,7 +5332,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
              AMD_PG_SUPPORT_RLC_SMU_HS |
              AMD_PG_SUPPORT_CP |
              AMD_PG_SUPPORT_GFX_DMG))
-        adev->gfx.rlc.funcs->enter_safe_mode(adev);
+        amdgpu_gfx_rlc_enter_safe_mode(adev);
    switch (adev->asic_type) {
    case CHIP_CARRIZO:
    case CHIP_STONEY:
...
@@ -5471,7 +5386,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
              AMD_PG_SUPPORT_RLC_SMU_HS |
              AMD_PG_SUPPORT_CP |
              AMD_PG_SUPPORT_GFX_DMG))
-        adev->gfx.rlc.funcs->exit_safe_mode(adev);
+        amdgpu_gfx_rlc_exit_safe_mode(adev);
    return 0;
}
@@ -5565,21 +5480,28 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
#define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001
#define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e

-static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v8_0_is_rlc_enabled(struct amdgpu_device *adev)
{
-    u32 data;
-    unsigned i;
+    uint32_t rlc_setting;

-    data = RREG32(mmRLC_CNTL);
-    if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-        return;
+    rlc_setting = RREG32(mmRLC_CNTL);
+    if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
+        return false;

-    if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
+    return true;
+}
+
+static void gfx_v8_0_set_safe_mode(struct amdgpu_device *adev)
+{
+    uint32_t data;
+    unsigned i;
+
+    data = RREG32(mmRLC_CNTL);
    data |= RLC_SAFE_MODE__CMD_MASK;
    data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
    data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
    WREG32(mmRLC_SAFE_MODE, data);

+    /* wait for RLC_SAFE_MODE */
    for (i = 0; i < adev->usec_timeout; i++) {
        if ((RREG32(mmRLC_GPM_STAT) &
             (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK |
...
@@ -5589,33 +5511,22 @@ static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev)
            break;
        udelay(1);
    }

    for (i = 0; i < adev->usec_timeout; i++) {
        if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
            break;
        udelay(1);
    }
-    adev->gfx.rlc.in_safe_mode = true;
-    }
}
-static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
{
-    u32 data = 0;
+    uint32_t data;
    unsigned i;

    data = RREG32(mmRLC_CNTL);
-    if (!(data & RLC_CNTL__RLC_ENABLE_F32_MASK))
-        return;
-
-    if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-        if (adev->gfx.rlc.in_safe_mode) {
    data |= RLC_SAFE_MODE__CMD_MASK;
    data &= ~RLC_SAFE_MODE__MESSAGE_MASK;
    WREG32(mmRLC_SAFE_MODE, data);
-            adev->gfx.rlc.in_safe_mode = false;
-        }
-    }

    for (i = 0; i < adev->usec_timeout; i++) {
        if (!REG_GET_FIELD(RREG32(mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
...
@@ -5625,9 +5536,13 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev)
    }
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
-    .enter_safe_mode = iceland_enter_rlc_safe_mode,
-    .exit_safe_mode = iceland_exit_rlc_safe_mode,
+    .is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
+    .set_safe_mode = gfx_v8_0_set_safe_mode,
+    .unset_safe_mode = gfx_v8_0_unset_safe_mode,
    .init = gfx_v8_0_rlc_init,
+    .get_csb_size = gfx_v8_0_get_csb_size,
+    .get_csb_buffer = gfx_v8_0_get_csb_buffer,
+    .get_cp_table_num = gfx_v8_0_cp_jump_table_num,
    .resume = gfx_v8_0_rlc_resume,
    .stop = gfx_v8_0_rlc_stop,
    .reset = gfx_v8_0_rlc_reset,
@@ -5639,7 +5554,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
{
    uint32_t temp, data;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    /* It is disabled by HW by default */
    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
...
@@ -5735,7 +5650,7 @@ static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
        gfx_v8_0_wait_for_rlc_serdes(adev);
    }

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
...
@@ -5745,7 +5660,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
    temp = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
        temp1 = data1 = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
...
@@ -5828,7 +5743,7 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
    gfx_v8_0_wait_for_rlc_serdes(adev);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
                                            bool enable)
...
@@ -1050,72 +1050,13 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
    WREG32_FIELD15(GC, 0, RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);
}

-static void rv_init_cp_jump_table(struct amdgpu_device *adev)
+static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
-    const __le32 *fw_data;
-    volatile u32 *dst_ptr;
-    int me, i, max_me = 5;
-    u32 bo_offset = 0;
-    u32 table_offset, table_size;
-
-    /* write the cp table buffer */
-    dst_ptr = adev->gfx.rlc.cp_table_ptr;
-    for (me = 0; me < max_me; me++) {
-        if (me == 0) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.ce_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 1) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.pfp_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 2) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.me_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 3) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.mec_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        } else if (me == 4) {
-            const struct gfx_firmware_header_v1_0 *hdr =
-                (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
-            fw_data = (const __le32 *)
-                (adev->gfx.mec2_fw->data +
-                 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-            table_offset = le32_to_cpu(hdr->jt_offset);
-            table_size = le32_to_cpu(hdr->jt_size);
-        }
-
-        for (i = 0; i < table_size; i ++) {
-            dst_ptr[bo_offset + i] =
-                cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
-        }
-
-        bo_offset += table_size;
-    }
+    return 5;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
{
-    volatile u32 *dst_ptr;
-    u32 dws;
    const struct cs_section_def *cs_data;
    int r;
@@ -1124,47 +1065,20 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
    cs_data = adev->gfx.rlc.cs_data;

    if (cs_data) {
-        /* clear state block */
-        adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
-        r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
-                                      AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.clear_state_obj,
-                                      &adev->gfx.rlc.clear_state_gpu_addr,
-                                      (void **)&adev->gfx.rlc.cs_ptr);
-        if (r) {
-            dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
-                    r);
-            amdgpu_gfx_rlc_fini(adev);
+        /* init clear state block */
+        r = amdgpu_gfx_rlc_init_csb(adev);
+        if (r)
            return r;
-        }
-
-        /* set up the cs buffer */
-        dst_ptr = adev->gfx.rlc.cs_ptr;
-        gfx_v9_0_get_csb_buffer(adev, dst_ptr);
-        amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
    }

    if (adev->asic_type == CHIP_RAVEN) {
        /* TODO: double check the cp_table_size for RV */
        adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-        r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
-                                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                      &adev->gfx.rlc.cp_table_obj,
-                                      &adev->gfx.rlc.cp_table_gpu_addr,
-                                      (void **)&adev->gfx.rlc.cp_table_ptr);
-        if (r) {
-            dev_err(adev->dev,
-                    "(%d) failed to create cp table bo\n", r);
-            amdgpu_gfx_rlc_fini(adev);
+        r = amdgpu_gfx_rlc_init_cpt(adev);
+        if (r)
            return r;
-        }
-
-        rv_init_cp_jump_table(adev);
-        amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
-        amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
    }

    switch (adev->asic_type) {
    case CHIP_RAVEN:
        gfx_v9_0_init_lbpw(adev);
...
@@ -3585,22 +3499,23 @@ static int gfx_v9_0_late_init(void *handle)
    return 0;
}
-static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
+static bool gfx_v9_0_is_rlc_enabled(struct amdgpu_device *adev)
{
-    uint32_t rlc_setting, data;
-    unsigned i;
-
-    if (adev->gfx.rlc.in_safe_mode)
-        return;
+    uint32_t rlc_setting;

    /* if RLC is not enabled, do nothing */
    rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
    if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-        return;
-
-    if (adev->cg_flags &
-        (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
-         AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+        return false;
+
+    return true;
+}
+
+static void gfx_v9_0_set_safe_mode(struct amdgpu_device *adev)
+{
+    uint32_t data;
+    unsigned i;

    data = RLC_SAFE_MODE__CMD_MASK;
    data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
    WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
...
@@ -3611,38 +3526,20 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
            break;
        udelay(1);
    }
-    adev->gfx.rlc.in_safe_mode = true;
-    }
}
-static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
+static void gfx_v9_0_unset_safe_mode(struct amdgpu_device *adev)
{
-    uint32_t rlc_setting, data;
-
-    if (!adev->gfx.rlc.in_safe_mode)
-        return;
-
-    /* if RLC is not enabled, do nothing */
-    rlc_setting = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-    if (!(rlc_setting & RLC_CNTL__RLC_ENABLE_F32_MASK))
-        return;
-
-    if (adev->cg_flags &
-        (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) {
-        /*
-         * Try to exit safe mode only if it is already in safe
-         * mode.
-         */
+    uint32_t data;
+
    data = RLC_SAFE_MODE__CMD_MASK;
    WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE, data);
-    adev->gfx.rlc.in_safe_mode = false;
-    }
}
static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
                                                bool enable)
{
-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
        gfx_v9_0_enable_gfx_cg_power_gating(adev, true);
...
@@ -3653,7 +3550,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev,
        gfx_v9_0_enable_gfx_pipeline_powergating(adev, false);
    }

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev,
...
@@ -3751,7 +3648,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
    uint32_t data, def;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    /* Enable 3D CGCG/CGLS */
    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
...
@@ -3791,7 +3688,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D, data);
    }

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev,
...
@@ -3799,7 +3696,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
{
    uint32_t def, data;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
        def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
...
@@ -3839,7 +3736,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
        WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, data);
    }

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
...
@@ -3868,9 +3765,13 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
}

static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
-    .enter_safe_mode = gfx_v9_0_enter_rlc_safe_mode,
-    .exit_safe_mode = gfx_v9_0_exit_rlc_safe_mode,
+    .is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
+    .set_safe_mode = gfx_v9_0_set_safe_mode,
+    .unset_safe_mode = gfx_v9_0_unset_safe_mode,
    .init = gfx_v9_0_rlc_init,
+    .get_csb_size = gfx_v9_0_get_csb_size,
+    .get_csb_buffer = gfx_v9_0_get_csb_buffer,
+    .get_cp_table_num = gfx_v9_0_cp_jump_table_num,
    .resume = gfx_v9_0_rlc_resume,
    .stop = gfx_v9_0_rlc_stop,
    .reset = gfx_v9_0_rlc_reset,
...
@@ -508,19 +508,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
        pi->caps_db_ramping ||
        pi->caps_td_ramping ||
        pi->caps_tcp_ramping) {
-        adev->gfx.rlc.funcs->enter_safe_mode(adev);
+        amdgpu_gfx_rlc_enter_safe_mode(adev);

        if (enable) {
            ret = kv_program_pt_config_registers(adev, didt_config_kv);
            if (ret) {
-                adev->gfx.rlc.funcs->exit_safe_mode(adev);
+                amdgpu_gfx_rlc_exit_safe_mode(adev);
                return ret;
            }
        }

        kv_do_enable_didt(adev, enable);

-        adev->gfx.rlc.funcs->exit_safe_mode(adev);
+        amdgpu_gfx_rlc_exit_safe_mode(adev);
    }

    return 0;
...
@@ -967,7 +967,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
        PP_CAP(PHM_PlatformCaps_TDRamping) ||
        PP_CAP(PHM_PlatformCaps_TCPRamping)) {

-        adev->gfx.rlc.funcs->enter_safe_mode(adev);
+        amdgpu_gfx_rlc_enter_safe_mode(adev);
        mutex_lock(&adev->grbm_idx_mutex);
        value = 0;
        value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
...
@@ -1014,13 +1014,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
                    "Failed to enable DPM DIDT.", goto error);
        }
        mutex_unlock(&adev->grbm_idx_mutex);
-        adev->gfx.rlc.funcs->exit_safe_mode(adev);
+        amdgpu_gfx_rlc_exit_safe_mode(adev);
    }

    return 0;
error:
    mutex_unlock(&adev->grbm_idx_mutex);
-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
    return result;
}
@@ -1034,7 +1034,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
        PP_CAP(PHM_PlatformCaps_TDRamping) ||
        PP_CAP(PHM_PlatformCaps_TCPRamping)) {

-        adev->gfx.rlc.funcs->enter_safe_mode(adev);
+        amdgpu_gfx_rlc_enter_safe_mode(adev);

        result = smu7_enable_didt(hwmgr, false);
        PP_ASSERT_WITH_CODE((result == 0),
...
@@ -1046,12 +1046,12 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
            PP_ASSERT_WITH_CODE((0 == result),
                    "Failed to disable DPM DIDT.", goto error);
        }
-        adev->gfx.rlc.funcs->exit_safe_mode(adev);
+        amdgpu_gfx_rlc_exit_safe_mode(adev);
    }

    return 0;
error:
-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);
    return result;
}
...
@@ -937,7 +937,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
    num_se = adev->gfx.config.max_shader_engines;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    mutex_lock(&adev->grbm_idx_mutex);
    for (count = 0; count < num_se; count++) {
...
@@ -962,7 +962,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
    vega10_didt_set_mask(hwmgr, true);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    return 0;
}
@@ -971,11 +971,11 @@ static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr)
{
    struct amdgpu_device *adev = hwmgr->adev;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    vega10_didt_set_mask(hwmgr, false);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    return 0;
}
@@ -988,7 +988,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
    num_se = adev->gfx.config.max_shader_engines;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    mutex_lock(&adev->grbm_idx_mutex);
    for (count = 0; count < num_se; count++) {
...
@@ -1007,7 +1007,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
    vega10_didt_set_mask(hwmgr, true);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
    if (PP_CAP(PHM_PlatformCaps_GCEDC))
...
@@ -1024,11 +1024,11 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
    struct amdgpu_device *adev = hwmgr->adev;
    uint32_t data;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    vega10_didt_set_mask(hwmgr, false);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
        data = 0x00000000;
...
@@ -1049,7 +1049,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
    num_se = adev->gfx.config.max_shader_engines;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    mutex_lock(&adev->grbm_idx_mutex);
    for (count = 0; count < num_se; count++) {
...
@@ -1070,7 +1070,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr)
    vega10_didt_set_mask(hwmgr, true);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    return 0;
}
@@ -1079,11 +1079,11 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
{
    struct amdgpu_device *adev = hwmgr->adev;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    vega10_didt_set_mask(hwmgr, false);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    return 0;
}
@@ -1097,7 +1097,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
    num_se = adev->gfx.config.max_shader_engines;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
...
@@ -1118,7 +1118,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
    vega10_didt_set_mask(hwmgr, true);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
...
@@ -1138,11 +1138,11 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
    struct amdgpu_device *adev = hwmgr->adev;
    uint32_t data;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    vega10_didt_set_mask(hwmgr, false);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
        data = 0x00000000;
...
@@ -1160,7 +1160,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
    struct amdgpu_device *adev = hwmgr->adev;
    int result;

-    adev->gfx.rlc.funcs->enter_safe_mode(adev);
+    amdgpu_gfx_rlc_enter_safe_mode(adev);

    mutex_lock(&adev->grbm_idx_mutex);
    WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000);
...
@@ -1173,7 +1173,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr)
    vega10_didt_set_mask(hwmgr, false);

-    adev->gfx.rlc.funcs->exit_safe_mode(adev);
+    amdgpu_gfx_rlc_exit_safe_mode(adev);

    return 0;
}
...
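As a closing aside on the cp_table_size expression that appears in the gfx_v8_0 and gfx_v9_0 hunks above (ALIGN(96 * 5 * 4, 2048) + (64 * 1024) with the "JT + GDS" comment), the arithmetic works out as sketched below. The interpretation of 96 dwords per microengine jump table is an assumption read off the expression itself, not stated in the patch.

```c
/* Standalone check of the JT + GDS sizing used for CARRIZO/STONEY/RAVEN:
 * 5 microengines x 96 jump-table dwords x 4 bytes = 1920 bytes,
 * rounded up to 2048, plus 64 KiB reserved for GDS backup. */
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int jt  = ALIGN(96 * 5 * 4, 2048);	/* 2048  */
	unsigned int gds = 64 * 1024;			/* 65536 */

	printf("cp_table_size = %u bytes\n", jt + gds);	/* 67584 */
	return 0;
}
```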