Commit aca935c7 authored by Bhawanpreet Lakha's avatar Bhawanpreet Lakha Committed by Alex Deucher

drm/amd/display: Drop CONFIG_DRM_AMD_DC_DCN2_1 flag

[Why]

DCN21 is stable enough to be built by default, so drop the flag.

[How]

Remove them using the unifdef tool. The following commands were executed
in sequence:

$ find -name '*.c' -exec unifdef -m -DCONFIG_DRM_AMD_DC_DCN2_1 -UCONFIG_TRIM_DRM_AMD_DC_DCN2_1 '{}' ';'
$ find -name '*.h' -exec unifdef -m -DCONFIG_DRM_AMD_DC_DCN2_1 -UCONFIG_TRIM_DRM_AMD_DC_DCN2_1 '{}' ';'

In addition:

* Remove from kconfig, and replace any dependencies with DCN1_0.
* Remove from any makefiles.
* Fix and cleanup Renoir definitions in dal_asic_id.h
* Expand DCN1 ifdef to include DCN21 code in the following files:
    * clk_mgr/clk_mgr.c: dc_clk_mgr_create()
    * core/dc_resource.c: dc_create_resource_pool()
    * gpio/hw_factory.c: dal_hw_factory_init()
    * gpio/hw_translate.c: dal_hw_translate_init()
Signed-off-by: Bhawanpreet Lakha <Bhawanpreet.Lakha@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1da37801
...@@ -2603,8 +2603,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) ...@@ -2603,8 +2603,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_NAVI10: case CHIP_NAVI10:
case CHIP_NAVI14: case CHIP_NAVI14:
case CHIP_NAVI12: case CHIP_NAVI12:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR: case CHIP_RENOIR:
#endif #endif
return amdgpu_dc != 0; return amdgpu_dc != 0;
......
...@@ -15,23 +15,7 @@ config DRM_AMD_DC ...@@ -15,23 +15,7 @@ config DRM_AMD_DC
config DRM_AMD_DC_DCN1_0 config DRM_AMD_DC_DCN1_0
def_bool n def_bool n
help help
RV and NV family support for display engine Raven, Navi and Renoir family support for display engine
config DRM_AMD_DC_DCN2_1
bool "DCN 2.1 family"
depends on DRM_AMD_DC && X86
help
Choose this option if you want to have
Renoir support for display engine
config DRM_AMD_DC_DSC_SUPPORT
bool "DSC support"
default y
depends on DRM_AMD_DC && X86
depends on DRM_AMD_DC_DCN1_0
help
Choose this option if you want to have
Dynamic Stream Compression support
config DRM_AMD_DC_HDCP config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC" bool "Enable HDCP support in DC"
......
...@@ -2756,9 +2756,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) ...@@ -2756,9 +2756,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
case CHIP_NAVI12: case CHIP_NAVI12:
case CHIP_NAVI10: case CHIP_NAVI10:
case CHIP_NAVI14: case CHIP_NAVI14:
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR: case CHIP_RENOIR:
#endif
if (dcn10_register_irq_handlers(dm->adev)) { if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n"); DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail; goto fail;
...@@ -2922,13 +2920,11 @@ static int dm_early_init(void *handle) ...@@ -2922,13 +2920,11 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 5; adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5; adev->mode_info.num_dig = 5;
break; break;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR: case CHIP_RENOIR:
adev->mode_info.num_crtc = 4; adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4; adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4; adev->mode_info.num_dig = 4;
break; break;
#endif
default: default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL; return -EINVAL;
...@@ -3224,9 +3220,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev, ...@@ -3224,9 +3220,7 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_NAVI10 ||
adev->asic_type == CHIP_NAVI14 || adev->asic_type == CHIP_NAVI14 ||
adev->asic_type == CHIP_NAVI12 || adev->asic_type == CHIP_NAVI12 ||
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
adev->asic_type == CHIP_RENOIR || adev->asic_type == CHIP_RENOIR ||
#endif
adev->asic_type == CHIP_RAVEN) { adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */ /* Fill GFX9 params */
tiling_info->gfx9.num_pipes = tiling_info->gfx9.num_pipes =
......
...@@ -891,7 +891,6 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, ...@@ -891,7 +891,6 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_FAIL;
} }
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
enum pp_smu_status pp_rn_get_dpm_clock_table( enum pp_smu_status pp_rn_get_dpm_clock_table(
struct pp_smu *pp, struct dpm_clocks *clock_table) struct pp_smu *pp, struct dpm_clocks *clock_table)
{ {
...@@ -973,7 +972,6 @@ enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, ...@@ -973,7 +972,6 @@ enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp,
return PP_SMU_RESULT_OK; return PP_SMU_RESULT_OK;
} }
#endif
void dm_pp_get_funcs( void dm_pp_get_funcs(
struct dc_context *ctx, struct dc_context *ctx,
...@@ -1018,14 +1016,12 @@ void dm_pp_get_funcs( ...@@ -1018,14 +1016,12 @@ void dm_pp_get_funcs(
funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support;
break; break;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
case DCN_VERSION_2_1: case DCN_VERSION_2_1:
funcs->ctx.ver = PP_SMU_VER_RN; funcs->ctx.ver = PP_SMU_VER_RN;
funcs->rn_funcs.pp_smu.dm = ctx; funcs->rn_funcs.pp_smu.dm = ctx;
funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges;
funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table;
break; break;
#endif
default: default:
DRM_ERROR("smu version is not supported !\n"); DRM_ERROR("smu version is not supported !\n");
break; break;
......
...@@ -29,9 +29,6 @@ ifdef CONFIG_DRM_AMD_DC_DCN1_0 ...@@ -29,9 +29,6 @@ ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn20 DC_LIBS += dcn20
DC_LIBS += dsc DC_LIBS += dsc
DC_LIBS += dcn10 dml DC_LIBS += dcn10 dml
endif
ifdef CONFIG_DRM_AMD_DC_DCN2_1
DC_LIBS += dcn21 DC_LIBS += dcn21
endif endif
......
...@@ -65,11 +65,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2( ...@@ -65,11 +65,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_2_0: case DCN_VERSION_2_0:
*h = dal_cmd_tbl_helper_dce112_get_table2(); *h = dal_cmd_tbl_helper_dce112_get_table2();
return true; return true;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1: case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2(); *h = dal_cmd_tbl_helper_dce112_get_table2();
return true; return true;
#endif
case DCE_VERSION_12_0: case DCE_VERSION_12_0:
case DCE_VERSION_12_1: case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2(); *h = dal_cmd_tbl_helper_dce112_get_table2();
......
...@@ -81,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o ...@@ -81,9 +81,7 @@ CLK_MGR_DCN20 = dcn20_clk_mgr.o
AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20)) AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20) AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
endif
ifdef CONFIG_DRM_AMD_DC_DCN2_1
############################################################################### ###############################################################################
# DCN21 # DCN21
############################################################################### ###############################################################################
......
...@@ -37,9 +37,7 @@ ...@@ -37,9 +37,7 @@
#include "dcn10/rv1_clk_mgr.h" #include "dcn10/rv1_clk_mgr.h"
#include "dcn10/rv2_clk_mgr.h" #include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h" #include "dcn20/dcn20_clk_mgr.h"
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/rn_clk_mgr.h" #include "dcn21/rn_clk_mgr.h"
#endif
int clk_mgr_helper_get_active_display_cnt( int clk_mgr_helper_get_active_display_cnt(
...@@ -136,12 +134,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p ...@@ -136,12 +134,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN1_0) #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV: case FAMILY_RV:
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) { if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break; break;
} }
#endif /* DCN2_1 */
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) { if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) {
rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu); rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
break; break;
......
...@@ -705,10 +705,8 @@ static bool construct(struct dc *dc, ...@@ -705,10 +705,8 @@ static bool construct(struct dc *dc,
if (!dc->clk_mgr) if (!dc->clk_mgr)
goto fail; goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
if (dc->res_pool->funcs->update_bw_bounding_box) if (dc->res_pool->funcs->update_bw_bounding_box)
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params); dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
#endif
/* Creation of current_state must occur after dc->dml /* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because * is initialized in dc_create_resource_pool because
......
...@@ -50,9 +50,7 @@ ...@@ -50,9 +50,7 @@
#include "dcn10/dcn10_resource.h" #include "dcn10/dcn10_resource.h"
#endif #endif
#include "dcn20/dcn20_resource.h" #include "dcn20/dcn20_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/dcn21_resource.h" #include "dcn21/dcn21_resource.h"
#endif
#include "dce120/dce120_resource.h" #include "dce120/dce120_resource.h"
#define DC_LOGGER_INIT(logger) #define DC_LOGGER_INIT(logger)
...@@ -102,10 +100,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) ...@@ -102,10 +100,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
dc_version = DCN_VERSION_1_0; dc_version = DCN_VERSION_1_0;
if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev)) if (ASICREV_IS_RAVEN2(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_1_01; dc_version = DCN_VERSION_1_01;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_2_1; dc_version = DCN_VERSION_2_1;
#endif
break; break;
#endif #endif
...@@ -168,11 +164,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, ...@@ -168,11 +164,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_2_0: case DCN_VERSION_2_0:
res_pool = dcn20_create_resource_pool(init_data, dc); res_pool = dcn20_create_resource_pool(init_data, dc);
break; break;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1: case DCN_VERSION_2_1:
res_pool = dcn21_create_resource_pool(init_data, dc); res_pool = dcn21_create_resource_pool(init_data, dc);
break; break;
#endif
#endif #endif
default: default:
......
...@@ -401,9 +401,7 @@ struct dc_debug_options { ...@@ -401,9 +401,7 @@ struct dc_debug_options {
bool dmub_command_table; /* for testing only */ bool dmub_command_table; /* for testing only */
struct dc_bw_validation_profile bw_val_profile; struct dc_bw_validation_profile bw_val_profile;
bool disable_fec; bool disable_fec;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
bool disable_48mhz_pwrdwn; bool disable_48mhz_pwrdwn;
#endif
/* This forces a hard min on the DCFCLK requested to SMU/PP /* This forces a hard min on the DCFCLK requested to SMU/PP
* watermarks are not affected. * watermarks are not affected.
*/ */
......
...@@ -76,7 +76,6 @@ ...@@ -76,7 +76,6 @@
SRII(PIXEL_RATE_CNTL, OTG, 4),\ SRII(PIXEL_RATE_CNTL, OTG, 4),\
SRII(PIXEL_RATE_CNTL, OTG, 5) SRII(PIXEL_RATE_CNTL, OTG, 5)
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \ #define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\ SRII(PHASE, DP_DTO, 0),\
...@@ -91,7 +90,6 @@ ...@@ -91,7 +90,6 @@
SRII(PIXEL_RATE_CNTL, OTG, 1),\ SRII(PIXEL_RATE_CNTL, OTG, 1),\
SRII(PIXEL_RATE_CNTL, OTG, 2),\ SRII(PIXEL_RATE_CNTL, OTG, 2),\
SRII(PIXEL_RATE_CNTL, OTG, 3) SRII(PIXEL_RATE_CNTL, OTG, 3)
#endif
#define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\ #define CS_COMMON_MASK_SH_LIST_DCN2_0(mask_sh)\
CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\ CS_SF(DP_DTO0_PHASE, DP_DTO0_PHASE, mask_sh),\
......
...@@ -440,7 +440,6 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu) ...@@ -440,7 +440,6 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
return status; return status;
} }
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
static bool dcn21_dmcu_init(struct dmcu *dmcu) static bool dcn21_dmcu_init(struct dmcu *dmcu)
{ {
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu); struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
...@@ -452,7 +451,6 @@ static bool dcn21_dmcu_init(struct dmcu *dmcu) ...@@ -452,7 +451,6 @@ static bool dcn21_dmcu_init(struct dmcu *dmcu)
return dcn10_dmcu_init(dmcu); return dcn10_dmcu_init(dmcu);
} }
#endif
static bool dcn10_dmcu_load_iram(struct dmcu *dmcu, static bool dcn10_dmcu_load_iram(struct dmcu *dmcu,
unsigned int start_offset, unsigned int start_offset,
...@@ -834,7 +832,6 @@ static const struct dmcu_funcs dcn20_funcs = { ...@@ -834,7 +832,6 @@ static const struct dmcu_funcs dcn20_funcs = {
.unlock_phy = dcn20_unlock_phy .unlock_phy = dcn20_unlock_phy
}; };
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
static const struct dmcu_funcs dcn21_funcs = { static const struct dmcu_funcs dcn21_funcs = {
.dmcu_init = dcn21_dmcu_init, .dmcu_init = dcn21_dmcu_init,
.load_iram = dcn10_dmcu_load_iram, .load_iram = dcn10_dmcu_load_iram,
...@@ -848,7 +845,6 @@ static const struct dmcu_funcs dcn21_funcs = { ...@@ -848,7 +845,6 @@ static const struct dmcu_funcs dcn21_funcs = {
.unlock_phy = dcn20_unlock_phy .unlock_phy = dcn20_unlock_phy
}; };
#endif #endif
#endif
static void dce_dmcu_construct( static void dce_dmcu_construct(
struct dce_dmcu *dmcu_dce, struct dce_dmcu *dmcu_dce,
...@@ -952,7 +948,6 @@ struct dmcu *dcn20_dmcu_create( ...@@ -952,7 +948,6 @@ struct dmcu *dcn20_dmcu_create(
return &dmcu_dce->base; return &dmcu_dce->base;
} }
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
struct dmcu *dcn21_dmcu_create( struct dmcu *dcn21_dmcu_create(
struct dc_context *ctx, struct dc_context *ctx,
const struct dce_dmcu_registers *regs, const struct dce_dmcu_registers *regs,
...@@ -974,7 +969,6 @@ struct dmcu *dcn21_dmcu_create( ...@@ -974,7 +969,6 @@ struct dmcu *dcn21_dmcu_create(
return &dmcu_dce->base; return &dmcu_dce->base;
} }
#endif #endif
#endif
void dce_dmcu_destroy(struct dmcu **dmcu) void dce_dmcu_destroy(struct dmcu **dmcu)
{ {
......
...@@ -272,13 +272,11 @@ struct dmcu *dcn20_dmcu_create( ...@@ -272,13 +272,11 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask); const struct dce_dmcu_mask *dmcu_mask);
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
struct dmcu *dcn21_dmcu_create( struct dmcu *dcn21_dmcu_create(
struct dc_context *ctx, struct dc_context *ctx,
const struct dce_dmcu_registers *regs, const struct dce_dmcu_registers *regs,
const struct dce_dmcu_shift *dmcu_shift, const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask); const struct dce_dmcu_mask *dmcu_mask);
#endif
void dce_dmcu_destroy(struct dmcu **dmcu); void dce_dmcu_destroy(struct dmcu **dmcu);
......
...@@ -276,7 +276,6 @@ ...@@ -276,7 +276,6 @@
SR(DC_IP_REQUEST_CNTL), \ SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST() BL_REG_LIST()
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define HWSEQ_DCN21_REG_LIST()\ #define HWSEQ_DCN21_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \ HWSEQ_DCN_REG_LIST(), \
HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \ HSWEQ_DCN_PIXEL_RATE_REG_LIST(OTG, 0), \
...@@ -327,7 +326,6 @@ ...@@ -327,7 +326,6 @@
SR(D6VGA_CONTROL), \ SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL), \ SR(DC_IP_REQUEST_CNTL), \
BL_REG_LIST() BL_REG_LIST()
#endif
struct dce_hwseq_registers { struct dce_hwseq_registers {
...@@ -635,7 +633,6 @@ struct dce_hwseq_registers { ...@@ -635,7 +633,6 @@ struct dce_hwseq_registers {
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh) HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\ #define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
...@@ -678,7 +675,6 @@ struct dce_hwseq_registers { ...@@ -678,7 +675,6 @@ struct dce_hwseq_registers {
HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
#endif
#define HWSEQ_REG_FIELD_LIST(type) \ #define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \ type DCFE_CLOCK_ENABLE; \
......
...@@ -121,7 +121,6 @@ struct dcn_hubbub_registers { ...@@ -121,7 +121,6 @@ struct dcn_hubbub_registers {
uint32_t DCN_VM_AGP_BASE; uint32_t DCN_VM_AGP_BASE;
uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_MSB;
uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB; uint32_t DCN_VM_PROTECTION_FAULT_DEFAULT_ADDR_LSB;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_A;
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_B;
uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C; uint32_t DCHUBBUB_ARB_FRAC_URG_BW_NOM_C;
...@@ -140,7 +139,6 @@ struct dcn_hubbub_registers { ...@@ -140,7 +139,6 @@ struct dcn_hubbub_registers {
uint32_t DCHVM_CLK_CTRL; uint32_t DCHVM_CLK_CTRL;
uint32_t DCHVM_RIOMMU_CTRL0; uint32_t DCHVM_RIOMMU_CTRL0;
uint32_t DCHVM_RIOMMU_STAT0; uint32_t DCHVM_RIOMMU_STAT0;
#endif
}; };
/* set field name */ /* set field name */
...@@ -232,7 +230,6 @@ struct dcn_hubbub_registers { ...@@ -232,7 +230,6 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define HUBBUB_HVM_REG_FIELD_LIST(type) \ #define HUBBUB_HVM_REG_FIELD_LIST(type) \
type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD;\
type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\ type DCHUBBUB_ARB_VM_ROW_URGENCY_WATERMARK_A;\
...@@ -278,22 +275,17 @@ struct dcn_hubbub_registers { ...@@ -278,22 +275,17 @@ struct dcn_hubbub_registers {
type HOSTVM_POWERSTATUS; \ type HOSTVM_POWERSTATUS; \
type RIOMMU_ACTIVE; \ type RIOMMU_ACTIVE; \
type HOSTVM_PREFETCH_DONE type HOSTVM_PREFETCH_DONE
#endif
struct dcn_hubbub_shift { struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t); DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
HUBBUB_HVM_REG_FIELD_LIST(uint8_t); HUBBUB_HVM_REG_FIELD_LIST(uint8_t);
#endif
}; };
struct dcn_hubbub_mask { struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t); DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t); HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
HUBBUB_HVM_REG_FIELD_LIST(uint32_t); HUBBUB_HVM_REG_FIELD_LIST(uint32_t);
#endif
}; };
struct dc; struct dc;
......
...@@ -677,10 +677,8 @@ static void dcn10_bios_golden_init(struct dc *dc) ...@@ -677,10 +677,8 @@ static void dcn10_bios_golden_init(struct dc *dc)
int i; int i;
bool allow_self_fresh_force_enable = true; bool allow_self_fresh_force_enable = true;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc))
return; return;
#endif
if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
allow_self_fresh_force_enable = allow_self_fresh_force_enable =
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
......
...@@ -148,7 +148,6 @@ ...@@ -148,7 +148,6 @@
uint32_t VMID_SETTINGS_0 uint32_t VMID_SETTINGS_0
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \
uint32_t FLIP_PARAMETERS_3;\ uint32_t FLIP_PARAMETERS_3;\
...@@ -157,7 +156,6 @@ ...@@ -157,7 +156,6 @@
uint32_t FLIP_PARAMETERS_6;\ uint32_t FLIP_PARAMETERS_6;\
uint32_t VBLANK_PARAMETERS_5;\ uint32_t VBLANK_PARAMETERS_5;\
uint32_t VBLANK_PARAMETERS_6 uint32_t VBLANK_PARAMETERS_6
#endif
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \ #define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \ DCN_HUBP_REG_FIELD_BASE_LIST(type); \
...@@ -184,7 +182,6 @@ ...@@ -184,7 +182,6 @@
type SURFACE_TRIPLE_BUFFER_ENABLE;\ type SURFACE_TRIPLE_BUFFER_ENABLE;\
type VMID type VMID
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
#define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \ #define DCN21_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type);\
type REFCYC_PER_VM_GROUP_FLIP;\ type REFCYC_PER_VM_GROUP_FLIP;\
...@@ -194,31 +191,18 @@ ...@@ -194,31 +191,18 @@
type REFCYC_PER_PTE_GROUP_FLIP_C; \ type REFCYC_PER_PTE_GROUP_FLIP_C; \
type REFCYC_PER_META_CHUNK_FLIP_C; \ type REFCYC_PER_META_CHUNK_FLIP_C; \
type VM_GROUP_SIZE type VM_GROUP_SIZE
#endif
struct dcn_hubp2_registers { struct dcn_hubp2_registers {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_COMMON_VARIABLE_LIST; DCN21_HUBP_REG_COMMON_VARIABLE_LIST;
#else
DCN2_HUBP_REG_COMMON_VARIABLE_LIST;
#endif
}; };
struct dcn_hubp2_shift { struct dcn_hubp2_shift {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t); DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
#else
DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
#endif
}; };
struct dcn_hubp2_mask { struct dcn_hubp2_mask {
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t); DCN21_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
#else
DCN2_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
#endif
}; };
struct dcn20_hubp { struct dcn20_hubp {
......
...@@ -2599,11 +2599,9 @@ static void dcn20_calculate_wm( ...@@ -2599,11 +2599,9 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#endif
if (vlevel < 2) { if (vlevel < 2) {
pipes[0].clks_cfg.voltage = 2; pipes[0].clks_cfg.voltage = 2;
...@@ -2615,10 +2613,8 @@ static void dcn20_calculate_wm( ...@@ -2615,10 +2613,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#endif
if (vlevel < 3) { if (vlevel < 3) {
pipes[0].clks_cfg.voltage = 3; pipes[0].clks_cfg.voltage = 3;
...@@ -2630,10 +2626,8 @@ static void dcn20_calculate_wm( ...@@ -2630,10 +2626,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#endif
pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz;
...@@ -2643,10 +2637,8 @@ static void dcn20_calculate_wm( ...@@ -2643,10 +2637,8 @@ static void dcn20_calculate_wm(
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
#endif
} }
void dcn20_calculate_dlg_params( void dcn20_calculate_dlg_params(
......
...@@ -976,11 +976,9 @@ static void calculate_wm_set_for_vlevel( ...@@ -976,11 +976,9 @@ static void calculate_wm_set_for_vlevel(
wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000; wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000; wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
#endif
dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
} }
......
...@@ -42,9 +42,7 @@ enum pp_smu_ver { ...@@ -42,9 +42,7 @@ enum pp_smu_ver {
PP_SMU_UNSUPPORTED, PP_SMU_UNSUPPORTED,
PP_SMU_VER_RV, PP_SMU_VER_RV,
PP_SMU_VER_NV, PP_SMU_VER_NV,
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
PP_SMU_VER_RN, PP_SMU_VER_RN,
#endif
PP_SMU_VER_MAX PP_SMU_VER_MAX
}; };
...@@ -288,9 +286,7 @@ struct pp_smu_funcs { ...@@ -288,9 +286,7 @@ struct pp_smu_funcs {
union { union {
struct pp_smu_funcs_rv rv_funcs; struct pp_smu_funcs_rv rv_funcs;
struct pp_smu_funcs_nv nv_funcs; struct pp_smu_funcs_nv nv_funcs;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
struct pp_smu_funcs_rn rn_funcs; struct pp_smu_funcs_rn rn_funcs;
#endif
}; };
}; };
......
...@@ -44,8 +44,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) ...@@ -44,8 +44,6 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
endif
ifdef CONFIG_DRM_AMD_DC_DCN2_1
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif endif
...@@ -59,8 +57,6 @@ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ ...@@ -59,8 +57,6 @@ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
ifdef CONFIG_DRM_AMD_DC_DCN1_0 ifdef CONFIG_DRM_AMD_DC_DCN1_0
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
endif
ifdef CONFIG_DRM_AMD_DC_DCN2_1
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
endif endif
......
...@@ -29,10 +29,8 @@ ...@@ -29,10 +29,8 @@
#include "dcn20/display_rq_dlg_calc_20.h" #include "dcn20/display_rq_dlg_calc_20.h"
#include "dcn20/display_mode_vba_20v2.h" #include "dcn20/display_mode_vba_20v2.h"
#include "dcn20/display_rq_dlg_calc_20v2.h" #include "dcn20/display_rq_dlg_calc_20v2.h"
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
#include "dcn21/display_mode_vba_21.h" #include "dcn21/display_mode_vba_21.h"
#include "dcn21/display_rq_dlg_calc_21.h" #include "dcn21/display_rq_dlg_calc_21.h"
#endif
const struct dml_funcs dml20_funcs = { const struct dml_funcs dml20_funcs = {
.validate = dml20_ModeSupportAndSystemConfigurationFull, .validate = dml20_ModeSupportAndSystemConfigurationFull,
...@@ -48,14 +46,12 @@ const struct dml_funcs dml20v2_funcs = { ...@@ -48,14 +46,12 @@ const struct dml_funcs dml20v2_funcs = {
.rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg .rq_dlg_get_rq_reg = dml20v2_rq_dlg_get_rq_reg
}; };
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
const struct dml_funcs dml21_funcs = { const struct dml_funcs dml21_funcs = {
.validate = dml21_ModeSupportAndSystemConfigurationFull, .validate = dml21_ModeSupportAndSystemConfigurationFull,
.recalculate = dml21_recalculate, .recalculate = dml21_recalculate,
.rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg, .rq_dlg_get_dlg_reg = dml21_rq_dlg_get_dlg_reg,
.rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg .rq_dlg_get_rq_reg = dml21_rq_dlg_get_rq_reg
}; };
#endif
void dml_init_instance(struct display_mode_lib *lib, void dml_init_instance(struct display_mode_lib *lib,
const struct _vcs_dpi_soc_bounding_box_st *soc_bb, const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
...@@ -72,11 +68,9 @@ void dml_init_instance(struct display_mode_lib *lib, ...@@ -72,11 +68,9 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_NAVI10v2: case DML_PROJECT_NAVI10v2:
lib->funcs = dml20v2_funcs; lib->funcs = dml20v2_funcs;
break; break;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
case DML_PROJECT_DCN21: case DML_PROJECT_DCN21:
lib->funcs = dml21_funcs; lib->funcs = dml21_funcs;
break; break;
#endif
default: default:
break; break;
......
...@@ -34,9 +34,7 @@ enum dml_project { ...@@ -34,9 +34,7 @@ enum dml_project {
DML_PROJECT_RAVEN1, DML_PROJECT_RAVEN1,
DML_PROJECT_NAVI10, DML_PROJECT_NAVI10,
DML_PROJECT_NAVI10v2, DML_PROJECT_NAVI10v2,
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
DML_PROJECT_DCN21, DML_PROJECT_DCN21,
#endif
}; };
struct display_mode_lib; struct display_mode_lib;
......
...@@ -76,9 +76,10 @@ GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o ...@@ -76,9 +76,10 @@ GPIO_DCN20 = hw_translate_dcn20.o hw_factory_dcn20.o
AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20)) AMD_DAL_GPIO_DCN20 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn20/,$(GPIO_DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20) AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCN20)
endif
ifdef CONFIG_DRM_AMD_DC_DCN2_1 ###############################################################################
# DCN 21
###############################################################################
GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o GPIO_DCN21 = hw_translate_dcn21.o hw_factory_dcn21.o
AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21)) AMD_DAL_GPIO_DCN21 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn21/,$(GPIO_DCN21))
......
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
* Authors: AMD * Authors: AMD
* *
*/ */
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#ifndef __DAL_HW_FACTORY_DCN21_H__ #ifndef __DAL_HW_FACTORY_DCN21_H__
#define __DAL_HW_FACTORY_DCN21_H__ #define __DAL_HW_FACTORY_DCN21_H__
...@@ -30,4 +29,3 @@ ...@@ -30,4 +29,3 @@
void dal_hw_factory_dcn21_init(struct hw_factory *factory); void dal_hw_factory_dcn21_init(struct hw_factory *factory);
#endif /* __DAL_HW_FACTORY_DCN20_H__ */ #endif /* __DAL_HW_FACTORY_DCN20_H__ */
#endif
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
* Authors: AMD * Authors: AMD
* *
*/ */
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#ifndef __DAL_HW_TRANSLATE_DCN21_H__ #ifndef __DAL_HW_TRANSLATE_DCN21_H__
#define __DAL_HW_TRANSLATE_DCN21_H__ #define __DAL_HW_TRANSLATE_DCN21_H__
...@@ -32,4 +31,3 @@ struct hw_translate; ...@@ -32,4 +31,3 @@ struct hw_translate;
void dal_hw_translate_dcn21_init(struct hw_translate *tr); void dal_hw_translate_dcn21_init(struct hw_translate *tr);
#endif /* __DAL_HW_TRANSLATE_DCN21_H__ */ #endif /* __DAL_HW_TRANSLATE_DCN21_H__ */
#endif
...@@ -49,9 +49,7 @@ ...@@ -49,9 +49,7 @@
#include "dcn10/hw_factory_dcn10.h" #include "dcn10/hw_factory_dcn10.h"
#endif #endif
#include "dcn20/hw_factory_dcn20.h" #include "dcn20/hw_factory_dcn20.h"
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/hw_factory_dcn21.h" #include "dcn21/hw_factory_dcn21.h"
#endif
#include "diagnostics/hw_factory_diag.h" #include "diagnostics/hw_factory_diag.h"
...@@ -97,11 +95,9 @@ bool dal_hw_factory_init( ...@@ -97,11 +95,9 @@ bool dal_hw_factory_init(
case DCN_VERSION_2_0: case DCN_VERSION_2_0:
dal_hw_factory_dcn20_init(factory); dal_hw_factory_dcn20_init(factory);
return true; return true;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1: case DCN_VERSION_2_1:
dal_hw_factory_dcn21_init(factory); dal_hw_factory_dcn21_init(factory);
return true; return true;
#endif
#endif #endif
default: default:
......
...@@ -47,9 +47,7 @@ ...@@ -47,9 +47,7 @@
#include "dcn10/hw_translate_dcn10.h" #include "dcn10/hw_translate_dcn10.h"
#endif #endif
#include "dcn20/hw_translate_dcn20.h" #include "dcn20/hw_translate_dcn20.h"
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#include "dcn21/hw_translate_dcn21.h" #include "dcn21/hw_translate_dcn21.h"
#endif
#include "diagnostics/hw_translate_diag.h" #include "diagnostics/hw_translate_diag.h"
...@@ -92,11 +90,9 @@ bool dal_hw_translate_init( ...@@ -92,11 +90,9 @@ bool dal_hw_translate_init(
case DCN_VERSION_2_0: case DCN_VERSION_2_0:
dal_hw_translate_dcn20_init(translate); dal_hw_translate_dcn20_init(translate);
return true; return true;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case DCN_VERSION_2_1: case DCN_VERSION_2_1:
dal_hw_translate_dcn21_init(translate); dal_hw_translate_dcn21_init(translate);
return true; return true;
#endif
#endif #endif
default: default:
......
...@@ -87,9 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); ...@@ -87,9 +87,7 @@ void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
struct resource_pool; struct resource_pool;
struct dc_state; struct dc_state;
struct resource_context; struct resource_context;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
struct clk_bw_params; struct clk_bw_params;
#endif
struct resource_funcs { struct resource_funcs {
void (*destroy)(struct resource_pool **pool); void (*destroy)(struct resource_pool **pool);
...@@ -143,11 +141,9 @@ struct resource_funcs { ...@@ -143,11 +141,9 @@ struct resource_funcs {
struct dc_state *context, struct dc_state *context,
display_e2e_pipe_params_st *pipes, display_e2e_pipe_params_st *pipes,
int pipe_cnt); int pipe_cnt);
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
void (*update_bw_bounding_box)( void (*update_bw_bounding_box)(
struct dc *dc, struct dc *dc,
struct clk_bw_params *bw_params); struct clk_bw_params *bw_params);
#endif
}; };
......
...@@ -31,7 +31,6 @@ ...@@ -31,7 +31,6 @@
#define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Constants */ /* Constants */
#define DDR4_DRAM_WIDTH 64 #define DDR4_DRAM_WIDTH 64
#define WM_A 0 #define WM_A 0
...@@ -39,12 +38,10 @@ ...@@ -39,12 +38,10 @@
#define WM_C 2 #define WM_C 2
#define WM_D 3 #define WM_D 3
#define WM_SET_COUNT 4 #define WM_SET_COUNT 4
#endif
#define DCN_MINIMUM_DISPCLK_Khz 100000 #define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000 #define DCN_MINIMUM_DPPCLK_Khz 100000
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
/* Will these bw structures be ASIC specific? */ /* Will these bw structures be ASIC specific? */
#define MAX_NUM_DPM_LVL 8 #define MAX_NUM_DPM_LVL 8
...@@ -152,7 +149,6 @@ struct clk_bw_params { ...@@ -152,7 +149,6 @@ struct clk_bw_params {
struct clk_limit_table clk_table; struct clk_limit_table clk_table;
struct wm_table wm_table; struct wm_table wm_table;
}; };
#endif
/* Public interfaces */ /* Public interfaces */
struct clk_states { struct clk_states {
...@@ -193,9 +189,7 @@ struct clk_mgr { ...@@ -193,9 +189,7 @@ struct clk_mgr {
bool psr_allow_active_cache; bool psr_allow_active_cache;
int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes
int dentist_vco_freq_khz; int dentist_vco_freq_khz;
#ifdef CONFIG_DRM_AMD_DC_DCN2_1
struct clk_bw_params *bw_params; struct clk_bw_params *bw_params;
#endif
}; };
/* forward declarations */ /* forward declarations */
......
...@@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st { ...@@ -40,11 +40,9 @@ struct cstate_pstate_watermarks_st {
struct dcn_watermarks { struct dcn_watermarks {
uint32_t pte_meta_urgent_ns; uint32_t pte_meta_urgent_ns;
uint32_t urgent_ns; uint32_t urgent_ns;
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_nom;
uint32_t frac_urg_bw_flip; uint32_t frac_urg_bw_flip;
int32_t urgent_latency_ns; int32_t urgent_latency_ns;
#endif
struct cstate_pstate_watermarks_st cstate_pstate; struct cstate_pstate_watermarks_st cstate_pstate;
}; };
......
...@@ -337,9 +337,7 @@ struct hw_sequencer_funcs { ...@@ -337,9 +337,7 @@ struct hw_sequencer_funcs {
enum dc_clock_type clock_type, enum dc_clock_type clock_type,
struct dc_clock_config *clock_cfg); struct dc_clock_config *clock_cfg);
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
bool (*s0i3_golden_init_wa)(struct dc *dc); bool (*s0i3_golden_init_wa)(struct dc *dc);
#endif
}; };
void color_space_to_black_color( void color_space_to_black_color(
......
...@@ -74,11 +74,9 @@ IRQ_DCN2 = irq_service_dcn20.o ...@@ -74,11 +74,9 @@ IRQ_DCN2 = irq_service_dcn20.o
AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2)) AMD_DAL_IRQ_DCN2 = $(addprefix $(AMDDALPATH)/dc/irq/dcn20/,$(IRQ_DCN2))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2) AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN2)
endif
############################################################################### ###############################################################################
# DCN 21 # DCN 21
############################################################################### ###############################################################################
ifdef CONFIG_DRM_AMD_DC_DCN2_1
IRQ_DCN21 = irq_service_dcn21.o IRQ_DCN21 = irq_service_dcn21.o
AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21)) AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
......
...@@ -163,11 +163,9 @@ enum { ...@@ -163,11 +163,9 @@ enum {
#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0) #define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0)) #define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN)) #define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
#define RENOIR_A0 0x91 #define RENOIR_A0 0x91
#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir #define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF)) #define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
#endif
/* /*
* ASIC chip ID * ASIC chip ID
......
...@@ -47,9 +47,7 @@ enum dce_version { ...@@ -47,9 +47,7 @@ enum dce_version {
DCN_VERSION_1_0, DCN_VERSION_1_0,
DCN_VERSION_1_01, DCN_VERSION_1_01,
DCN_VERSION_2_0, DCN_VERSION_2_0,
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
DCN_VERSION_2_1, DCN_VERSION_2_1,
#endif
DCN_VERSION_MAX DCN_VERSION_MAX
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment