Commit 3a1627b0 authored by Nicholas Kazlauskas's avatar Nicholas Kazlauskas Committed by Alex Deucher

drm/amd/display: Add DMUB support to DC

DC will use DMUB for command submission and flow control during
initialization.

Register offloading as well as submitting some BIOS commands are part
of the DC internal interface but are guarded behind debug options.

It won't be functional in amdgpu_dm yet since we don't pass the
DMUB service to DC for use.
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
Acked-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 976e51a7
......@@ -70,5 +70,9 @@ AMD_DM_REG_UPDATE = $(addprefix $(AMDDALPATH)/dc/,dc_helper.o)
AMD_DISPLAY_FILES += $(AMD_DISPLAY_CORE)
AMD_DISPLAY_FILES += $(AMD_DM_REG_UPDATE)
ifdef CONFIG_DRM_AMD_DC_DMUB
DC_DMUB += dc_dmub_srv.o
AMD_DISPLAY_DMUB = $(addprefix $(AMDDALPATH)/dc/,$(DC_DMUB))
AMD_DISPLAY_FILES += $(AMD_DISPLAY_DMUB)
endif
......@@ -37,6 +37,10 @@
#include "bios_parser_types_internal2.h"
#include "amdgpu.h"
#ifdef CONFIG_DRM_AMD_DC_DMUB
#include "dc_dmub_srv.h"
#include "dc.h"
#endif
#define DC_LOGGER \
bp->base.ctx->logger
......@@ -103,6 +107,21 @@ static void init_dig_encoder_control(struct bios_parser *bp)
}
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* Submit a DIGx encoder control command through the DMCUB ring buffer
 * and block until the firmware has finished processing it.
 */
static void encoder_control_dmcub(
	struct dc_dmub_srv *dmcub,
	struct dig_encoder_stream_setup_parameters_v1_5 *dig)
{
	struct dmub_rb_cmd_digx_encoder_control cmd = { 0 };

	cmd.header.type = DMUB_CMD__DIGX_ENCODER_CONTROL;
	cmd.encoder_control.dig.stream_param = *dig;

	dc_dmub_srv_cmd_queue(dmcub, &cmd.header);
	dc_dmub_srv_cmd_execute(dmcub);
	dc_dmub_srv_wait_idle(dmcub);
}
#endif
static enum bp_result encoder_control_digx_v1_5(
struct bios_parser *bp,
struct bp_encoder_control *cntl)
......@@ -154,6 +173,13 @@ static enum bp_result encoder_control_digx_v1_5(
default:
break;
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
encoder_control_dmcub(bp->base.ctx->dmub_srv, &params);
return BP_RESULT_OK;
}
#endif
if (EXEC_BIOS_CMD_TABLE(digxencodercontrol, params))
result = BP_RESULT_OK;
......@@ -190,7 +216,21 @@ static void init_transmitter_control(struct bios_parser *bp)
break;
}
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* Submit a DIG1 transmitter control command through the DMCUB ring buffer
 * and block until the firmware has finished processing it.
 */
static void transmitter_control_dmcub(
	struct dc_dmub_srv *dmcub,
	struct dig_transmitter_control_parameters_v1_6 *dig)
{
	/* Zero-initialize so no uninitialized stack bytes (padding, unset
	 * header fields) are copied into the shared ring buffer; matches
	 * encoder_control_dmcub() and set_pixel_clock_dmcub().
	 */
	struct dmub_rb_cmd_dig1_transmitter_control transmitter_control = { 0 };

	transmitter_control.header.type = DMUB_CMD__DIG1_TRANSMITTER_CONTROL;
	transmitter_control.transmitter_control.dig = *dig;

	dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
	dc_dmub_srv_cmd_execute(dmcub);
	dc_dmub_srv_wait_idle(dmcub);
}
#endif
static enum bp_result transmitter_control_v1_6(
struct bios_parser *bp,
struct bp_transmitter_control *cntl)
......@@ -223,6 +263,14 @@ static enum bp_result transmitter_control_v1_6(
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
transmitter_control_dmcub(bp->base.ctx->dmub_srv, &ps.param);
return BP_RESULT_OK;
}
#endif
/*color_depth not used any more, driver has deep color factor in the Phyclk*/
if (EXEC_BIOS_CMD_TABLE(dig1transmittercontrol, ps))
result = BP_RESULT_OK;
......@@ -255,7 +303,21 @@ static void init_set_pixel_clock(struct bios_parser *bp)
}
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* Program the pixel clock by sending a SET_PIXEL_CLOCK command to the
 * DMCUB firmware and waiting for completion.
 */
static void set_pixel_clock_dmcub(
	struct dc_dmub_srv *dmcub,
	struct set_pixel_clock_parameter_v1_7 *clk)
{
	struct dmub_rb_cmd_set_pixel_clock cmd = { 0 };

	cmd.header.type = DMUB_CMD__SET_PIXEL_CLOCK;
	cmd.pixel_clock.clk = *clk;

	dc_dmub_srv_cmd_queue(dmcub, &cmd.header);
	dc_dmub_srv_cmd_execute(dmcub);
	dc_dmub_srv_wait_idle(dmcub);
}
#endif
static enum bp_result set_pixel_clock_v7(
struct bios_parser *bp,
......@@ -331,6 +393,13 @@ static enum bp_result set_pixel_clock_v7(
if (bp_params->signal_type == SIGNAL_TYPE_DVI_DUAL_LINK)
clk.miscinfo |= PIXEL_CLOCK_V7_MISC_DVI_DUALLINK_EN;
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
set_pixel_clock_dmcub(bp->base.ctx->dmub_srv, &clk);
return BP_RESULT_OK;
}
#endif
if (EXEC_BIOS_CMD_TABLE(setpixelclock, clk))
result = BP_RESULT_OK;
}
......@@ -584,7 +653,21 @@ static void init_enable_disp_power_gating(
break;
}
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* Submit a display power-gating command through the DMCUB ring buffer
 * and block until the firmware has finished processing it.
 */
static void enable_disp_power_gating_dmcub(
	struct dc_dmub_srv *dmcub,
	struct enable_disp_power_gating_parameters_v2_1 *pwr)
{
	/* Zero-initialize so no uninitialized stack bytes (padding, unset
	 * header fields) are copied into the shared ring buffer; matches
	 * encoder_control_dmcub() and set_pixel_clock_dmcub().
	 */
	struct dmub_rb_cmd_enable_disp_power_gating power_gating = { 0 };

	power_gating.header.type = DMUB_CMD__ENABLE_DISP_POWER_GATING;
	power_gating.power_gating.pwr = *pwr;

	dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
	dc_dmub_srv_cmd_execute(dmcub);
	dc_dmub_srv_wait_idle(dmcub);
}
#endif
static enum bp_result enable_disp_power_gating_v2_1(
struct bios_parser *bp,
enum controller_id crtc_id,
......@@ -604,6 +687,14 @@ static enum bp_result enable_disp_power_gating_v2_1(
ps.param.enable =
bp->cmd_helper->disp_power_gating_action_to_atom(action);
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (bp->base.ctx->dc->ctx->dmub_srv &&
bp->base.ctx->dc->debug.dmub_command_table) {
enable_disp_power_gating_dmcub(bp->base.ctx->dmub_srv,
&ps.param);
return BP_RESULT_OK;
}
#endif
if (EXEC_BIOS_CMD_TABLE(enabledisppowergating, ps.param))
result = BP_RESULT_OK;
......
......@@ -59,6 +59,10 @@
#include "dc_link_dp.h"
#ifdef CONFIG_DRM_AMD_DC_DMUB
#include "dc_dmub_srv.h"
#endif
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
#include "dsc.h"
#endif
......@@ -2406,6 +2410,10 @@ void dc_set_power_state(
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
dc_resource_state_construct(dc, dc->current_state);
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (dc->ctx->dmub_srv)
dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
#endif
dc->hwss.init_hw(dc);
......
......@@ -112,6 +112,9 @@ struct dc_caps {
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
#ifdef CONFIG_DRM_AMD_DC_DMUB
bool dmcub_support;
#endif
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
bool hw_3d_lut;
#endif
......@@ -401,6 +404,11 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
bool disable_tri_buf;
#ifdef CONFIG_DRM_AMD_DC_DMUB
bool dmub_offload_enabled;
bool dmcub_emulation;
bool dmub_command_table; /* for testing only */
#endif
struct dc_bw_validation_profile bw_val_profile;
#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
bool disable_fec;
......@@ -558,6 +566,10 @@ struct dc_init_data {
struct dc_bios *vbios_override;
enum dce_environment dce_environment;
#ifdef CONFIG_DRM_AMD_DC_DMUB
struct dmub_offload_funcs *dmub_if;
struct dc_reg_helper_state *dmub_offload;
#endif
struct dc_config flags;
uint32_t log_mask;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
......
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../dmub/inc/dmub_srv.h"
/* Initialize an already-allocated DC DMUB service wrapper. */
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
				  struct dmub_srv *dmub)
{
	dc_srv->ctx = dc->ctx;
	dc_srv->dmub = dmub;
}
/* Allocate and initialize a DC DMUB service wrapper.
 * Returns NULL (after breaking to the debugger) on allocation failure.
 */
struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub)
{
	struct dc_dmub_srv *dc_srv = kzalloc(sizeof(*dc_srv), GFP_KERNEL);

	if (!dc_srv) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_dmub_srv_construct(dc_srv, dc, dmub);

	return dc_srv;
}
/* Free the DC DMUB service wrapper and NULL out the caller's pointer. */
void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
{
	struct dc_dmub_srv *srv = *dmub_srv;

	if (srv == NULL)
		return;

	kfree(srv);
	*dmub_srv = NULL;
}
/*
 * dc_dmub_srv_cmd_queue - queue a command into the DMUB ring buffer.
 *
 * If the ring buffer is full, the already-queued commands are executed
 * and drained, then the queue is retried once. Any other failure (or a
 * second queue-full) is logged as an error and the command is dropped.
 */
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
struct dmub_cmd_header *cmd)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
/* dc_ctx is otherwise unused here — presumably referenced by the
 * DC_ERROR() macro; confirm against the logger macro definition. */
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
status = dmub_srv_cmd_queue(dmub, cmd);
if (status == DMUB_STATUS_OK)
return;
/* Only a full queue is recoverable; anything else is a hard error. */
if (status != DMUB_STATUS_QUEUE_FULL)
goto error;
/* Execute and wait for queue to become empty again. */
dc_dmub_srv_cmd_execute(dc_dmub_srv);
dc_dmub_srv_wait_idle(dc_dmub_srv);
/* Requeue the command. */
status = dmub_srv_cmd_queue(dmub, cmd);
if (status == DMUB_STATUS_OK)
return;
error:
DC_ERROR("Error queuing DMUB command: status=%d\n", status);
}
void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK)
DC_ERROR("Error starting DMUB exeuction: status=%d\n", status);
}
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
status = dmub_srv_wait_for_idle(dmub, 100000);
if (status != DMUB_STATUS_OK)
DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
}
void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
enum dmub_status status;
status = dmub_srv_wait_for_phy_init(dmub, 1000000);
if (status != DMUB_STATUS_OK)
DC_ERROR("Error waiting for DMUB phy init: status=%d\n",
status);
}
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef _DMUB_DC_SRV_H_
#define _DMUB_DC_SRV_H_
#include "os_types.h"
#include "../dmub/inc/dmub_cmd.h"
struct dmub_srv;
struct dmub_cmd_header;
/* State for gathering register programming into DMUB command buffers
 * instead of issuing MMIO directly (see the reg_sequence_* helpers).
 */
struct dc_reg_helper_state {
bool gather_in_progress; /* true while REG_SET/REG_UPDATE output is being packed */
uint32_t same_addr_count; /* consecutive packed writes to the same address */
bool should_burst_write; /* switch the next pack to a burst-write command */
union dmub_rb_cmd cmd_data; /* command currently being assembled */
unsigned int reg_seq_count; /* entries packed into cmd_data so far */
};
/* DC-side wrapper around the DMUB firmware service. */
struct dc_dmub_srv {
struct dmub_srv *dmub; /* underlying DMUB service */
struct dc_reg_helper_state reg_helper_offload; /* register offload state */
struct dc_context *ctx; /* owning DC context (used for logging) */
void *dm; /* opaque display-manager handle */
};
/* Queue a command; flushes and retries once if the ring buffer is full. */
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
struct dmub_cmd_header *cmd);
/* Kick off execution of all queued commands. */
void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
/* Block until the ring buffer is empty (100ms timeout). */
void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
/* Block until DMUB reports PHY init done (1s timeout). */
void dc_dmub_srv_wait_phy_init(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
......@@ -32,6 +32,76 @@
#include "dm_services.h"
#include <stdarg.h>
#ifdef CONFIG_DRM_AMD_DC_DMUB
#include "dc.h"
#include "dc_dmub_srv.h"
/* Queue the read-modify-write sequence accumulated in @offload and reset
 * the accumulator so a fresh sequence can be gathered.
 */
static inline void submit_dmub_read_modify_write(
struct dc_reg_helper_state *offload,
const struct dc_context *ctx)
{
struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
bool gather = false;
/* If the sequence filled up with writes to a single address, the next
 * pack should switch to the cheaper burst-write command form. */
offload->should_burst_write =
(offload->same_addr_count == (DMUB_READ_MODIFY_WRITE_SEQ__MAX - 1));
cmd_buf->header.payload_bytes =
sizeof(struct dmub_cmd_read_modify_write_sequence) * offload->reg_seq_count;
/* Temporarily drop the gather flag so any register accesses performed
 * while queuing go straight to MMIO instead of being re-packed. */
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
/* Start a fresh sequence. */
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
offload->same_addr_count = 0;
}
/* Queue the burst-write command accumulated in @offload and reset the
 * accumulator so a fresh sequence can be gathered.
 */
static inline void submit_dmub_burst_write(
struct dc_reg_helper_state *offload,
const struct dc_context *ctx)
{
struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
bool gather = false;
cmd_buf->header.payload_bytes =
sizeof(uint32_t) * offload->reg_seq_count;
/* Temporarily drop the gather flag so any register accesses performed
 * while queuing go straight to MMIO instead of being re-packed. */
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
/* Start a fresh sequence. */
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
}
/* Queue the register-wait command accumulated in @offload and reset the
 * accumulator so a fresh sequence can be gathered.
 */
static inline void submit_dmub_reg_wait(
struct dc_reg_helper_state *offload,
const struct dc_context *ctx)
{
struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
bool gather = false;
/* Temporarily drop the gather flag so any register accesses performed
 * while queuing go straight to MMIO instead of being re-packed. */
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
/* NOTE(review): unlike the sibling submit helpers, the gather flag is
 * restored after the reset here — ordering looks harmless but confirm. */
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
}
#endif
struct dc_reg_value_masks {
uint32_t value;
uint32_t mask;
......@@ -77,6 +147,100 @@ static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
}
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* Queue the pending read-modify-write sequence and immediately kick off
 * execution of the ring buffer.
 */
static void dmub_flush_buffer_execute(
struct dc_reg_helper_state *offload,
const struct dc_context *ctx)
{
submit_dmub_read_modify_write(offload, ctx);
dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
/* Queue the pending burst-write command and immediately kick off
 * execution of the ring buffer.
 */
static void dmub_flush_burst_write_buffer_execute(
struct dc_reg_helper_state *offload,
const struct dc_context *ctx)
{
submit_dmub_burst_write(offload, ctx);
dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
/* Try to pack a full-register write into the current burst-write command.
 * Returns true if the value was packed; false if the write targets a
 * different address than the burst in progress (caller must fall back
 * to read-modify-write packing).
 */
static bool dmub_reg_value_burst_set_pack(const struct dc_context *ctx, uint32_t addr,
uint32_t reg_val)
{
struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
struct dmub_rb_cmd_burst_write *cmd_buf = &offload->cmd_data.burst_write;
/* flush command if buffer is full */
if (offload->reg_seq_count == DMUB_BURST_WRITE_VALUES__MAX)
dmub_flush_burst_write_buffer_execute(offload, ctx);
/* A burst write targets a single address; a write elsewhere ends the
 * burst, so flush it and report failure to the caller. */
if (offload->cmd_data.cmd_common.header.type == DMUB_CMD__REG_SEQ_BURST_WRITE &&
addr != cmd_buf->addr) {
dmub_flush_burst_write_buffer_execute(offload, ctx);
return false;
}
cmd_buf->header.type = DMUB_CMD__REG_SEQ_BURST_WRITE;
cmd_buf->addr = addr;
cmd_buf->write_values[offload->reg_seq_count] = reg_val;
offload->reg_seq_count++;
return true;
}
/* Pack one masked register update into the current read-modify-write
 * sequence (or divert it into a burst write when one is active).
 * Returns the value that would have been written, so callers can keep
 * their existing return-value contract.
 */
static uint32_t dmub_reg_value_pack(const struct dc_context *ctx, uint32_t addr,
struct dc_reg_value_masks *field_value_mask)
{
struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
struct dmub_rb_cmd_read_modify_write *cmd_buf = &offload->cmd_data.read_modify_write;
struct dmub_cmd_read_modify_write_sequence *seq;
/* flush command if buffer is full */
if (offload->cmd_data.cmd_common.header.type != DMUB_CMD__REG_SEQ_BURST_WRITE &&
offload->reg_seq_count == DMUB_READ_MODIFY_WRITE_SEQ__MAX)
dmub_flush_buffer_execute(offload, ctx);
/* Prefer the burst form while it is active; fall back here if the
 * burst ended (write to a different address). */
if (offload->should_burst_write) {
if (dmub_reg_value_burst_set_pack(ctx, addr, field_value_mask->value))
return field_value_mask->value;
else
offload->should_burst_write = false;
}
/* pack commands */
cmd_buf->header.type = DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE;
seq = &cmd_buf->seq[offload->reg_seq_count];
/* Track runs of writes to the same address so submit can decide to
 * switch to burst-write mode. */
if (offload->reg_seq_count) {
if (cmd_buf->seq[offload->reg_seq_count - 1].addr == addr)
offload->same_addr_count++;
else
offload->same_addr_count = 0;
}
seq->addr = addr;
seq->modify_mask = field_value_mask->mask;
seq->modify_value = field_value_mask->value;
offload->reg_seq_count++;
return field_value_mask->value;
}
/* Pack a register-polling request into the offload command buffer; the
 * fields are consumed by the DMUB firmware, which polls @addr until the
 * masked value matches or @time_out_us expires.
 */
static void dmub_reg_wait_done_pack(const struct dc_context *ctx, uint32_t addr,
uint32_t mask, uint32_t shift, uint32_t condition_value, uint32_t time_out_us)
{
struct dc_reg_helper_state *offload = &ctx->dmub_srv->reg_helper_offload;
struct dmub_rb_cmd_reg_wait *cmd_buf = &offload->cmd_data.reg_wait;
cmd_buf->header.type = DMUB_CMD__REG_REG_WAIT;
cmd_buf->reg_wait.addr = addr;
/* Shift the field value into register position, clipped to the mask. */
cmd_buf->reg_wait.condition_field_value = mask & (condition_value << shift);
cmd_buf->reg_wait.mask = mask;
cmd_buf->reg_wait.time_out_us = time_out_us;
}
#endif
uint32_t generic_reg_update_ex(const struct dc_context *ctx,
uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
......@@ -93,6 +257,13 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
va_end(ap);
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (ctx->dmub_srv &&
ctx->dmub_srv->reg_helper_offload.gather_in_progress)
return dmub_reg_value_pack(ctx, addr, &field_value_mask);
/* todo: return void so we can decouple code running in driver from register states */
#endif
/* mmio write directly */
reg_val = dm_read_reg(ctx, addr);
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
......@@ -118,6 +289,13 @@ uint32_t generic_reg_set_ex(const struct dc_context *ctx,
/* mmio write directly */
reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (ctx->dmub_srv &&
ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
return dmub_reg_value_burst_set_pack(ctx, addr, reg_val);
/* todo: return void so we can decouple code running in driver from register states */
}
#endif
dm_write_reg(ctx, addr, reg_val);
return reg_val;
}
......@@ -134,6 +312,16 @@ uint32_t dm_read_reg_func(
return 0;
}
#endif
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (ctx->dmub_srv &&
ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
!ctx->dmub_srv->reg_helper_offload.should_burst_write) {
ASSERT(false);
return 0;
}
#endif
value = cgs_read_register(ctx->cgs_device, address);
trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
......@@ -299,6 +487,15 @@ void generic_reg_wait(const struct dc_context *ctx,
uint32_t reg_val;
int i;
#ifdef CONFIG_DRM_AMD_DC_DMUB
if (ctx->dmub_srv &&
ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
dmub_reg_wait_done_pack(ctx, addr, mask, shift, condition_value,
delay_between_poll_us * time_out_num_tries);
return;
}
#endif
/* something is terribly wrong if time out is > 200ms. (5Hz) */
ASSERT(delay_between_poll_us * time_out_num_tries <= 3000000);
......@@ -345,6 +542,13 @@ uint32_t generic_read_indirect_reg(const struct dc_context *ctx,
uint32_t index)
{
uint32_t value = 0;
#ifdef CONFIG_DRM_AMD_DC_DMUB
// when reg read, there should not be any offload.
if (ctx->dmub_srv &&
ctx->dmub_srv->reg_helper_offload.gather_in_progress) {
ASSERT(false);
}
#endif
dm_write_reg(ctx, addr_index, index);
value = dm_read_reg(ctx, addr_data);
......@@ -382,3 +586,72 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* Begin gathering register programming into a DMUB command buffer.
 *
 * While gathering is active, REG_SET/REG_UPDATE style macros pack their
 * writes into a reg-sequence command instead of performing MMIO directly.
 * No-op unless a DMUB service exists and offload is enabled via debug.
 */
void reg_sequence_start_gather(const struct dc_context *ctx)
{
	struct dc_reg_helper_state *offload;

	if (!ctx->dmub_srv || !ctx->dc->debug.dmub_offload_enabled)
		return;

	offload = &ctx->dmub_srv->reg_helper_offload;

	/* caller sequence mismatch. need to debug caller. offload will not work!!! */
	ASSERT(!offload->gather_in_progress);

	offload->gather_in_progress = true;
}
/* Stop gathering and submit whatever register sequence was collected
 * since reg_sequence_start_gather(), then kick off DMUB execution.
 * No-op if there is no DMUB service or no gather was in progress.
 */
void reg_sequence_start_execute(const struct dc_context *ctx)
{
struct dc_reg_helper_state *offload;
if (!ctx->dmub_srv)
return;
offload = &ctx->dmub_srv->reg_helper_offload;
if (offload && offload->gather_in_progress) {
offload->gather_in_progress = false;
offload->should_burst_write = false;
/* Submit whichever command type was being assembled last. */
switch (offload->cmd_data.cmd_common.header.type) {
case DMUB_CMD__REG_SEQ_READ_MODIFY_WRITE:
submit_dmub_read_modify_write(offload, ctx);
break;
case DMUB_CMD__REG_REG_WAIT:
submit_dmub_reg_wait(offload, ctx);
break;
case DMUB_CMD__REG_SEQ_BURST_WRITE:
submit_dmub_burst_write(offload, ctx);
break;
default:
/* Nothing was gathered; nothing to execute. */
return;
}
dc_dmub_srv_cmd_execute(ctx->dmub_srv);
}
}
/* Wait for the last submitted register sequence to be consumed by the
 * DMUB firmware. Skipped entirely when offload is disabled or the code
 * is running against DMCUB emulation.
 */
void reg_sequence_wait_done(const struct dc_context *ctx)
{
	if (!ctx->dmub_srv)
		return;

	if (!ctx->dc->debug.dmub_offload_enabled ||
	    ctx->dc->debug.dmcub_emulation)
		return;

	dc_dmub_srv_wait_idle(ctx->dmub_srv);
}
#endif
......@@ -48,6 +48,9 @@ struct dc_stream_state;
struct dc_link;
struct dc_sink;
struct dal;
#ifdef CONFIG_DRM_AMD_DC_DMUB
struct dc_dmub_srv;
#endif
/********************************
* Environment definitions
......@@ -109,6 +112,9 @@ struct dc_context {
uint32_t dc_sink_id_count;
uint32_t dc_stream_id_count;
uint64_t fbc_gpu_addr;
#ifdef CONFIG_DRM_AMD_DC_DMUB
struct dc_dmub_srv *dmub_srv;
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
struct cp_psp cp_psp;
#endif
......
......@@ -352,6 +352,9 @@ void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
uint32_t i;
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
#ifdef CONFIG_DRM_AMD_DC_DMUB
REG_SEQ_START();
#endif
for (i = 0 ; i < num; i++) {
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
......@@ -630,6 +633,10 @@ void dpp1_set_degamma(
BREAK_TO_DEBUGGER();
break;
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
REG_SEQ_SUBMIT();
REG_SEQ_WAIT_DONE();
#endif
}
void dpp1_degamma_ram_select(
......
......@@ -457,11 +457,19 @@ static bool optc1_enable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 1);
#ifdef CONFIG_DRM_AMD_DC_DMUB
REG_SEQ_START();
#endif
/* Enable CRTC */
REG_UPDATE_2(OTG_CONTROL,
OTG_DISABLE_POINT_CNTL, 3,
OTG_MASTER_EN, 1);
#ifdef CONFIG_DRM_AMD_DC_DMUB
REG_SEQ_SUBMIT();
REG_SEQ_WAIT_DONE();
#endif
return true;
}
......
......@@ -345,6 +345,11 @@ static void mpc20_program_ogam_pwl(
uint32_t i;
struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc);
#ifdef CONFIG_DRM_AMD_DC_DMUB
PERF_TRACE();
REG_SEQ_START();
#endif
for (i = 0 ; i < num; i++) {
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].red_reg);
REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].green_reg);
......@@ -463,6 +468,12 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id)
ASSERT(!mpc_disabled);
ASSERT(!mpc_idle);
}
#ifdef CONFIG_DRM_AMD_DC_DMUB
REG_SEQ_SUBMIT();
PERF_TRACE();
REG_SEQ_WAIT_DONE();
PERF_TRACE();
#endif
}
static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
......
......@@ -1678,6 +1678,9 @@ static bool construct(
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
#ifdef CONFIG_DRM_AMD_DC_DMUB
dc->caps.dmcub_support = true;
#endif
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
......
......@@ -40,6 +40,11 @@
#undef DEPRECATED
#ifdef CONFIG_DRM_AMD_DC_DMUB
struct dmub_srv;
struct dc_dmub_srv;
#endif
irq_handler_idx dm_register_interrupt(
struct dc_context *ctx,
struct dc_interrupt_params *int_params,
......@@ -139,6 +144,15 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
#ifdef CONFIG_DRM_AMD_DC_DMUB
struct dc_dmub_srv *dc_dmub_srv_create(struct dc *dc, struct dmub_srv *dmub);
void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv);
void reg_sequence_start_gather(const struct dc_context *ctx);
void reg_sequence_start_execute(const struct dc_context *ctx);
void reg_sequence_wait_done(const struct dc_context *ctx);
#endif
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
......
......@@ -485,4 +485,26 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
...);
#ifdef CONFIG_DRM_AMD_DC_DMUB
/* register offload macros
*
* instead of MMIO to register directly, in some cases we want
* to gather register sequence and execute the register sequence
* from another thread so we optimize time required for lengthy ops
*/
/* start gathering register sequence */
#define REG_SEQ_START() \
reg_sequence_start_gather(CTX)
/* start execution of register sequence gathered since REG_SEQ_START */
#define REG_SEQ_SUBMIT() \
reg_sequence_start_execute(CTX)
/* wait for the last REG_SEQ_SUBMIT to finish */
#define REG_SEQ_WAIT_DONE() \
reg_sequence_wait_done(CTX)
#endif
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
......@@ -29,6 +29,7 @@
#include <linux/kgdb.h>
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment