Commit 86bc2219 authored by Wayne Lin, committed by Alex Deucher

drm/amd/display: Support crc on specific region

[Why]
To support feature that calculates CRTC CRC value on specific
region (crc window).

[How]
1. Use debugfs to specify the CRTC CRC window
2. Use the vline0 IRQ to program the CRTC CRC window (see the usage sketch below)
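
A minimal userspace sketch of how the new debugfs knobs could be exercised
(illustrative only, not part of this patch). It assumes the CRTC CRC source has
already been enabled through the existing DRM crc debugfs interface, and that the
window files live under the per-CRTC crc directory; the path, window values and
helper below are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative helper: write one decimal value to a crc_win_* debugfs file. */
static void set_attr(const char *dir, const char *name, unsigned long val)
{
	char path[128], buf[32];
	int fd;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return;
	snprintf(buf, sizeof(buf), "%lu\n", val);
	write(fd, buf, strlen(buf));
	close(fd);
}

int main(void)
{
	/* Assumed location of the per-CRTC crc debugfs directory. */
	const char *dir = "/sys/kernel/debug/dri/0/crtc-0/crc";

	set_attr(dir, "crc_win_x_start", 0);
	set_attr(dir, "crc_win_y_start", 0);
	set_attr(dir, "crc_win_x_end", 256);
	set_attr(dir, "crc_win_y_end", 256);
	/* Nonzero write arms the window; it is programmed at the next vline0 IRQ. */
	set_attr(dir, "crc_win_update", 1);
	return 0;
}
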
Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Acked-by: Eryk Brol <eryk.brol@amd.com>
Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8e7b6fee
@@ -38,4 +38,18 @@ config DEBUG_KERNEL_DC
help
Choose this option if you want to hit kgdb_break in assert.
config DRM_AMD_SECURE_DISPLAY
bool "Enable secure display support"
default n
depends on DEBUG_FS
depends on DRM_AMD_DC_DCN
help
Choose this option if you want to support secure display.
This option enables the calculation of the CRC of a specific
region (the CRC window) via debugfs, in cooperation with
specific DMCU firmware.
endmenu
@@ -581,6 +581,31 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
/**
* dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
* DCN generation ASICs
* @interrupt_params: interrupt parameters
*
* Used to set the CRC window and read out the CRC value at the vertical line 0 position
*/
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
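/* The vline0 IRQ sources are laid out consecutively per OTG, so subtracting
 * IRQ_TYPE_VLINE0 from irq_src recovers the OTG instance that raised this
 * interrupt.
 */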
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
if (!acrtc)
return;
amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
}
#endif
#endif
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
@@ -2957,6 +2982,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
adev, &int_params, dm_crtc_high_irq, c_irq_params);
}
/* Use otg vertical line interrupt */
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
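/* OTG vertical interrupt0 source IDs are consecutive, one per CRTC; register a
 * vline0 handler for each of them.
 */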
for (i = DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL;
i <= DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL
+ adev->mode_info.num_crtc - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vline0_irq);
if (r) {
DRM_ERROR("Failed to add vline0 irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vline0_params[int_params.irq_source
- DC_IRQ_SOURCE_DC1_VLINE0];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
}
#endif
/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
* to trigger at end of each vblank, regardless of state of the lock,
@@ -5512,12 +5565,20 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
/* TODO: Duplicate dc_stream after the stream object is flattened */
return &state->base;
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
crtc_debugfs_init(crtc);
return 0;
}
#endif
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
@@ -5603,6 +5664,9 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
.late_register = amdgpu_dm_crtc_late_register,
#endif
};
static enum drm_connector_status
@@ -7502,8 +7566,19 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
adev,
&adev->pageflip_irq,
irq_type);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_irq_get(
adev,
&adev->vline0_irq,
irq_type);
#endif
} else {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_irq_put(
adev,
&adev->vline0_irq,
irq_type);
#endif
amdgpu_irq_put(
adev,
&adev->pageflip_irq,
@@ -8650,9 +8725,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
bool configure_crc = false;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
#endif
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (new_crtc_state->active &&
@@ -8673,10 +8748,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state,
cur_crc_src);
configure_crc = true;
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc))
configure_crc = false;
#endif
}
if (configure_crc)
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, cur_crc_src);
#endif
}
}
@@ -311,6 +311,15 @@ struct amdgpu_display_manager {
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
/**
* @vline0_params:
*
* OTG vertical interrupt0 IRQ parameters, passed to registered
* handlers when triggered.
*/
struct common_irq_params
vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];
/**
* @vupdate_params:
*
@@ -81,6 +81,36 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
{
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
acrtc->dm_irq_params.crc_window.x_start = 0;
acrtc->dm_irq_params.crc_window.y_start = 0;
acrtc->dm_irq_params.crc_window.x_end = 0;
acrtc->dm_irq_params.crc_window.y_end = 0;
acrtc->dm_irq_params.crc_window.activated = false;
acrtc->dm_irq_params.crc_window.update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
}
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
{
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
bool ret = false;
spin_lock_irq(&drm_dev->event_lock);
ret = acrtc->dm_irq_params.crc_window.activated;
spin_unlock_irq(&drm_dev->event_lock);
return ret;
}
#endif
int
amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
size_t *values_cnt)
@@ -234,6 +264,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
amdgpu_dm_set_crc_window_default(crtc);
#endif
if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
ret = -EINVAL;
goto cleanup;
@@ -336,3 +370,62 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
drm_crtc_accurate_vblank_count(crtc), crcs);
}
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
struct dc_stream_state *stream_state;
struct drm_device *drm_dev = NULL;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct crc_params *crc_window = NULL, tmp_window;
unsigned long flags;
if (crtc == NULL)
return;
acrtc = to_amdgpu_crtc(crtc);
drm_dev = crtc->dev;
spin_lock_irqsave(&drm_dev->event_lock, flags);
stream_state = acrtc->dm_irq_params.stream;
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
goto cleanup;
if (dm_is_crc_source_crtc(cur_crc_src)) {
if (acrtc->dm_irq_params.crc_window.activated) {
if (acrtc->dm_irq_params.crc_window.update_win) {
crc_window = &tmp_window;
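/* Program the same user-specified region into both CRC window A and window B. */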
tmp_window.windowa_x_start =
acrtc->dm_irq_params.crc_window.x_start;
tmp_window.windowa_y_start =
acrtc->dm_irq_params.crc_window.y_start;
tmp_window.windowa_x_end =
acrtc->dm_irq_params.crc_window.x_end;
tmp_window.windowa_y_end =
acrtc->dm_irq_params.crc_window.y_end;
tmp_window.windowb_x_start =
acrtc->dm_irq_params.crc_window.x_start;
tmp_window.windowb_y_start =
acrtc->dm_irq_params.crc_window.y_start;
tmp_window.windowb_x_end =
acrtc->dm_irq_params.crc_window.x_end;
tmp_window.windowb_y_end =
acrtc->dm_irq_params.crc_window.y_end;
dc_stream_configure_crc(stream_state->ctx->dc,
stream_state, crc_window, true, true);
acrtc->dm_irq_params.crc_window.update_win = false;
}
}
}
cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
#endif
@@ -39,6 +39,19 @@ enum amdgpu_dm_pipe_crc_source {
AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
};
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
struct crc_window_parm {
uint16_t x_start;
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
/* CRC window is activated or not */
bool activated;
/* Update crc window during vertical blank or not */
bool update_win;
};
#endif
static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source)
{
return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) &&
@@ -64,4 +77,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#define amdgpu_dm_crtc_handle_crc_irq(x)
#endif
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
#endif
#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
@@ -2538,6 +2538,202 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
#endif
}
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
/*
* Set crc window coordinate x start
*/
static int crc_win_x_start_set(void *data, u64 val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val;
acrtc->dm_irq_params.crc_window.update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
/*
* Get crc window coordinate x start
*/
static int crc_win_x_start_get(void *data, u64 *val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
*val = acrtc->dm_irq_params.crc_window.x_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get,
crc_win_x_start_set, "%llu\n");
/*
* Set crc window coordinate y start
*/
static int crc_win_y_start_set(void *data, u64 val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val;
acrtc->dm_irq_params.crc_window.update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
/*
* Get crc window coordinate y start
*/
static int crc_win_y_start_get(void *data, u64 *val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
*val = acrtc->dm_irq_params.crc_window.y_start;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get,
crc_win_y_start_set, "%llu\n");
/*
* Set crc window coordinate x end
*/
static int crc_win_x_end_set(void *data, u64 val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val;
acrtc->dm_irq_params.crc_window.update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
/*
* Get crc window coordinate x end
*/
static int crc_win_x_end_get(void *data, u64 *val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
*val = acrtc->dm_irq_params.crc_window.x_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get,
crc_win_x_end_set, "%llu\n");
/*
* Set crc window coordinate y end
*/
static int crc_win_y_end_set(void *data, u64 val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val;
acrtc->dm_irq_params.crc_window.update_win = false;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
/*
* Get crc window coordinate y end
*/
static int crc_win_y_end_get(void *data, u64 *val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
spin_lock_irq(&drm_dev->event_lock);
*val = acrtc->dm_irq_params.crc_window.y_end;
spin_unlock_irq(&drm_dev->event_lock);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
crc_win_y_end_set, "%llu\n");
/*
* Trigger to commit crc window
*/
static int crc_win_update_set(void *data, u64 val)
{
struct drm_crtc *crtc = data;
struct drm_device *drm_dev = crtc->dev;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
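/* A nonzero write both activates the CRC window and flags it for update; the
 * vline0 IRQ handler then programs the hardware window and clears update_win.
 */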
if (val) {
spin_lock_irq(&drm_dev->event_lock);
acrtc->dm_irq_params.crc_window.activated = true;
acrtc->dm_irq_params.crc_window.update_win = true;
spin_unlock_irq(&drm_dev->event_lock);
}
return 0;
}
/*
* Get crc window update flag
*/
static int crc_win_update_get(void *data, u64 *val)
{
*val = 0;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
crc_win_update_set, "%llu\n");
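/* Note: crc_win_update_get() always reports 0, so crc_win_update acts as a
 * write-only trigger; reading it back does not indicate whether a window is
 * currently armed.
 */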
void crtc_debugfs_init(struct drm_crtc *crtc)
{
struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
if (!dir)
return;
debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc,
&crc_win_x_start_fops);
debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc,
&crc_win_y_start_fops);
debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc,
&crc_win_x_end_fops);
debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc,
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
}
#endif
/*
* Writes DTN log state to the user supplied buffer.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
@@ -31,5 +31,8 @@
void connector_debugfs_init(struct amdgpu_dm_connector *connector);
void dtn_debugfs_init(struct amdgpu_device *adev);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void crtc_debugfs_init(struct drm_crtc *crtc);
#endif
#endif
@@ -37,6 +37,9 @@ struct dm_irq_params {
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source crc_src;
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
struct crc_window_parm crc_window;
#endif
#endif
};