Commit 42f1a013 authored by Dave Airlie

Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Add the amdgpu specific bits for timeline support
- Add internal interfaces for xgmi pstate support
- DC Z ordering fixes for planes
- Add support for NV12 planes in DC
- Add colorspace properties for planes in DC
- eDP optimizations when the GOP driver has already initialized eDP
- DC bandwidth validation tracing support
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190419150034.3473-1-alexander.deucher@amd.com
parents 6e865c72 f55be0be
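
For reference, a minimal userspace-side sketch of how the new timeline chunks are meant to be fed into AMDGPU_CS. Only the struct and chunk ID come from this patch's UAPI additions (bottom of this diff); the syncobj handle, the point value, and the helper name are hypothetical:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* Sketch: build the chunk that asks AMDGPU_CS to signal timeline
 * point 5 on a syncobj. timeline_handle is assumed to come from
 * DRM_IOCTL_SYNCOBJ_CREATE; actually submitting the CS is elided. */
static void build_timeline_signal_chunk(uint32_t timeline_handle,
					struct drm_amdgpu_cs_chunk_syncobj *dep,
					struct drm_amdgpu_cs_chunk *chunk)
{
	dep->handle = timeline_handle;
	dep->flags  = 0;
	dep->point  = 5;	/* point the job's fence will be attached to */

	chunk->chunk_id   = AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL;
	chunk->length_dw  = sizeof(*dep) / 4;
	chunk->chunk_data = (uintptr_t)dep;
}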
......@@ -436,6 +436,12 @@ struct amdgpu_cs_chunk {
void *kdata;
};
struct amdgpu_cs_post_dep {
struct drm_syncobj *syncobj;
struct dma_fence_chain *chain;
u64 point;
};
struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
......@@ -465,8 +471,8 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
unsigned num_post_dep_syncobjs;
struct drm_syncobj **post_dep_syncobjs;
unsigned num_post_deps;
struct amdgpu_cs_post_dep *post_deps;
};
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
......
......@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
break;
default:
......@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
for (i = 0; i < parser->num_post_dep_syncobjs; i++)
drm_syncobj_put(parser->post_dep_syncobjs[i]);
kfree(parser->post_dep_syncobjs);
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
kfree(parser->post_deps[i].chain);
}
kfree(parser->post_deps);
dma_fence_put(parser->fence);
......@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
}
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
uint32_t handle)
uint32_t handle, u64 point,
u64 flags)
{
int r;
struct dma_fence *fence;
r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
if (r)
int r;
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence);
......@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i, r;
struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
for (i = 0; i < num_deps; ++i) {
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
0, 0);
if (r)
return r;
}
return 0;
}
static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
int i, r;
syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
for (i = 0; i < num_deps; ++i) {
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
r = amdgpu_syncobj_lookup_and_add_to_sync(p,
syncobj_deps[i].handle,
syncobj_deps[i].point,
syncobj_deps[i].flags);
if (r)
return r;
}
return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i;
struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
p->post_dep_syncobjs = kmalloc_array(num_deps,
sizeof(struct drm_syncobj *),
GFP_KERNEL);
p->num_post_dep_syncobjs = 0;
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
if (!p->post_dep_syncobjs)
if (!p->post_deps)
return -ENOMEM;
for (i = 0; i < num_deps; ++i) {
p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
if (!p->post_dep_syncobjs[i])
p->post_deps[i].syncobj =
drm_syncobj_find(p->filp, deps[i].handle);
if (!p->post_deps[i].syncobj)
return -EINVAL;
p->num_post_dep_syncobjs++;
p->post_deps[i].chain = NULL;
p->post_deps[i].point = 0;
p->num_post_deps++;
}
return 0;
}
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk
*chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
int i;
syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
if (!p->post_deps)
return -ENOMEM;
for (i = 0; i < num_deps; ++i) {
struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
dep->chain = NULL;
if (syncobj_deps[i].point) {
dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
if (!dep->chain)
return -ENOMEM;
}
dep->syncobj = drm_syncobj_find(p->filp,
syncobj_deps[i].handle);
if (!dep->syncobj) {
kfree(dep->chain);
return -EINVAL;
}
dep->point = syncobj_deps[i].point;
p->num_post_deps++;
}
return 0;
}
......@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i];
if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
switch (chunk->chunk_id) {
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r;
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r)
return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r)
return r;
break;
}
}
......@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i)
drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
for (i = 0; i < p->num_post_deps; ++i) {
if (p->post_deps[i].chain && p->post_deps[i].point) {
drm_syncobj_add_point(p->post_deps[i].syncobj,
p->post_deps[i].chain,
p->fence, p->post_deps[i].point);
p->post_deps[i].chain = NULL;
} else {
drm_syncobj_replace_fence(p->post_deps[i].syncobj,
p->fence);
}
}
}
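
On the wait side, userspace can block on a specific point once the chain node above has been (or will be) attached; DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT covers the not-yet-submitted case. A hedged libdrm sketch — fd and handle are assumed to exist:

#include <stdint.h>
#include <xf86drm.h>

/* Sketch: wait for timeline point 5 on one syncobj, with no timeout.
 * fd is an open DRM fd, handle a timeline syncobj (both assumed). */
static int wait_for_point_5(int fd, uint32_t handle)
{
	uint64_t point = 5;
	uint32_t first_signaled;

	return drmSyncobjTimelineWait(fd, &handle, &point, 1, INT64_MAX,
				      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
				      &first_signaled);
}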
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
......
......@@ -75,9 +75,10 @@
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
* - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
* - 3.31.0 - Add support for per-flip tiling attribute changes with DC
* - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 31
#define KMS_DRIVER_MINOR 32
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
......
......@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
struct dma_fence *old, **ptr;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL)
......@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq, flags | AMDGPU_FENCE_FLAG_INT);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
rcu_read_lock();
old = dma_fence_get_rcu_safe(ptr);
rcu_read_unlock();
if (old) {
r = dma_fence_wait(old, false);
dma_fence_put(old);
if (r)
return r;
}
}
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
old = rcu_dereference_protected(*ptr, 1);
if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
dma_fence_wait(old, false);
}
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
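
The rewrite above replaces a bare rcu_dereference with dma_fence_get_rcu_safe(), which retries if the slot is recycled underneath it and takes a reference before the RCU read section ends, and it propagates the wait error instead of ignoring it. Condensed into an illustrative helper:

#include <linux/dma-fence.h>

/* Condensed form of the slot-wait pattern above; illustrative only. */
static int fence_slot_wait(struct dma_fence __rcu **ptr)
{
	struct dma_fence *old;
	int r = 0;

	rcu_read_lock();
	old = dma_fence_get_rcu_safe(ptr);	/* ref taken, or NULL */
	rcu_read_unlock();

	if (old) {
		r = dma_fence_wait(old, false);	/* non-interruptible */
		dma_fence_put(old);
	}
	return r;
}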
......
......@@ -24,6 +24,7 @@
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
static DEFINE_MUTEX(xgmi_mutex);
......@@ -216,7 +217,17 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
if (hive->pstate == pstate)
return 0;
/* Todo : sent the message to SMU for pstate change */
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
if (is_support_sw_smu(adev))
ret = smu_set_xgmi_pstate(&adev->smu, pstate);
if (ret)
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
return ret;
}
......
......@@ -701,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
bool fast_validate)
{
/*
* we want a breakdown of the various stages of validation, which the
* perf_trace macro doesn't support
*/
BW_VAL_TRACE_SETUP();
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx;
......@@ -711,6 +718,9 @@ bool dcn_validate_bandwidth(
float bw_limit;
PERFORMANCE_TRACE_START();
BW_VAL_TRACE_COUNT();
if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc);
......@@ -1013,8 +1023,11 @@ bool dcn_validate_bandwidth(
mode_support_and_system_configuration(v);
}
if (v->voltage_level != 5) {
BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
......@@ -1087,6 +1100,8 @@ bool dcn_validate_bandwidth(
break;
}
BW_VAL_TRACE_END_WATERMARKS();
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
......@@ -1177,6 +1192,10 @@ bool dcn_validate_bandwidth(
input_idx++;
}
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
} else if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
}
if (v->voltage_level == 0) {
......@@ -1196,6 +1215,7 @@ bool dcn_validate_bandwidth(
kernel_fpu_end();
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level != 5)
return true;
......
......@@ -597,7 +597,7 @@ uint32_t dc_link_bandwidth_kbps(
}
const struct dc_link_settings *dc_link_get_verified_link_cap(
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
......
......@@ -1548,8 +1548,7 @@ bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
/* We always use verified link settings */
link_setting = dc_link_get_verified_link_cap(link);
link_setting = dc_link_get_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
......@@ -2587,6 +2586,9 @@ void detect_edp_sink_caps(struct dc_link *link)
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
uint8_t link_rate_set;
retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0;
......@@ -2612,6 +2614,33 @@ void detect_edp_sink_caps(struct dc_link *link)
}
}
link->verified_link_cap = link->reported_link_cap;
// Read DPCD 00101h to find out the number of lanes currently set
core_link_read_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, sizeof(lane_count_set));
link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
/* If standard link rates are not being used,
* Read DPCD 00115h to find the link rate set used
*/
core_link_read_dpcd(link, DP_LINK_RATE_SET,
&link_rate_set, sizeof(link_rate_set));
if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
link->cur_link_settings.link_rate =
link->dpcd_caps.edp_supported_link_rates[link_rate_set];
link->cur_link_settings.link_rate_set = link_rate_set;
link->cur_link_settings.use_link_rate_set = true;
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
link->cur_link_settings.use_link_rate_set = false;
}
}
void dc_link_dp_enable_hpd(const struct dc_link *link)
......
......@@ -2067,12 +2067,14 @@ void dc_resource_state_construct(
* Checks HW resource availability and bandwidth requirement.
* @dc: dc struct for this driver
* @new_ctx: state to be validated
* @fast_validate: set to true if only yes/no to support matters
*
* Return: DC_OK if the result can be programmed. Otherwise, an error code.
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx)
struct dc_state *new_ctx,
bool fast_validate)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
......@@ -2127,7 +2129,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
return result;
......
......@@ -211,7 +211,8 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos);
if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
return;
if (vpos >= vupdate_line)
return;
......
......@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
#define DC_VER "3.2.25"
#define DC_VER "3.2.26"
#define MAX_SURFACES 3
#define MAX_PLANES 6
......@@ -66,8 +66,27 @@ struct dc_plane_cap {
uint32_t blends_with_above : 1;
uint32_t blends_with_below : 1;
uint32_t per_pixel_alpha : 1;
uint32_t supports_argb8888 : 1;
uint32_t supports_nv12 : 1;
struct {
uint32_t argb8888 : 1;
uint32_t nv12 : 1;
uint32_t fp16 : 1;
} pixel_format_support;
// max upscaling factor x1000
// upscaling factors are always >= 1
// for example, 1080p -> 8K is 4.0, or 4000 raw value
struct {
uint32_t argb8888;
uint32_t nv12;
uint32_t fp16;
} max_upscale_factor;
// max downscale factor x1000
// downscale factors are always <= 1
// for example, 8K -> 1080p is 0.25, or 250 raw value
struct {
uint32_t argb8888;
uint32_t nv12;
uint32_t fp16;
} max_downscale_factor;
};
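
The x1000 fixed-point encoding keeps cap checks in integer math. A hypothetical helper (not part of this patch) for the NV12 case:

/* Hypothetical helper (not in this patch): validate an NV12 scaling
 * ratio against the caps above. ratio_x1000 = (dst / src) * 1000,
 * e.g. 4000 for 1080p -> 8K, 250 for 8K -> 1080p. */
static bool nv12_scale_supported(const struct dc_plane_cap *cap,
				 uint32_t ratio_x1000)
{
	if (ratio_x1000 >= 1000)	/* upscale or 1:1 */
		return ratio_x1000 <= cap->max_upscale_factor.nv12;

	return ratio_x1000 >= cap->max_downscale_factor.nv12;	/* downscale */
}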
struct dc_caps {
......@@ -183,6 +202,7 @@ struct dc_config {
bool disable_disp_pll_sharing;
bool fbc_support;
bool optimize_edp_link_rate;
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization;
};
......@@ -226,6 +246,57 @@ struct dc_clocks {
bool p_state_change_support;
};
struct dc_bw_validation_profile {
bool enable;
unsigned long long total_ticks;
unsigned long long voltage_level_ticks;
unsigned long long watermark_ticks;
unsigned long long rq_dlg_ticks;
unsigned long long total_count;
unsigned long long skip_fast_count;
unsigned long long skip_pass_count;
unsigned long long skip_fail_count;
};
#define BW_VAL_TRACE_SETUP() \
unsigned long long end_tick = 0; \
unsigned long long voltage_level_tick = 0; \
unsigned long long watermark_tick = 0; \
unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
dm_get_timestamp(dc->ctx) : 0
#define BW_VAL_TRACE_COUNT() \
if (dc->debug.bw_val_profile.enable) \
dc->debug.bw_val_profile.total_count++
#define BW_VAL_TRACE_SKIP(status) \
if (dc->debug.bw_val_profile.enable) { \
if (!voltage_level_tick) \
voltage_level_tick = dm_get_timestamp(dc->ctx); \
dc->debug.bw_val_profile.skip_ ## status ## _count++; \
}
#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
if (dc->debug.bw_val_profile.enable) \
voltage_level_tick = dm_get_timestamp(dc->ctx)
#define BW_VAL_TRACE_END_WATERMARKS() \
if (dc->debug.bw_val_profile.enable) \
watermark_tick = dm_get_timestamp(dc->ctx)
#define BW_VAL_TRACE_FINISH() \
if (dc->debug.bw_val_profile.enable) { \
end_tick = dm_get_timestamp(dc->ctx); \
dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
if (watermark_tick) { \
dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
} \
}
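
These macros only accumulate raw tick counters in dc->debug.bw_val_profile; turning them into a report is left to whoever reads that struct. A hypothetical summary (not part of this patch) might look like:

/* Hypothetical consumer of the profile counters; not in this patch. */
static void bw_val_profile_report(const struct dc_bw_validation_profile *p)
{
	if (!p->enable || !p->total_count)
		return;

	DRM_INFO("bw_val: %llu runs, avg %llu ticks (voltage %llu, watermark %llu, rq/dlg %llu); skipped fast=%llu pass=%llu fail=%llu\n",
		 p->total_count,
		 p->total_ticks / p->total_count,
		 p->voltage_level_ticks / p->total_count,
		 p->watermark_ticks / p->total_count,
		 p->rq_dlg_ticks / p->total_count,
		 p->skip_fast_count,
		 p->skip_pass_count,
		 p->skip_fail_count);
}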
struct dc_debug_options {
enum visual_confirm visual_confirm;
bool sanity_checks;
......@@ -279,6 +350,7 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
bool disable_tri_buf;
struct dc_bw_validation_profile bw_val_profile;
};
struct dc_debug_data {
......@@ -638,9 +710,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
/*
* fast_validate: we return after determining if we can support the new state,
* but before we populate the programming info
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
struct dc_state *new_ctx);
struct dc_state *new_ctx,
bool fast_validate);
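
In other words, a driver can probe cheaply during atomic check and run the full pass only at commit time. An illustrative two-phase caller (the function name is not from this patch):

/* Illustrative two-phase use of fast_validate; not taken from this patch. */
static int check_then_commit(struct dc *dc, struct dc_state *new_ctx)
{
	/* atomic check: a yes/no answer is enough, skip programming info */
	if (dc_validate_global_state(dc, new_ctx, true) != DC_OK)
		return -EINVAL;

	/* commit: full validation populates watermarks, rq/dlg, etc. */
	if (dc_validate_global_state(dc, new_ctx, false) != DC_OK)
		return -EINVAL;

	return 0;
}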
void dc_resource_state_construct(
......
......@@ -250,7 +250,7 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
const struct dc_link_settings *dc_link_get_verified_link_cap(
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link);
bool dc_submit_i2c(
......
......@@ -50,6 +50,7 @@
#define MCP_ABM_LEVEL_SET 0x65
#define MCP_ABM_PIPE_SET 0x66
#define MCP_BL_SET 0x67
#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MCP_DISABLE_ABM_IMMEDIATELY 255
......@@ -390,6 +391,23 @@ static bool dce_abm_init_backlight(struct abm *abm)
REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0);
/* Wait until microcontroller is ready to process interrupt */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
/* Set PWM fractional enable/disable */
value = (abm->ctx->dc->config.disable_fractional_pwm == false) ? 1 : 0;
REG_WRITE(MASTER_COMM_DATA_REG1, value);
/* Set command to enable or disable fractional PWM microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_BL_SET_PWM_FRAC);
/* Notify microcontroller of new command */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
/* Ensure command has been executed before continuing */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
return true;
}
......
......@@ -380,7 +380,24 @@ static const struct resource_caps res_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
};
#define CTX ctx
......@@ -761,7 +778,8 @@ static enum dc_status build_mapped_resource(
bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
......
......@@ -397,14 +397,48 @@ static const struct dc_plane_cap plane_cap = {
.blends_with_below = true,
.blends_with_above = true,
.per_pixel_alpha = 1,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
};
static const struct dc_plane_cap underlay_plane_cap = {
.type = DC_PLANE_TYPE_DCE_UNDERLAY,
.blends_with_above = true,
.per_pixel_alpha = 1,
.supports_nv12 = true
.pixel_format_support = {
.argb8888 = false,
.nv12 = true,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 1,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 1,
.nv12 = 250,
.fp16 = 1
}
};
#define CTX ctx
......@@ -869,7 +903,8 @@ static enum dc_status build_mapped_resource(
static bool dce110_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
bool fast_validate)
{
bool result = false;
......
......@@ -399,7 +399,24 @@ static const struct resource_caps polaris_11_resource_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
};
#define CTX ctx
......@@ -809,7 +826,8 @@ static enum dc_status build_mapped_resource(
bool dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
bool fast_validate)
{
bool result = false;
......
......@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context(
bool dce112_validate_bandwidth(
struct dc *dc,
struct dc_state *context);
struct dc_state *context,
bool fast_validate);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
......
......@@ -456,7 +456,24 @@ static const struct resource_caps res_cap = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
};
static const struct dc_debug_options debug_defaults = {
......
......@@ -389,7 +389,24 @@ static const struct resource_caps res_cap_83 = {
static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
};
static const struct dce_dmcu_registers dmcu_regs = {
......@@ -795,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool)
bool dce80_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
struct dc_state *context,
bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
......
......@@ -247,7 +247,7 @@
.field_name = reg_name ## __ ## field_name ## post_fix
/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
......@@ -331,7 +331,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
......@@ -339,7 +338,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
......@@ -373,6 +371,11 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
/* Mask/shift struct generation macro for ASICs with VM */
#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
......
......@@ -521,8 +521,24 @@ static const struct dc_plane_cap plane_cap = {
.blends_with_above = true,
.blends_with_below = true,
.per_pixel_alpha = true,
.supports_argb8888 = true,
.supports_nv12 = true
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 1
}
};
static const struct dc_debug_options debug_defaults_drv = {
......
......@@ -97,7 +97,8 @@ struct resource_funcs {
const struct encoder_init_data *init);
bool (*validate_bandwidth)(
struct dc *dc,
struct dc_state *context);
struct dc_state *context,
bool fast_validate);
enum dc_status (*validate_global)(
struct dc *dc,
......
......@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
struct dc_state *context);
struct dc_state *context,
bool fast_validate);
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
......
......@@ -540,6 +540,8 @@ struct smu_funcs
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
};
#define smu_init_microcode(smu) \
......@@ -723,6 +725,8 @@ struct smu_funcs
((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0)
#define smu_get_mclk(smu, low) \
((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
#define smu_set_xgmi_pstate(smu, pstate) \
((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
......
......@@ -1893,6 +1893,13 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
return ret;
}
static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
/* send msg to SMU to set pstate */
return 0;
}
static const struct smu_funcs smu_v11_0_funcs = {
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
......@@ -1947,6 +1954,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
.get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
};
void smu_v11_0_set_smu_funcs(struct smu_context *smu)
......@@ -1954,7 +1962,6 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
smu->funcs = &smu_v11_0_funcs;
switch (adev->asic_type) {
case CHIP_VEGA20:
vega20_set_ppt_funcs(smu);
......
......@@ -528,6 +528,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
......@@ -608,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
struct drm_amdgpu_cs_chunk_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
......