Commit 42f1a013 authored by Dave Airlie

Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Add the amdgpu specific bits for timeline support
- Add internal interfaces for xgmi pstate support
- DC Z ordering fixes for planes
- Add support for NV12 planes in DC
- Add colorspace properties for planes in DC
- eDP optimizations if the GOP driver already initialized eDP
- DC bandwidth validation tracing support
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190419150034.3473-1-alexander.deucher@amd.com
parents 6e865c72 f55be0be
...@@ -436,6 +436,12 @@ struct amdgpu_cs_chunk { ...@@ -436,6 +436,12 @@ struct amdgpu_cs_chunk {
void *kdata; void *kdata;
}; };
struct amdgpu_cs_post_dep {
struct drm_syncobj *syncobj;
struct dma_fence_chain *chain;
u64 point;
};
struct amdgpu_cs_parser { struct amdgpu_cs_parser {
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct drm_file *filp; struct drm_file *filp;
...@@ -465,8 +471,8 @@ struct amdgpu_cs_parser { ...@@ -465,8 +471,8 @@ struct amdgpu_cs_parser {
/* user fence */ /* user fence */
struct amdgpu_bo_list_entry uf_entry; struct amdgpu_bo_list_entry uf_entry;
unsigned num_post_dep_syncobjs; unsigned num_post_deps;
struct drm_syncobj **post_dep_syncobjs; struct amdgpu_cs_post_dep *post_deps;
}; };
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
......
...@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs ...@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
case AMDGPU_CHUNK_ID_SYNCOBJ_IN: case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT: case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES: case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
break; break;
default: default:
...@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, ...@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
ttm_eu_backoff_reservation(&parser->ticket, ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated); &parser->validated);
for (i = 0; i < parser->num_post_dep_syncobjs; i++) for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_dep_syncobjs[i]); drm_syncobj_put(parser->post_deps[i].syncobj);
kfree(parser->post_dep_syncobjs); kfree(parser->post_deps[i].chain);
}
kfree(parser->post_deps);
dma_fence_put(parser->fence); dma_fence_put(parser->fence);
...@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, ...@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
} }
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
uint32_t handle) uint32_t handle, u64 point,
u64 flags)
{ {
int r;
struct dma_fence *fence; struct dma_fence *fence;
r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence); int r;
if (r)
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r; return r;
}
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence); dma_fence_put(fence);
...@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p, ...@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk) struct amdgpu_cs_chunk *chunk)
{ {
struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps; unsigned num_deps;
int i, r; int i, r;
struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem); sizeof(struct drm_amdgpu_cs_chunk_sem);
for (i = 0; i < num_deps; ++i) {
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
0, 0);
if (r)
return r;
}
return 0;
}
static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
int i, r;
syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
for (i = 0; i < num_deps; ++i) { for (i = 0; i < num_deps; ++i) {
r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle); r = amdgpu_syncobj_lookup_and_add_to_sync(p,
syncobj_deps[i].handle,
syncobj_deps[i].point,
syncobj_deps[i].flags);
if (r) if (r)
return r; return r;
} }
return 0; return 0;
} }
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p, static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk) struct amdgpu_cs_chunk *chunk)
{ {
struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps; unsigned num_deps;
int i; int i;
struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata; deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 / num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem); sizeof(struct drm_amdgpu_cs_chunk_sem);
p->post_dep_syncobjs = kmalloc_array(num_deps, p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
sizeof(struct drm_syncobj *), GFP_KERNEL);
GFP_KERNEL); p->num_post_deps = 0;
p->num_post_dep_syncobjs = 0;
if (!p->post_dep_syncobjs) if (!p->post_deps)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < num_deps; ++i) { for (i = 0; i < num_deps; ++i) {
p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle); p->post_deps[i].syncobj =
if (!p->post_dep_syncobjs[i]) drm_syncobj_find(p->filp, deps[i].handle);
if (!p->post_deps[i].syncobj)
return -EINVAL; return -EINVAL;
p->num_post_dep_syncobjs++; p->post_deps[i].chain = NULL;
p->post_deps[i].point = 0;
p->num_post_deps++;
} }
return 0;
}
static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk
*chunk)
{
struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
unsigned num_deps;
int i;
syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_syncobj);
p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
GFP_KERNEL);
p->num_post_deps = 0;
if (!p->post_deps)
return -ENOMEM;
for (i = 0; i < num_deps; ++i) {
struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
dep->chain = NULL;
if (syncobj_deps[i].point) {
dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
if (!dep->chain)
return -ENOMEM;
}
dep->syncobj = drm_syncobj_find(p->filp,
syncobj_deps[i].handle);
if (!dep->syncobj) {
kfree(dep->chain);
return -EINVAL;
}
dep->point = syncobj_deps[i].point;
p->num_post_deps++;
}
return 0; return 0;
} }
...@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev, ...@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i]; chunk = &p->chunks[i];
if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES || switch (chunk->chunk_id) {
chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk); r = amdgpu_cs_process_fence_dep(p, chunk);
if (r) if (r)
return r; return r;
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) { break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk); r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r) if (r)
return r; return r;
} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) { break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk); r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r) if (r)
return r; return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r)
return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r)
return r;
break;
} }
} }
...@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p) ...@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{ {
int i; int i;
for (i = 0; i < p->num_post_dep_syncobjs; ++i) for (i = 0; i < p->num_post_deps; ++i) {
drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence); if (p->post_deps[i].chain && p->post_deps[i].point) {
drm_syncobj_add_point(p->post_deps[i].syncobj,
p->post_deps[i].chain,
p->fence, p->post_deps[i].point);
p->post_deps[i].chain = NULL;
} else {
drm_syncobj_replace_fence(p->post_deps[i].syncobj,
p->fence);
}
}
} }
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
......
...@@ -75,9 +75,10 @@ ...@@ -75,9 +75,10 @@
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
* - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE. * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
* - 3.31.0 - Add support for per-flip tiling attribute changes with DC * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
* - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
*/ */
#define KMS_DRIVER_MAJOR 3 #define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 31 #define KMS_DRIVER_MINOR 32
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0; int amdgpu_vram_limit = 0;
......
...@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, ...@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence; struct amdgpu_fence *fence;
struct dma_fence *old, **ptr; struct dma_fence __rcu **ptr;
uint32_t seq; uint32_t seq;
int r;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL) if (fence == NULL)
...@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, ...@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq, flags | AMDGPU_FENCE_FLAG_INT); seq, flags | AMDGPU_FENCE_FLAG_INT);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
struct dma_fence *old;
rcu_read_lock();
old = dma_fence_get_rcu_safe(ptr);
rcu_read_unlock();
if (old) {
r = dma_fence_wait(old, false);
dma_fence_put(old);
if (r)
return r;
}
}
/* This function can't be called concurrently anyway, otherwise /* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer. * emitting the fence would mess up the hardware ring buffer.
*/ */
old = rcu_dereference_protected(*ptr, 1);
if (old && !dma_fence_is_signaled(old)) {
DRM_INFO("rcu slot is busy\n");
dma_fence_wait(old, false);
}
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base)); rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base; *f = &fence->base;
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/list.h> #include <linux/list.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_xgmi.h" #include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
static DEFINE_MUTEX(xgmi_mutex); static DEFINE_MUTEX(xgmi_mutex);
...@@ -216,7 +217,17 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) ...@@ -216,7 +217,17 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
if (hive->pstate == pstate) if (hive->pstate == pstate)
return 0; return 0;
/* Todo : sent the message to SMU for pstate change */
dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
if (is_support_sw_smu(adev))
ret = smu_set_xgmi_pstate(&adev->smu, pstate);
if (ret)
dev_err(adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
adev->gmc.xgmi.node_id,
adev->gmc.xgmi.hive_id, ret);
return ret; return ret;
} }
......
...@@ -2085,7 +2085,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) ...@@ -2085,7 +2085,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (!plane->blends_with_above || !plane->blends_with_below) if (!plane->blends_with_above || !plane->blends_with_below)
continue; continue;
if (!plane->supports_argb8888) if (!plane->pixel_format_support.argb8888)
continue; continue;
if (initialize_plane(dm, NULL, primary_planes + i, if (initialize_plane(dm, NULL, primary_planes + i,
...@@ -2385,56 +2385,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { ...@@ -2385,56 +2385,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy, .destroy = amdgpu_dm_encoder_destroy,
}; };
static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
struct dc_plane_state *plane_state) static int fill_dc_scaling_info(const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{ {
plane_state->src_rect.x = state->src_x >> 16; int scale_w, scale_h;
plane_state->src_rect.y = state->src_y >> 16;
/* we ignore the mantissa for now and do not deal with floating pixels :( */
plane_state->src_rect.width = state->src_w >> 16;
if (plane_state->src_rect.width == 0) memset(scaling_info, 0, sizeof(*scaling_info));
return false;
plane_state->src_rect.height = state->src_h >> 16; /* Source is fixed 16.16 but we ignore mantissa for now... */
if (plane_state->src_rect.height == 0) scaling_info->src_rect.x = state->src_x >> 16;
return false; scaling_info->src_rect.y = state->src_y >> 16;
plane_state->dst_rect.x = state->crtc_x; scaling_info->src_rect.width = state->src_w >> 16;
plane_state->dst_rect.y = state->crtc_y; if (scaling_info->src_rect.width == 0)
return -EINVAL;
scaling_info->src_rect.height = state->src_h >> 16;
if (scaling_info->src_rect.height == 0)
return -EINVAL;
scaling_info->dst_rect.x = state->crtc_x;
scaling_info->dst_rect.y = state->crtc_y;
if (state->crtc_w == 0) if (state->crtc_w == 0)
return false; return -EINVAL;
plane_state->dst_rect.width = state->crtc_w; scaling_info->dst_rect.width = state->crtc_w;
if (state->crtc_h == 0) if (state->crtc_h == 0)
return false; return -EINVAL;
plane_state->dst_rect.height = state->crtc_h; scaling_info->dst_rect.height = state->crtc_h;
plane_state->clip_rect = plane_state->dst_rect; /* DRM doesn't specify clipping on destination output. */
scaling_info->clip_rect = scaling_info->dst_rect;
switch (state->rotation & DRM_MODE_ROTATE_MASK) { /* TODO: Validate scaling per-format with DC plane caps */
case DRM_MODE_ROTATE_0: scale_w = scaling_info->dst_rect.width * 1000 /
plane_state->rotation = ROTATION_ANGLE_0; scaling_info->src_rect.width;
break;
case DRM_MODE_ROTATE_90:
plane_state->rotation = ROTATION_ANGLE_90;
break;
case DRM_MODE_ROTATE_180:
plane_state->rotation = ROTATION_ANGLE_180;
break;
case DRM_MODE_ROTATE_270:
plane_state->rotation = ROTATION_ANGLE_270;
break;
default:
plane_state->rotation = ROTATION_ANGLE_0;
break;
}
return true; if (scale_w < 250 || scale_w > 16000)
return -EINVAL;
scale_h = scaling_info->dst_rect.height * 1000 /
scaling_info->src_rect.height;
if (scale_h < 250 || scale_h > 16000)
return -EINVAL;
/*
* The "scaling_quality" can be ignored for now, quality = 0 has DC
* assume reasonable defaults based on the format.
*/
return 0;
} }
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags) uint64_t *tiling_flags)
{ {
...@@ -2463,12 +2470,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags) ...@@ -2463,12 +2470,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
return offset ? (address + offset * 256) : 0; return offset ? (address + offset * 256) : 0;
} }
static int fill_plane_dcc_attributes(struct amdgpu_device *adev, static int
const struct amdgpu_framebuffer *afb, fill_plane_dcc_attributes(struct amdgpu_device *adev,
const struct dc_plane_state *plane_state, const struct amdgpu_framebuffer *afb,
struct dc_plane_dcc_param *dcc, const enum surface_pixel_format format,
struct dc_plane_address *address, const enum dc_rotation_angle rotation,
uint64_t info) const union plane_size *plane_size,
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{ {
struct dc *dc = adev->dm.dc; struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input; struct dc_dcc_surface_param input;
...@@ -2483,24 +2494,20 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev, ...@@ -2483,24 +2494,20 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
if (!offset) if (!offset)
return 0; return 0;
if (plane_state->address.type != PLN_ADDR_TYPE_GRAPHICS) if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0; return 0;
if (!dc->cap_funcs.get_dcc_compression_cap) if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL; return -EINVAL;
input.format = plane_state->format; input.format = format;
input.surface_size.width = input.surface_size.width = plane_size->grph.surface_size.width;
plane_state->plane_size.grph.surface_size.width; input.surface_size.height = plane_size->grph.surface_size.height;
input.surface_size.height = input.swizzle_mode = tiling_info->gfx9.swizzle;
plane_state->plane_size.grph.surface_size.height;
input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
if (plane_state->rotation == ROTATION_ANGLE_0 || if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
plane_state->rotation == ROTATION_ANGLE_180)
input.scan = SCAN_DIRECTION_HORIZONTAL; input.scan = SCAN_DIRECTION_HORIZONTAL;
else if (plane_state->rotation == ROTATION_ANGLE_90 || else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
plane_state->rotation == ROTATION_ANGLE_270)
input.scan = SCAN_DIRECTION_VERTICAL; input.scan = SCAN_DIRECTION_VERTICAL;
if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
...@@ -2525,28 +2532,54 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev, ...@@ -2525,28 +2532,54 @@ static int fill_plane_dcc_attributes(struct amdgpu_device *adev,
} }
static int static int
fill_plane_tiling_attributes(struct amdgpu_device *adev, fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb, const struct amdgpu_framebuffer *afb,
const struct dc_plane_state *plane_state, const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
union dc_tiling_info *tiling_info, union dc_tiling_info *tiling_info,
union plane_size *plane_size,
struct dc_plane_dcc_param *dcc, struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address, struct dc_plane_address *address)
uint64_t tiling_flags)
{ {
const struct drm_framebuffer *fb = &afb->base;
int ret; int ret;
memset(tiling_info, 0, sizeof(*tiling_info)); memset(tiling_info, 0, sizeof(*tiling_info));
memset(plane_size, 0, sizeof(*plane_size));
memset(dcc, 0, sizeof(*dcc)); memset(dcc, 0, sizeof(*dcc));
memset(address, 0, sizeof(*address)); memset(address, 0, sizeof(*address));
if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_size->grph.surface_size.x = 0;
plane_size->grph.surface_size.y = 0;
plane_size->grph.surface_size.width = fb->width;
plane_size->grph.surface_size.height = fb->height;
plane_size->grph.surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
address->type = PLN_ADDR_TYPE_GRAPHICS; address->type = PLN_ADDR_TYPE_GRAPHICS;
address->grph.addr.low_part = lower_32_bits(afb->address); address->grph.addr.low_part = lower_32_bits(afb->address);
address->grph.addr.high_part = upper_32_bits(afb->address); address->grph.addr.high_part = upper_32_bits(afb->address);
} else { } else {
const struct drm_framebuffer *fb = &afb->base;
uint64_t chroma_addr = afb->address + fb->offsets[1]; uint64_t chroma_addr = afb->address + fb->offsets[1];
plane_size->video.luma_size.x = 0;
plane_size->video.luma_size.y = 0;
plane_size->video.luma_size.width = fb->width;
plane_size->video.luma_size.height = fb->height;
plane_size->video.luma_pitch =
fb->pitches[0] / fb->format->cpp[0];
plane_size->video.chroma_size.x = 0;
plane_size->video.chroma_size.y = 0;
/* TODO: set these based on surface format */
plane_size->video.chroma_size.width = fb->width / 2;
plane_size->video.chroma_size.height = fb->height / 2;
plane_size->video.chroma_pitch =
fb->pitches[1] / fb->format->cpp[1];
address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
address->video_progressive.luma_addr.low_part = address->video_progressive.luma_addr.low_part =
lower_32_bits(afb->address); lower_32_bits(afb->address);
...@@ -2607,8 +2640,9 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev, ...@@ -2607,8 +2640,9 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev,
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE); AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
tiling_info->gfx9.shaderEnable = 1; tiling_info->gfx9.shaderEnable = 1;
ret = fill_plane_dcc_attributes(adev, afb, plane_state, dcc, ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
address, tiling_flags); plane_size, tiling_info,
tiling_flags, dcc, address);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -2616,112 +2650,8 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev, ...@@ -2616,112 +2650,8 @@ fill_plane_tiling_attributes(struct amdgpu_device *adev,
return 0; return 0;
} }
static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
struct dc_plane_state *plane_state,
const struct amdgpu_framebuffer *amdgpu_fb)
{
uint64_t tiling_flags;
const struct drm_framebuffer *fb = &amdgpu_fb->base;
int ret = 0;
struct drm_format_name_buf format_name;
ret = get_fb_info(
amdgpu_fb,
&tiling_flags);
if (ret)
return ret;
switch (fb->format->format) {
case DRM_FORMAT_C8:
plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
break;
case DRM_FORMAT_RGB565:
plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
break;
case DRM_FORMAT_NV21:
plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
break;
case DRM_FORMAT_NV12:
plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(fb->format->format, &format_name));
return -EINVAL;
}
memset(&plane_state->address, 0, sizeof(plane_state->address));
if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_state->plane_size.grph.surface_size.x = 0;
plane_state->plane_size.grph.surface_size.y = 0;
plane_state->plane_size.grph.surface_size.width = fb->width;
plane_state->plane_size.grph.surface_size.height = fb->height;
plane_state->plane_size.grph.surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
/* TODO: unhardcode */
plane_state->color_space = COLOR_SPACE_SRGB;
} else {
plane_state->plane_size.video.luma_size.x = 0;
plane_state->plane_size.video.luma_size.y = 0;
plane_state->plane_size.video.luma_size.width = fb->width;
plane_state->plane_size.video.luma_size.height = fb->height;
plane_state->plane_size.video.luma_pitch =
fb->pitches[0] / fb->format->cpp[0];
plane_state->plane_size.video.chroma_size.x = 0;
plane_state->plane_size.video.chroma_size.y = 0;
/* TODO: set these based on surface format */
plane_state->plane_size.video.chroma_size.width = fb->width / 2;
plane_state->plane_size.video.chroma_size.height = fb->height / 2;
plane_state->plane_size.video.chroma_pitch =
fb->pitches[1] / fb->format->cpp[1];
/* TODO: unhardcode */
plane_state->color_space = COLOR_SPACE_YCBCR709;
}
fill_plane_tiling_attributes(adev, amdgpu_fb, plane_state,
&plane_state->tiling_info,
&plane_state->dcc,
&plane_state->address,
tiling_flags);
plane_state->visible = true;
plane_state->scaling_quality.h_taps_c = 0;
plane_state->scaling_quality.v_taps_c = 0;
/* is this needed? is plane_state zeroed at allocation? */
plane_state->scaling_quality.h_taps = 0;
plane_state->scaling_quality.v_taps = 0;
plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
return ret;
}
static void static void
fill_blending_from_plane_state(struct drm_plane_state *plane_state, fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
const struct dc_plane_state *dc_plane_state,
bool *per_pixel_alpha, bool *global_alpha, bool *per_pixel_alpha, bool *global_alpha,
int *global_alpha_value) int *global_alpha_value)
{ {
...@@ -2757,7 +2687,7 @@ fill_blending_from_plane_state(struct drm_plane_state *plane_state, ...@@ -2757,7 +2687,7 @@ fill_blending_from_plane_state(struct drm_plane_state *plane_state,
static int static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state, fill_plane_color_attributes(const struct drm_plane_state *plane_state,
const struct dc_plane_state *dc_plane_state, const enum surface_pixel_format format,
enum dc_color_space *color_space) enum dc_color_space *color_space)
{ {
bool full_range; bool full_range;
...@@ -2765,7 +2695,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, ...@@ -2765,7 +2695,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB; *color_space = COLOR_SPACE_SRGB;
/* DRM color properties only affect non-RGB formats. */ /* DRM color properties only affect non-RGB formats. */
if (dc_plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0; return 0;
full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
...@@ -2799,32 +2729,144 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state, ...@@ -2799,32 +2729,144 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
return 0; return 0;
} }
static int fill_plane_attributes(struct amdgpu_device *adev, static int
struct dc_plane_state *dc_plane_state, fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
struct drm_plane_state *plane_state, const struct drm_plane_state *plane_state,
struct drm_crtc_state *crtc_state) const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
struct drm_format_name_buf format_name;
int ret;
memset(plane_info, 0, sizeof(*plane_info));
switch (fb->format->format) {
case DRM_FORMAT_C8:
plane_info->format =
SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
break;
case DRM_FORMAT_RGB565:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
break;
case DRM_FORMAT_NV21:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
break;
case DRM_FORMAT_NV12:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
drm_get_format_name(fb->format->format, &format_name));
return -EINVAL;
}
switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_0:
plane_info->rotation = ROTATION_ANGLE_0;
break;
case DRM_MODE_ROTATE_90:
plane_info->rotation = ROTATION_ANGLE_90;
break;
case DRM_MODE_ROTATE_180:
plane_info->rotation = ROTATION_ANGLE_180;
break;
case DRM_MODE_ROTATE_270:
plane_info->rotation = ROTATION_ANGLE_270;
break;
default:
plane_info->rotation = ROTATION_ANGLE_0;
break;
}
plane_info->visible = true;
plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
ret = fill_plane_color_attributes(plane_state, plane_info->format,
&plane_info->color_space);
if (ret)
return ret;
ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
&plane_info->dcc, address);
if (ret)
return ret;
fill_blending_from_plane_state(
plane_state, &plane_info->per_pixel_alpha,
&plane_info->global_alpha, &plane_info->global_alpha_value);
return 0;
}
static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_state *dc_plane_state,
struct drm_plane_state *plane_state,
struct drm_crtc_state *crtc_state)
{ {
const struct amdgpu_framebuffer *amdgpu_fb = const struct amdgpu_framebuffer *amdgpu_fb =
to_amdgpu_framebuffer(plane_state->fb); to_amdgpu_framebuffer(plane_state->fb);
const struct drm_crtc *crtc = plane_state->crtc; struct dc_scaling_info scaling_info;
int ret = 0; struct dc_plane_info plane_info;
uint64_t tiling_flags;
int ret;
if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) ret = fill_dc_scaling_info(plane_state, &scaling_info);
return -EINVAL; if (ret)
return ret;
ret = fill_plane_attributes_from_fb( dc_plane_state->src_rect = scaling_info.src_rect;
crtc->dev->dev_private, dc_plane_state->dst_rect = scaling_info.dst_rect;
dc_plane_state, dc_plane_state->clip_rect = scaling_info.clip_rect;
amdgpu_fb); dc_plane_state->scaling_quality = scaling_info.scaling_quality;
ret = get_fb_info(amdgpu_fb, &tiling_flags);
if (ret) if (ret)
return ret; return ret;
ret = fill_plane_color_attributes(plane_state, dc_plane_state, ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&dc_plane_state->color_space); &plane_info,
&dc_plane_state->address);
if (ret) if (ret)
return ret; return ret;
dc_plane_state->format = plane_info.format;
dc_plane_state->color_space = plane_info.color_space;
dc_plane_state->format = plane_info.format;
dc_plane_state->plane_size = plane_info.plane_size;
dc_plane_state->rotation = plane_info.rotation;
dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
dc_plane_state->stereo_format = plane_info.stereo_format;
dc_plane_state->tiling_info = plane_info.tiling_info;
dc_plane_state->visible = plane_info.visible;
dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
dc_plane_state->global_alpha = plane_info.global_alpha;
dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
dc_plane_state->dcc = plane_info.dcc;
/* /*
* Always set input transfer function, since plane state is refreshed * Always set input transfer function, since plane state is refreshed
* every time. * every time.
...@@ -2835,11 +2877,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev, ...@@ -2835,11 +2877,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->in_transfer_func = NULL; dc_plane_state->in_transfer_func = NULL;
} }
fill_blending_from_plane_state(plane_state, dc_plane_state,
&dc_plane_state->per_pixel_alpha,
&dc_plane_state->global_alpha,
&dc_plane_state->global_alpha_value);
return ret; return ret;
} }
...@@ -3825,6 +3862,38 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc) ...@@ -3825,6 +3862,38 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{ {
} }
static bool does_crtc_have_active_plane(struct drm_crtc_state *new_crtc_state)
{
struct drm_atomic_state *state = new_crtc_state->state;
struct drm_plane *plane;
int num_active = 0;
drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
struct drm_plane_state *new_plane_state;
/* Cursor planes are "fake". */
if (plane->type == DRM_PLANE_TYPE_CURSOR)
continue;
new_plane_state = drm_atomic_get_new_plane_state(state, plane);
if (!new_plane_state) {
/*
* The plane is enable on the CRTC and hasn't changed
* state. This means that it previously passed
* validation and is therefore enabled.
*/
num_active += 1;
continue;
}
/* We need a framebuffer to be considered enabled. */
num_active += (new_plane_state->fb != NULL);
}
return num_active > 0;
}
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state) struct drm_crtc_state *state)
{ {
...@@ -3843,6 +3912,11 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, ...@@ -3843,6 +3912,11 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
if (!dm_crtc_state->stream) if (!dm_crtc_state->stream)
return 0; return 0;
/* We want at least one hardware plane enabled to use the stream. */
if (state->enable && state->active &&
!does_crtc_have_active_plane(state))
return -EINVAL;
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0; return 0;
...@@ -3994,9 +4068,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, ...@@ -3994,9 +4068,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state; struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
fill_plane_tiling_attributes( fill_plane_buffer_attributes(
adev, afb, plane_state, &plane_state->tiling_info, adev, afb, plane_state->format, plane_state->rotation,
&plane_state->dcc, &plane_state->address, tiling_flags); tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc,
&plane_state->address);
} }
return 0; return 0;
...@@ -4028,13 +4104,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane, ...@@ -4028,13 +4104,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
{ {
struct amdgpu_device *adev = plane->dev->dev_private; struct amdgpu_device *adev = plane->dev->dev_private;
struct dc *dc = adev->dm.dc; struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); struct dm_plane_state *dm_plane_state;
struct dc_scaling_info scaling_info;
int ret;
dm_plane_state = to_dm_plane_state(state);
if (!dm_plane_state->dc_state) if (!dm_plane_state->dc_state)
return 0; return 0;
if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state)) ret = fill_dc_scaling_info(state, &scaling_info);
return -EINVAL; if (ret)
return ret;
if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0; return 0;
...@@ -4121,46 +4202,71 @@ static const u32 cursor_formats[] = { ...@@ -4121,46 +4202,71 @@ static const u32 cursor_formats[] = {
DRM_FORMAT_ARGB8888 DRM_FORMAT_ARGB8888
}; };
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, static int get_plane_formats(const struct drm_plane *plane,
struct drm_plane *plane, const struct dc_plane_cap *plane_cap,
unsigned long possible_crtcs, uint32_t *formats, int max_formats)
const struct dc_plane_cap *plane_cap)
{ {
int res = -EPERM; int i, num_formats = 0;
/*
* TODO: Query support for each group of formats directly from
* DC plane caps. This will require adding more formats to the
* caps list.
*/
switch (plane->type) { switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY: case DRM_PLANE_TYPE_PRIMARY:
res = drm_universal_plane_init( for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
dm->adev->ddev, if (num_formats >= max_formats)
plane, break;
possible_crtcs,
&dm_plane_funcs, formats[num_formats++] = rgb_formats[i];
rgb_formats, }
ARRAY_SIZE(rgb_formats),
NULL, plane->type, NULL); if (plane_cap && plane_cap->pixel_format_support.nv12)
formats[num_formats++] = DRM_FORMAT_NV12;
break; break;
case DRM_PLANE_TYPE_OVERLAY: case DRM_PLANE_TYPE_OVERLAY:
res = drm_universal_plane_init( for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
dm->adev->ddev, if (num_formats >= max_formats)
plane, break;
possible_crtcs,
&dm_plane_funcs, formats[num_formats++] = overlay_formats[i];
overlay_formats, }
ARRAY_SIZE(overlay_formats),
NULL, plane->type, NULL);
break; break;
case DRM_PLANE_TYPE_CURSOR: case DRM_PLANE_TYPE_CURSOR:
res = drm_universal_plane_init( for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
dm->adev->ddev, if (num_formats >= max_formats)
plane, break;
possible_crtcs,
&dm_plane_funcs, formats[num_formats++] = cursor_formats[i];
cursor_formats, }
ARRAY_SIZE(cursor_formats),
NULL, plane->type, NULL);
break; break;
} }
return num_formats;
}
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap)
{
uint32_t formats[32];
int num_formats;
int res = -EPERM;
num_formats = get_plane_formats(plane, plane_cap, formats,
ARRAY_SIZE(formats));
res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
&dm_plane_funcs, formats, num_formats,
NULL, plane->type, NULL);
if (res)
return res;
if (plane->type == DRM_PLANE_TYPE_OVERLAY && if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
plane_cap && plane_cap->per_pixel_alpha) { plane_cap && plane_cap->per_pixel_alpha) {
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
...@@ -4170,14 +4276,25 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, ...@@ -4170,14 +4276,25 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps); drm_plane_create_blend_mode_property(plane, blend_caps);
} }
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap && plane_cap->pixel_format_support.nv12) {
/* This only affects YUV formats. */
drm_plane_create_color_properties(
plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
}
drm_plane_helper_add(plane, &dm_plane_helper_funcs); drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */ /* Create (reset) the plane state */
if (plane->funcs->reset) if (plane->funcs->reset)
plane->funcs->reset(plane); plane->funcs->reset(plane);
return 0;
return res;
} }
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
...@@ -4769,9 +4886,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, ...@@ -4769,9 +4886,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
x = plane->state->crtc_x; x = plane->state->crtc_x;
y = plane->state->crtc_y; y = plane->state->crtc_y;
/* avivo cursor are offset into the total surface */
x += crtc->primary->state->src_x >> 16; if (crtc->primary->state) {
y += crtc->primary->state->src_y >> 16; /* avivo cursor are offset into the total surface */
x += crtc->primary->state->src_x >> 16;
y += crtc->primary->state->src_y >> 16;
}
if (x < 0) { if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0; x = 0;
...@@ -5046,7 +5167,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, ...@@ -5046,7 +5167,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state; struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb; struct drm_framebuffer *fb = new_plane_state->fb;
struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
bool plane_needs_flip; bool plane_needs_flip;
struct dc_plane_state *dc_plane; struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
...@@ -5070,29 +5190,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, ...@@ -5070,29 +5190,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
} }
fill_dc_scaling_info(new_plane_state,
&bundle->scaling_infos[planes_count]);
bundle->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality; bundle->surface_updates[planes_count].scaling_info =
bundle->scaling_infos[planes_count].src_rect = dc_plane->src_rect; &bundle->scaling_infos[planes_count];
bundle->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count];
fill_plane_color_attributes(
new_plane_state, dc_plane,
&bundle->plane_infos[planes_count].color_space);
bundle->plane_infos[planes_count].format = dc_plane->format;
bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size;
bundle->plane_infos[planes_count].rotation = dc_plane->rotation;
bundle->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
bundle->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
bundle->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
bundle->plane_infos[planes_count].visible = dc_plane->visible;
bundle->plane_infos[planes_count].global_alpha = dc_plane->global_alpha;
bundle->plane_infos[planes_count].global_alpha_value = dc_plane->global_alpha_value;
bundle->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
bundle->plane_infos[planes_count].dcc = dc_plane->dcc;
bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count];
plane_needs_flip = old_plane_state->fb && new_plane_state->fb; plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
...@@ -5124,11 +5226,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, ...@@ -5124,11 +5226,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_bo_unreserve(abo); amdgpu_bo_unreserve(abo);
fill_plane_tiling_attributes(dm->adev, afb, dc_plane, fill_dc_plane_info_and_addr(
&bundle->plane_infos[planes_count].tiling_info, dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count].dcc, &bundle->plane_infos[planes_count],
&bundle->flip_addrs[planes_count].address, &bundle->flip_addrs[planes_count].address);
tiling_flags);
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
bundle->flip_addrs[planes_count].flip_immediate = bundle->flip_addrs[planes_count].flip_immediate =
(crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
...@@ -5812,21 +5916,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, ...@@ -5812,21 +5916,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector = NULL; struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL; new_stream = NULL;
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc); acrtc = to_amdgpu_crtc(crtc);
new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
ret = -EINVAL;
goto fail;
}
aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
/* TODO This hack should go away */ /* TODO This hack should go away */
...@@ -6016,6 +6111,69 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, ...@@ -6016,6 +6111,69 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
return ret; return ret;
} }
static bool should_reset_plane(struct drm_atomic_state *state,
struct drm_plane *plane,
struct drm_plane_state *old_plane_state,
struct drm_plane_state *new_plane_state)
{
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
struct drm_crtc_state *new_crtc_state;
int i;
/*
* TODO: Remove this hack once the checks below are sufficient
* enough to determine when we need to reset all the planes on
* the stream.
*/
if (state->allow_modeset)
return true;
/* Exit early if we know that we're adding or removing the plane. */
if (old_plane_state->crtc != new_plane_state->crtc)
return true;
/* old crtc == new_crtc == NULL, plane not in context. */
if (!new_plane_state->crtc)
return false;
new_crtc_state =
drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
if (!new_crtc_state)
return true;
if (drm_atomic_crtc_needs_modeset(new_crtc_state))
return true;
/*
* If there are any new primary or overlay planes being added or
* removed then the z-order can potentially change. To ensure
* correct z-order and pipe acquisition the current DC architecture
* requires us to remove and recreate all existing planes.
*
* TODO: Come up with a more elegant solution for this.
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
if (old_other_state->crtc != new_plane_state->crtc &&
new_other_state->crtc != new_plane_state->crtc)
continue;
if (old_other_state->crtc != new_other_state->crtc)
return true;
/* TODO: Remove this once we can handle fast format changes. */
if (old_other_state->fb && new_other_state->fb &&
old_other_state->fb->format != new_other_state->fb->format)
return true;
}
return false;
}
static int dm_update_plane_state(struct dc *dc, static int dm_update_plane_state(struct dc *dc,
struct drm_atomic_state *state, struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_plane *plane,
...@@ -6030,8 +6188,7 @@ static int dm_update_plane_state(struct dc *dc, ...@@ -6030,8 +6188,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state; struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
/* TODO return page_flip_needed() function */ bool needs_reset;
bool pflip_needed = !state->allow_modeset;
int ret = 0; int ret = 0;
...@@ -6044,10 +6201,12 @@ static int dm_update_plane_state(struct dc *dc, ...@@ -6044,10 +6201,12 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_CURSOR) if (plane->type == DRM_PLANE_TYPE_CURSOR)
return 0; return 0;
needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);
/* Remove any changed/removed planes */ /* Remove any changed/removed planes */
if (!enable) { if (!enable) {
if (pflip_needed && if (!needs_reset)
plane->type != DRM_PLANE_TYPE_OVERLAY)
return 0; return 0;
if (!old_plane_crtc) if (!old_plane_crtc)
...@@ -6098,7 +6257,7 @@ static int dm_update_plane_state(struct dc *dc, ...@@ -6098,7 +6257,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!dm_new_crtc_state->stream) if (!dm_new_crtc_state->stream)
return 0; return 0;
if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY) if (!needs_reset)
return 0; return 0;
WARN_ON(dm_new_plane_state->dc_state); WARN_ON(dm_new_plane_state->dc_state);
...@@ -6110,7 +6269,7 @@ static int dm_update_plane_state(struct dc *dc, ...@@ -6110,7 +6269,7 @@ static int dm_update_plane_state(struct dc *dc,
DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n", DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id); plane->base.id, new_plane_crtc->base.id);
ret = fill_plane_attributes( ret = fill_dc_plane_attributes(
new_plane_crtc->dev->dev_private, new_plane_crtc->dev->dev_private,
dc_new_plane_state, dc_new_plane_state,
new_plane_state, new_plane_state,
...@@ -6158,10 +6317,11 @@ static int dm_update_plane_state(struct dc *dc, ...@@ -6158,10 +6317,11 @@ static int dm_update_plane_state(struct dc *dc,
} }
static int static int
dm_determine_update_type_for_commit(struct dc *dc, dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct drm_atomic_state *state, struct drm_atomic_state *state,
enum surface_update_type *out_type) enum surface_update_type *out_type)
{ {
struct dc *dc = dm->dc;
struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL; struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
int i, j, num_plane, ret = 0; int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state; struct drm_plane_state *old_plane_state, *new_plane_state;
...@@ -6175,20 +6335,19 @@ dm_determine_update_type_for_commit(struct dc *dc, ...@@ -6175,20 +6335,19 @@ dm_determine_update_type_for_commit(struct dc *dc,
struct dc_stream_status *status = NULL; struct dc_stream_status *status = NULL;
struct dc_surface_update *updates; struct dc_surface_update *updates;
struct dc_plane_state *surface;
enum surface_update_type update_type = UPDATE_TYPE_FAST; enum surface_update_type update_type = UPDATE_TYPE_FAST;
updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
if (!updates || !surface) { if (!updates) {
DRM_ERROR("Plane or surface update failed to allocate"); DRM_ERROR("Failed to allocate plane updates\n");
/* Set type to FULL to avoid crashing in DC*/ /* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL; update_type = UPDATE_TYPE_FULL;
goto cleanup; goto cleanup;
} }
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct dc_scaling_info scaling_info;
struct dc_stream_update stream_update; struct dc_stream_update stream_update;
memset(&stream_update, 0, sizeof(stream_update)); memset(&stream_update, 0, sizeof(stream_update));
...@@ -6219,23 +6378,12 @@ dm_determine_update_type_for_commit(struct dc *dc, ...@@ -6219,23 +6378,12 @@ dm_determine_update_type_for_commit(struct dc *dc,
goto cleanup; goto cleanup;
} }
if (!state->allow_modeset)
continue;
if (crtc != new_plane_crtc) if (crtc != new_plane_crtc)
continue; continue;
updates[num_plane].surface = &surface[num_plane]; updates[num_plane].surface = new_dm_plane_state->dc_state;
if (new_crtc_state->mode_changed) { if (new_crtc_state->mode_changed) {
updates[num_plane].surface->src_rect =
new_dm_plane_state->dc_state->src_rect;
updates[num_plane].surface->dst_rect =
new_dm_plane_state->dc_state->dst_rect;
updates[num_plane].surface->rotation =
new_dm_plane_state->dc_state->rotation;
updates[num_plane].surface->in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
stream_update.dst = new_dm_crtc_state->stream->dst; stream_update.dst = new_dm_crtc_state->stream->dst;
stream_update.src = new_dm_crtc_state->stream->src; stream_update.src = new_dm_crtc_state->stream->src;
} }
...@@ -6251,6 +6399,13 @@ dm_determine_update_type_for_commit(struct dc *dc, ...@@ -6251,6 +6399,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
new_dm_crtc_state->stream->out_transfer_func; new_dm_crtc_state->stream->out_transfer_func;
} }
ret = fill_dc_scaling_info(new_plane_state,
&scaling_info);
if (ret)
goto cleanup;
updates[num_plane].scaling_info = &scaling_info;
num_plane++; num_plane++;
} }
...@@ -6270,8 +6425,14 @@ dm_determine_update_type_for_commit(struct dc *dc, ...@@ -6270,8 +6425,14 @@ dm_determine_update_type_for_commit(struct dc *dc,
status = dc_stream_get_status_from_state(old_dm_state->context, status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream); new_dm_crtc_state->stream);
/*
* TODO: DC modifies the surface during this call so we need
* to lock here - find a way to do this without locking.
*/
mutex_lock(&dm->dc_lock);
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status); &stream_update, status);
mutex_unlock(&dm->dc_lock);
if (update_type > UPDATE_TYPE_MED) { if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL; update_type = UPDATE_TYPE_FULL;
...@@ -6281,7 +6442,6 @@ dm_determine_update_type_for_commit(struct dc *dc, ...@@ -6281,7 +6442,6 @@ dm_determine_update_type_for_commit(struct dc *dc,
cleanup: cleanup:
kfree(updates); kfree(updates);
kfree(surface);
*out_type = update_type; *out_type = update_type;
return ret; return ret;
...@@ -6465,7 +6625,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ...@@ -6465,7 +6625,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true; lock_and_validation_needed = true;
} }
ret = dm_determine_update_type_for_commit(dc, state, &update_type); ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
if (ret) if (ret)
goto fail; goto fail;
...@@ -6480,9 +6640,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ...@@ -6480,9 +6640,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/ */
if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST) if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL"); WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
if (overall_update_type > UPDATE_TYPE_FAST) { if (overall_update_type > UPDATE_TYPE_FAST) {
ret = dm_atomic_get_state(state, &dm_state); ret = dm_atomic_get_state(state, &dm_state);
...@@ -6493,7 +6650,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, ...@@ -6493,7 +6650,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret) if (ret)
goto fail; goto fail;
if (dc_validate_global_state(dc, dm_state->context) != DC_OK) { if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL; ret = -EINVAL;
goto fail; goto fail;
} }
......
...@@ -701,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v, ...@@ -701,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
bool dcn_validate_bandwidth( bool dcn_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context) struct dc_state *context,
bool fast_validate)
{ {
/*
* we want a breakdown of the various stages of validation, which the
* perf_trace macro doesn't support
*/
BW_VAL_TRACE_SETUP();
const struct resource_pool *pool = dc->res_pool; const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx; int i, input_idx;
...@@ -711,6 +718,9 @@ bool dcn_validate_bandwidth( ...@@ -711,6 +718,9 @@ bool dcn_validate_bandwidth(
float bw_limit; float bw_limit;
PERFORMANCE_TRACE_START(); PERFORMANCE_TRACE_START();
BW_VAL_TRACE_COUNT();
if (dcn_bw_apply_registry_override(dc)) if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc); dcn_bw_sync_calcs_and_dml(dc);
...@@ -1013,8 +1023,11 @@ bool dcn_validate_bandwidth( ...@@ -1013,8 +1023,11 @@ bool dcn_validate_bandwidth(
mode_support_and_system_configuration(v); mode_support_and_system_configuration(v);
} }
if (v->voltage_level != 5) { BW_VAL_TRACE_END_VOLTAGE_LEVEL();
if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
...@@ -1087,6 +1100,8 @@ bool dcn_validate_bandwidth( ...@@ -1087,6 +1100,8 @@ bool dcn_validate_bandwidth(
break; break;
} }
BW_VAL_TRACE_END_WATERMARKS();
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
...@@ -1177,6 +1192,10 @@ bool dcn_validate_bandwidth( ...@@ -1177,6 +1192,10 @@ bool dcn_validate_bandwidth(
input_idx++; input_idx++;
} }
} else if (v->voltage_level == number_of_states_plus_one) {
BW_VAL_TRACE_SKIP(fail);
} else if (fast_validate) {
BW_VAL_TRACE_SKIP(fast);
} }
if (v->voltage_level == 0) { if (v->voltage_level == 0) {
...@@ -1196,6 +1215,7 @@ bool dcn_validate_bandwidth( ...@@ -1196,6 +1215,7 @@ bool dcn_validate_bandwidth(
kernel_fpu_end(); kernel_fpu_end();
PERFORMANCE_TRACE_END(); PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level != 5) if (bw_limit_pass && v->voltage_level != 5)
return true; return true;
......
...@@ -597,7 +597,7 @@ uint32_t dc_link_bandwidth_kbps( ...@@ -597,7 +597,7 @@ uint32_t dc_link_bandwidth_kbps(
} }
const struct dc_link_settings *dc_link_get_verified_link_cap( const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link) const struct dc_link *link)
{ {
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
......
...@@ -1548,8 +1548,7 @@ bool dp_validate_mode_timing( ...@@ -1548,8 +1548,7 @@ bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480) timing->v_addressable == (uint32_t) 480)
return true; return true;
/* We always use verified link settings */ link_setting = dc_link_get_link_cap(link);
link_setting = dc_link_get_verified_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 && /*if (flags.DYNAMIC_VALIDATION == 1 &&
...@@ -2587,6 +2586,9 @@ void detect_edp_sink_caps(struct dc_link *link) ...@@ -2587,6 +2586,9 @@ void detect_edp_sink_caps(struct dc_link *link)
uint32_t entry; uint32_t entry;
uint32_t link_rate_in_khz; uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
union lane_count_set lane_count_set = { {0} };
uint8_t link_bw_set;
uint8_t link_rate_set;
retrieve_link_cap(link); retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0; link->dpcd_caps.edp_supported_link_rates_count = 0;
...@@ -2612,6 +2614,33 @@ void detect_edp_sink_caps(struct dc_link *link) ...@@ -2612,6 +2614,33 @@ void detect_edp_sink_caps(struct dc_link *link)
} }
} }
link->verified_link_cap = link->reported_link_cap; link->verified_link_cap = link->reported_link_cap;
// Read DPCD 00101h to find out the number of lanes currently set
core_link_read_dpcd(link, DP_LANE_COUNT_SET,
&lane_count_set.raw, sizeof(lane_count_set));
link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
&link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
/* If standard link rates are not being used,
* Read DPCD 00115h to find the link rate set used
*/
core_link_read_dpcd(link, DP_LINK_RATE_SET,
&link_rate_set, sizeof(link_rate_set));
if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
link->cur_link_settings.link_rate =
link->dpcd_caps.edp_supported_link_rates[link_rate_set];
link->cur_link_settings.link_rate_set = link_rate_set;
link->cur_link_settings.use_link_rate_set = true;
}
} else {
link->cur_link_settings.link_rate = link_bw_set;
link->cur_link_settings.use_link_rate_set = false;
}
} }
void dc_link_dp_enable_hpd(const struct dc_link *link) void dc_link_dp_enable_hpd(const struct dc_link *link)
......
...@@ -2067,12 +2067,14 @@ void dc_resource_state_construct( ...@@ -2067,12 +2067,14 @@ void dc_resource_state_construct(
* Checks HW resource availability and bandwidth requirement. * Checks HW resource availability and bandwidth requirement.
* @dc: dc struct for this driver * @dc: dc struct for this driver
* @new_ctx: state to be validated * @new_ctx: state to be validated
* @fast_validate: set to true if only yes/no to support matters
* *
* Return: DC_OK if the result can be programmed. Otherwise, an error code. * Return: DC_OK if the result can be programmed. Otherwise, an error code.
*/ */
enum dc_status dc_validate_global_state( enum dc_status dc_validate_global_state(
struct dc *dc, struct dc *dc,
struct dc_state *new_ctx) struct dc_state *new_ctx,
bool fast_validate)
{ {
enum dc_status result = DC_ERROR_UNEXPECTED; enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j; int i, j;
...@@ -2127,7 +2129,7 @@ enum dc_status dc_validate_global_state( ...@@ -2127,7 +2129,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx); result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK) if (result == DC_OK)
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx)) if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE; result = DC_FAIL_BANDWIDTH_VALIDATE;
return result; return result;
......
...@@ -211,7 +211,8 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc) ...@@ -211,7 +211,8 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) { ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx); vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos); if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
return;
if (vpos >= vupdate_line) if (vpos >= vupdate_line)
return; return;
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h" #include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h" #include "dml/display_mode_lib.h"
#define DC_VER "3.2.25" #define DC_VER "3.2.26"
#define MAX_SURFACES 3 #define MAX_SURFACES 3
#define MAX_PLANES 6 #define MAX_PLANES 6
...@@ -66,8 +66,27 @@ struct dc_plane_cap { ...@@ -66,8 +66,27 @@ struct dc_plane_cap {
uint32_t blends_with_above : 1; uint32_t blends_with_above : 1;
uint32_t blends_with_below : 1; uint32_t blends_with_below : 1;
uint32_t per_pixel_alpha : 1; uint32_t per_pixel_alpha : 1;
uint32_t supports_argb8888 : 1; struct {
uint32_t supports_nv12 : 1; uint32_t argb8888 : 1;
uint32_t nv12 : 1;
uint32_t fp16 : 1;
} pixel_format_support;
// max upscaling factor x1000
// upscaling factors are always >= 1
// for example, 1080p -> 8K is 4.0, or 4000 raw value
struct {
uint32_t argb8888;
uint32_t nv12;
uint32_t fp16;
} max_upscale_factor;
// max downscale factor x1000
// downscale factors are always <= 1
// for example, 8K -> 1080p is 0.25, or 250 raw value
struct {
uint32_t argb8888;
uint32_t nv12;
uint32_t fp16;
} max_downscale_factor;
}; };
struct dc_caps { struct dc_caps {
...@@ -183,6 +202,7 @@ struct dc_config { ...@@ -183,6 +202,7 @@ struct dc_config {
bool disable_disp_pll_sharing; bool disable_disp_pll_sharing;
bool fbc_support; bool fbc_support;
bool optimize_edp_link_rate; bool optimize_edp_link_rate;
bool disable_fractional_pwm;
bool allow_seamless_boot_optimization; bool allow_seamless_boot_optimization;
}; };
...@@ -226,6 +246,57 @@ struct dc_clocks { ...@@ -226,6 +246,57 @@ struct dc_clocks {
bool p_state_change_support; bool p_state_change_support;
}; };
struct dc_bw_validation_profile {
bool enable;
unsigned long long total_ticks;
unsigned long long voltage_level_ticks;
unsigned long long watermark_ticks;
unsigned long long rq_dlg_ticks;
unsigned long long total_count;
unsigned long long skip_fast_count;
unsigned long long skip_pass_count;
unsigned long long skip_fail_count;
};
#define BW_VAL_TRACE_SETUP() \
unsigned long long end_tick = 0; \
unsigned long long voltage_level_tick = 0; \
unsigned long long watermark_tick = 0; \
unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
dm_get_timestamp(dc->ctx) : 0
#define BW_VAL_TRACE_COUNT() \
if (dc->debug.bw_val_profile.enable) \
dc->debug.bw_val_profile.total_count++
#define BW_VAL_TRACE_SKIP(status) \
if (dc->debug.bw_val_profile.enable) { \
if (!voltage_level_tick) \
voltage_level_tick = dm_get_timestamp(dc->ctx); \
dc->debug.bw_val_profile.skip_ ## status ## _count++; \
}
#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
if (dc->debug.bw_val_profile.enable) \
voltage_level_tick = dm_get_timestamp(dc->ctx)
#define BW_VAL_TRACE_END_WATERMARKS() \
if (dc->debug.bw_val_profile.enable) \
watermark_tick = dm_get_timestamp(dc->ctx)
#define BW_VAL_TRACE_FINISH() \
if (dc->debug.bw_val_profile.enable) { \
end_tick = dm_get_timestamp(dc->ctx); \
dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
if (watermark_tick) { \
dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
} \
}
struct dc_debug_options { struct dc_debug_options {
enum visual_confirm visual_confirm; enum visual_confirm visual_confirm;
bool sanity_checks; bool sanity_checks;
...@@ -279,6 +350,7 @@ struct dc_debug_options { ...@@ -279,6 +350,7 @@ struct dc_debug_options {
unsigned int force_odm_combine; //bit vector based on otg inst unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz; unsigned int force_fclk_khz;
bool disable_tri_buf; bool disable_tri_buf;
struct dc_bw_validation_profile bw_val_profile;
}; };
struct dc_debug_data { struct dc_debug_data {
...@@ -638,9 +710,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla ...@@ -638,9 +710,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
/*
* fast_validate: we return after determining if we can support the new state,
* but before we populate the programming info
*/
enum dc_status dc_validate_global_state( enum dc_status dc_validate_global_state(
struct dc *dc, struct dc *dc,
struct dc_state *new_ctx); struct dc_state *new_ctx,
bool fast_validate);
void dc_resource_state_construct( void dc_resource_state_construct(
......
...@@ -250,7 +250,7 @@ uint32_t dc_link_bandwidth_kbps( ...@@ -250,7 +250,7 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link, const struct dc_link *link,
const struct dc_link_settings *link_setting); const struct dc_link_settings *link_setting);
const struct dc_link_settings *dc_link_get_verified_link_cap( const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link); const struct dc_link *link);
bool dc_submit_i2c( bool dc_submit_i2c(
......
...@@ -50,6 +50,7 @@ ...@@ -50,6 +50,7 @@
#define MCP_ABM_LEVEL_SET 0x65 #define MCP_ABM_LEVEL_SET 0x65
#define MCP_ABM_PIPE_SET 0x66 #define MCP_ABM_PIPE_SET 0x66
#define MCP_BL_SET 0x67 #define MCP_BL_SET 0x67
#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MCP_DISABLE_ABM_IMMEDIATELY 255 #define MCP_DISABLE_ABM_IMMEDIATELY 255
...@@ -390,6 +391,23 @@ static bool dce_abm_init_backlight(struct abm *abm) ...@@ -390,6 +391,23 @@ static bool dce_abm_init_backlight(struct abm *abm)
REG_UPDATE(BL_PWM_GRP1_REG_LOCK, REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
BL_PWM_GRP1_REG_LOCK, 0); BL_PWM_GRP1_REG_LOCK, 0);
/* Wait until microcontroller is ready to process interrupt */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
/* Set PWM fractional enable/disable */
value = (abm->ctx->dc->config.disable_fractional_pwm == false) ? 1 : 0;
REG_WRITE(MASTER_COMM_DATA_REG1, value);
/* Set command to enable or disable fractional PWM microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_BL_SET_PWM_FRAC);
/* Notify microcontroller of new command */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
/* Ensure command has been executed before continuing */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
return true; return true;
} }
......
...@@ -380,7 +380,24 @@ static const struct resource_caps res_cap = { ...@@ -380,7 +380,24 @@ static const struct resource_caps res_cap = {
static const struct dc_plane_cap plane_cap = { static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB, .type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
}; };
#define CTX ctx #define CTX ctx
...@@ -761,7 +778,8 @@ static enum dc_status build_mapped_resource( ...@@ -761,7 +778,8 @@ static enum dc_status build_mapped_resource(
bool dce100_validate_bandwidth( bool dce100_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context) struct dc_state *context,
bool fast_validate)
{ {
int i; int i;
bool at_least_one_pipe = false; bool at_least_one_pipe = false;
......
...@@ -397,14 +397,48 @@ static const struct dc_plane_cap plane_cap = { ...@@ -397,14 +397,48 @@ static const struct dc_plane_cap plane_cap = {
.blends_with_below = true, .blends_with_below = true,
.blends_with_above = true, .blends_with_above = true,
.per_pixel_alpha = 1, .per_pixel_alpha = 1,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
}; };
static const struct dc_plane_cap underlay_plane_cap = { static const struct dc_plane_cap underlay_plane_cap = {
.type = DC_PLANE_TYPE_DCE_UNDERLAY, .type = DC_PLANE_TYPE_DCE_UNDERLAY,
.blends_with_above = true, .blends_with_above = true,
.per_pixel_alpha = 1, .per_pixel_alpha = 1,
.supports_nv12 = true
.pixel_format_support = {
.argb8888 = false,
.nv12 = true,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 1,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 1,
.nv12 = 250,
.fp16 = 1
}
}; };
#define CTX ctx #define CTX ctx
...@@ -869,7 +903,8 @@ static enum dc_status build_mapped_resource( ...@@ -869,7 +903,8 @@ static enum dc_status build_mapped_resource(
static bool dce110_validate_bandwidth( static bool dce110_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context) struct dc_state *context,
bool fast_validate)
{ {
bool result = false; bool result = false;
......
...@@ -399,7 +399,24 @@ static const struct resource_caps polaris_11_resource_cap = { ...@@ -399,7 +399,24 @@ static const struct resource_caps polaris_11_resource_cap = {
static const struct dc_plane_cap plane_cap = { static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB, .type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
}; };
#define CTX ctx #define CTX ctx
...@@ -809,7 +826,8 @@ static enum dc_status build_mapped_resource( ...@@ -809,7 +826,8 @@ static enum dc_status build_mapped_resource(
bool dce112_validate_bandwidth( bool dce112_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context) struct dc_state *context,
bool fast_validate)
{ {
bool result = false; bool result = false;
......
...@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context( ...@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context(
bool dce112_validate_bandwidth( bool dce112_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context); struct dc_state *context,
bool fast_validate);
enum dc_status dce112_add_stream_to_ctx( enum dc_status dce112_add_stream_to_ctx(
struct dc *dc, struct dc *dc,
......
...@@ -456,7 +456,24 @@ static const struct resource_caps res_cap = { ...@@ -456,7 +456,24 @@ static const struct resource_caps res_cap = {
static const struct dc_plane_cap plane_cap = { static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB, .type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
}; };
static const struct dc_debug_options debug_defaults = { static const struct dc_debug_options debug_defaults = {
......
...@@ -389,7 +389,24 @@ static const struct resource_caps res_cap_83 = { ...@@ -389,7 +389,24 @@ static const struct resource_caps res_cap_83 = {
static const struct dc_plane_cap plane_cap = { static const struct dc_plane_cap plane_cap = {
.type = DC_PLANE_TYPE_DCE_RGB, .type = DC_PLANE_TYPE_DCE_RGB,
.supports_argb8888 = true,
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
.fp16 = false
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 1,
.fp16 = 1
}
}; };
static const struct dce_dmcu_registers dmcu_regs = { static const struct dce_dmcu_registers dmcu_regs = {
...@@ -795,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool) ...@@ -795,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool)
bool dce80_validate_bandwidth( bool dce80_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context) struct dc_state *context,
bool fast_validate)
{ {
int i; int i;
bool at_least_one_pipe = false; bool at_least_one_pipe = false;
......
...@@ -247,7 +247,7 @@ ...@@ -247,7 +247,7 @@
.field_name = reg_name ## __ ## field_name ## post_fix .field_name = reg_name ## __ ## field_name ## post_fix
/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */ /* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
#define HUBP_MASK_SH_LIST_DCN(mask_sh)\ #define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
...@@ -331,7 +331,6 @@ ...@@ -331,7 +331,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
...@@ -339,7 +338,6 @@ ...@@ -339,7 +338,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\ HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
...@@ -373,6 +371,11 @@ ...@@ -373,6 +371,11 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
/* Mask/shift struct generation macro for ASICs with VM */ /* Mask/shift struct generation macro for ASICs with VM */
#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\ #define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
......
...@@ -521,8 +521,24 @@ static const struct dc_plane_cap plane_cap = { ...@@ -521,8 +521,24 @@ static const struct dc_plane_cap plane_cap = {
.blends_with_above = true, .blends_with_above = true,
.blends_with_below = true, .blends_with_below = true,
.per_pixel_alpha = true, .per_pixel_alpha = true,
.supports_argb8888 = true,
.supports_nv12 = true .pixel_format_support = {
.argb8888 = true,
.nv12 = true,
.fp16 = true
},
.max_upscale_factor = {
.argb8888 = 16000,
.nv12 = 16000,
.fp16 = 1
},
.max_downscale_factor = {
.argb8888 = 250,
.nv12 = 250,
.fp16 = 1
}
}; };
static const struct dc_debug_options debug_defaults_drv = { static const struct dc_debug_options debug_defaults_drv = {
......
...@@ -97,7 +97,8 @@ struct resource_funcs { ...@@ -97,7 +97,8 @@ struct resource_funcs {
const struct encoder_init_data *init); const struct encoder_init_data *init);
bool (*validate_bandwidth)( bool (*validate_bandwidth)(
struct dc *dc, struct dc *dc,
struct dc_state *context); struct dc_state *context,
bool fast_validate);
enum dc_status (*validate_global)( enum dc_status (*validate_global)(
struct dc *dc, struct dc *dc,
......
...@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults; ...@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth( bool dcn_validate_bandwidth(
struct dc *dc, struct dc *dc,
struct dc_state *context); struct dc_state *context,
bool fast_validate);
unsigned int dcn_find_dcfclk_suits_all( unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc, const struct dc *dc,
......
...@@ -540,6 +540,8 @@ struct smu_funcs ...@@ -540,6 +540,8 @@ struct smu_funcs
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed); int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
}; };
#define smu_init_microcode(smu) \ #define smu_init_microcode(smu) \
...@@ -723,6 +725,8 @@ struct smu_funcs ...@@ -723,6 +725,8 @@ struct smu_funcs
((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0) ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0)
#define smu_get_mclk(smu, low) \ #define smu_get_mclk(smu, low) \
((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0) ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
#define smu_set_xgmi_pstate(smu, pstate) \
((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
......
...@@ -1893,6 +1893,13 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, ...@@ -1893,6 +1893,13 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
return ret; return ret;
} }
static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
/* send msg to SMU to set pstate */
return 0;
}
static const struct smu_funcs smu_v11_0_funcs = { static const struct smu_funcs smu_v11_0_funcs = {
.init_microcode = smu_v11_0_init_microcode, .init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode, .load_microcode = smu_v11_0_load_microcode,
...@@ -1947,6 +1954,7 @@ static const struct smu_funcs smu_v11_0_funcs = { ...@@ -1947,6 +1954,7 @@ static const struct smu_funcs smu_v11_0_funcs = {
.get_fan_speed_percent = smu_v11_0_get_fan_speed_percent, .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
.set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
.set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
}; };
void smu_v11_0_set_smu_funcs(struct smu_context *smu) void smu_v11_0_set_smu_funcs(struct smu_context *smu)
...@@ -1954,7 +1962,6 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu) ...@@ -1954,7 +1962,6 @@ void smu_v11_0_set_smu_funcs(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev; struct amdgpu_device *adev = smu->adev;
smu->funcs = &smu_v11_0_funcs; smu->funcs = &smu_v11_0_funcs;
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA20: case CHIP_VEGA20:
vega20_set_ppt_funcs(smu); vega20_set_ppt_funcs(smu);
......
...@@ -528,6 +528,8 @@ struct drm_amdgpu_gem_va { ...@@ -528,6 +528,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05 #define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06 #define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07 #define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
struct drm_amdgpu_cs_chunk { struct drm_amdgpu_cs_chunk {
__u32 chunk_id; __u32 chunk_id;
...@@ -608,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem { ...@@ -608,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle; __u32 handle;
}; };
struct drm_amdgpu_cs_chunk_syncobj {
__u32 handle;
__u32 flags;
__u64 point;
};
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2 #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment