Commit fa5cf901 authored by Dave Airlie

Merge branch 'drm-fixes-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-fixes

Nothing too major here.  A couple more ttm fixes for huge pages and a kiq
fix for amdgpu, along with some DC fixes.

* 'drm-fixes-4.15' of git://people.freedesktop.org/~agd5f/linux:
  drm/amd/display: Fix rehook MST display not light back on
  drm/amd/display: fix missing pixel clock adjustment for dongle
  drm/amd/display: set chroma taps to 1 when not scaling
  drm/amd/display: add pipe locking before front end programing
  drm/amdgpu: fix MAP_QUEUES paramter
  drm/ttm: max_cpages is in unit of native page
  drm/ttm: fix incorrect calculate on shrink_pages
parents 1291a0d5 becd0875
@@ -2467,7 +2467,7 @@ static int gfx_v9_0_kiq_kcq_enable(struct amdgpu_device *adev)
 			PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
 			PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
 			PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-			PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+			PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
 			PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
 			PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
 		amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
...
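The MAP_QUEUES fix is the one-field change above: the alloc_format field of the PM4 packet now encodes 0, which is the value the adjacent all_on_one_pipe comment had been describing all along.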
@@ -2336,7 +2336,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		const struct dm_connector_state *dm_state)
 {
 	struct drm_display_mode *preferred_mode = NULL;
-	const struct drm_connector *drm_connector;
+	struct drm_connector *drm_connector;
 	struct dc_stream_state *stream = NULL;
 	struct drm_display_mode mode = *drm_mode;
 	bool native_mode_found = false;
@@ -2355,11 +2355,13 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	if (!aconnector->dc_sink) {
 		/*
-		 * Exclude MST from creating fake_sink
-		 * TODO: need to enable MST into fake_sink feature
+		 * Create dc_sink when necessary to MST
+		 * Don't apply fake_sink to MST
 		 */
-		if (aconnector->mst_port)
-			goto stream_create_fail;
+		if (aconnector->mst_port) {
+			dm_dp_mst_dc_sink_create(drm_connector);
+			goto mst_dc_sink_create_done;
+		}
 
 		if (create_fake_sink(aconnector))
 			goto stream_create_fail;
@@ -2410,6 +2412,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
 	return stream;
 }
...
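The stream-creation change replaces the old early-out for MST connectors. Instead of failing before any sink exists, the driver now builds a real remote sink from the MST port's EDID via the new dm_dp_mst_dc_sink_create() (added below) and jumps to the new mst_dc_sink_create_done label, so a re-hooked MST display gets a valid dc_sink and can light back up.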
@@ -189,6 +189,8 @@ struct amdgpu_dm_connector {
 	struct mutex hpd_lock;
 
 	bool fake_enable;
+
+	bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
...
@@ -185,6 +185,42 @@ static int dm_connector_update_modes(struct drm_connector *connector,
 	return ret;
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct edid *edid;
+	struct dc_sink *dc_sink;
+	struct dc_sink_init_data init_params = {
+			.link = aconnector->dc_link,
+			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+	if (!edid) {
+		drm_mode_connector_update_edid_property(
+			&aconnector->base,
+			NULL);
+		return;
+	}
+
+	aconnector->edid = edid;
+
+	dc_sink = dc_link_add_remote_sink(
+		aconnector->dc_link,
+		(uint8_t *)aconnector->edid,
+		(aconnector->edid->extensions + 1) * EDID_LENGTH,
+		&init_params);
+
+	dc_sink->priv = aconnector;
+	aconnector->dc_sink = dc_sink;
+
+	amdgpu_dm_add_sink_to_freesync_module(
+			connector, aconnector->edid);
+
+	drm_mode_connector_update_edid_property(
+			&aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -311,6 +347,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 			drm_mode_connector_set_path_property(connector, pathprop);
 
 			drm_connector_list_iter_end(&conn_iter);
+			aconnector->mst_connected = true;
 			return &aconnector->base;
 		}
 	}
@@ -363,6 +400,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	 */
 	amdgpu_dm_connector_funcs_reset(connector);
 
+	aconnector->mst_connected = true;
+
 	DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
 			aconnector, connector->base.id, aconnector->mst_port);
@@ -394,6 +433,8 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_mode_connector_update_edid_property(
 			&aconnector->base,
 			NULL);
+
+	aconnector->mst_connected = false;
 }
@@ -404,10 +445,18 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
 	drm_kms_helper_hotplug_event(dev);
 }
 
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+	mutex_lock(&connector->dev->mode_config.mutex);
+	drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+	mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
 	if (adev->mode_info.rfbdev)
 		drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -416,6 +465,8 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 
 	drm_connector_register(connector);
 
+	if (aconnector->mst_connected)
+		dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
...
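The new mst_connected flag records that the connector was created through the MST topology. When such a connector is registered again, dm_dp_mst_link_status_reset() forces its link-status property to DRM_MODE_LINK_STATUS_BAD, the standard DRM hint that userspace should re-probe the connector and perform a fresh modeset.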
@@ -31,5 +31,6 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 				       struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
@@ -900,6 +900,15 @@ bool dcn_validate_bandwidth(
 			v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
 			v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
 			v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+			/*
+			 * Spreadsheet doesn't handle taps_c is one properly,
+			 * need to force Chroma to always be scaled to pass
+			 * bandwidth validation.
+			 */
+			if (v->override_hta_pschroma[input_idx] == 1)
+				v->override_hta_pschroma[input_idx] = 2;
+			if (v->override_vta_pschroma[input_idx] == 1)
+				v->override_vta_pschroma[input_idx] = 2;
 			v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
 		}
 		if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
...
@@ -1801,7 +1801,7 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
 		link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
 		const struct dc_crtc_timing *timing,
 		const struct dc_dongle_caps *dongle_caps)
 {
@@ -1833,6 +1833,8 @@ static bool dp_active_dongle_validate_timing(
 	/* Check Color Depth and Pixel Clock */
 	if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
 		required_pix_clk /= 2;
+	else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+		required_pix_clk = required_pix_clk * 2 / 3;
 
 	switch (timing->display_color_depth) {
 	case COLOR_DEPTH_666:
...
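The dongle hunk scales the required pixel clock by the data-rate ratio of the pixel encoding before comparing it with the dongle's capability: 4:2:0 (half rate) was already handled, and 4:2:2 (two-thirds rate) was missing. A minimal standalone sketch of that arithmetic follows; the enum and function names are illustrative stand-ins, not the dc API.

#include <stdio.h>

enum pixel_encoding { ENC_RGB, ENC_YCBCR444, ENC_YCBCR422, ENC_YCBCR420 };

/* Effective clock the dongle must carry for a given pixel encoding. */
static unsigned required_pix_clk_khz(unsigned pix_clk_khz, enum pixel_encoding enc)
{
	if (enc == ENC_YCBCR420)
		return pix_clk_khz / 2;		/* half the data per pixel */
	if (enc == ENC_YCBCR422)
		return pix_clk_khz * 2 / 3;	/* the case this commit adds */
	return pix_clk_khz;
}

int main(void)
{
	/* 4K60 is roughly 594000 kHz; as 4:2:2 the dongle sees 396000 kHz */
	printf("%u kHz\n", required_pix_clk_khz(594000, ENC_YCBCR422));
	return 0;
}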
@@ -2866,16 +2866,19 @@ static void dce110_apply_ctx_for_surface(
 		int num_planes,
 		struct dc_state *context)
 {
-	int i, be_idx;
+	int i;
 
 	if (num_planes == 0)
 		return;
 
-	be_idx = -1;
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		if (stream == context->res_ctx.pipe_ctx[i].stream) {
-			be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
-			break;
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if (stream == pipe_ctx->stream) {
+			if (!pipe_ctx->top_pipe &&
+				(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+				dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
 		}
 	}
@@ -2895,9 +2898,22 @@ static void dce110_apply_ctx_for_surface(
 			context->stream_count);
 
 		dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+		dc->hwss.update_plane_addr(dc, pipe_ctx);
+
 		program_surface_visibility(dc, pipe_ctx);
 
 	}
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+		if ((stream == pipe_ctx->stream) &&
+			(!pipe_ctx->top_pipe) &&
+			(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
+	}
 }
 
 static void dce110_power_down_fe(struct dc *dc, int fe_idx)
...
@@ -159,11 +159,10 @@ bool dpp_get_optimal_number_of_taps(
 		scl_data->taps.h_taps = 1;
 	if (IDENTITY_RATIO(scl_data->ratios.vert))
 		scl_data->taps.v_taps = 1;
-	/*
-	 * Spreadsheet doesn't handle taps_c is one properly,
-	 * need to force Chroma to always be scaled to pass
-	 * bandwidth validation.
-	 */
+	if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+		scl_data->taps.h_taps_c = 1;
+	if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+		scl_data->taps.v_taps_c = 1;
 	}
 
 	return true;
...
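Read together with the dcn_calcs.c hunk above, this moves the chroma-taps workaround to the right layer: dpp_get_optimal_number_of_taps() again reports true 1-tap (identity) chroma scaling for hardware programming, while dcn_validate_bandwidth() substitutes 2 taps only in the values fed to the bandwidth formula, which, per the comment, cannot handle taps_c == 1.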
@@ -455,6 +455,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		freed += (nr_free_pool - shrink_pages) << pool->order;
 		if (freed >= sc->nr_to_scan)
 			break;
+		shrink_pages <<= pool->order;
 	}
 	mutex_unlock(&lock);
 	return freed;
@@ -543,7 +544,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	int r = 0;
 	unsigned i, j, cpages;
 	unsigned npages = 1 << order;
-	unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+	unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 
 	/* allocate array for page caching change */
 	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
...
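Both ttm hunks are unit-conversion fixes: a pool of order N holds compound pages that each cover 1 << N native pages, while the shrinker's nr_to_scan and NUM_PAGES_TO_ALLOC count native pages. The toy sketch below (the pool struct and free helper are invented for illustration, not TTM's API) mirrors the corrected shrink accounting:

#include <stdio.h>

#define NUM_POOLS 2

/* Toy pool of compound pages, each covering 1 << order native pages. */
struct toy_pool {
	unsigned order;
	unsigned npages;	/* compound pages currently pooled */
};

/* Free up to nr_free compound pages; return how many were NOT freed. */
static unsigned toy_pool_free(struct toy_pool *pool, unsigned nr_free)
{
	unsigned n = nr_free < pool->npages ? nr_free : pool->npages;

	pool->npages -= n;
	return nr_free - n;
}

int main(void)
{
	struct toy_pool pools[NUM_POOLS] = { { 9, 1 }, { 9, 4 } };
	unsigned nr_to_scan = 2048;		/* shrinker target, native pages */
	unsigned shrink_pages = nr_to_scan;	/* native pages */
	unsigned freed = 0;
	int i;

	for (i = 0; i < NUM_POOLS; i++) {
		struct toy_pool *pool = &pools[i];
		/* convert the request into this pool's compound-page unit */
		unsigned nr_free_pool = shrink_pages >> pool->order;

		shrink_pages = toy_pool_free(pool, nr_free_pool); /* compound */
		freed += (nr_free_pool - shrink_pages) << pool->order;
		if (freed >= nr_to_scan)
			break;
		/* the fix: carry the leftover forward in native pages again */
		shrink_pages <<= pool->order;
	}
	printf("freed %u of %u native pages\n", freed, nr_to_scan);
	return 0;
}

Dropping the final shift reproduces the bug: the leftover from the first pool (3 compound pages) would be misread as 3 native pages, and the second pool would be asked to free 3 >> 9 = 0 pages. The max_cpages change is the same conversion on the allocation path: a request for count compound pages is count << order native pages, and that is the quantity to clamp against NUM_PAGES_TO_ALLOC.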