Commit 904ce198 authored by Dave Airlie

Merge tag 'drm/tegra/for-5.5-rc1' of git://anongit.freedesktop.org/tegra/linux into drm-next

drm/tegra: Changes for v5.5-rc1

The bulk of these changes is the addition of DisplayPort support for
Tegra210, Tegra186 and Tegra194. I've been running versions of this for
about three years now, so I'd consider these changes to be pretty
mature. These changes also unify the existing eDP support with the DP
support since the programming is very similar, except for a few steps
that can be easily parameterized.

The rest are a couple of fixes all over the place for minor issues, as
well as some work to support the IOMMU-backed DMA API, which in the end
turned out to also clean up a number of cases where the DMA API was not
being used correctly.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thierry Reding <thierry.reding@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191102140116.3860545-1-thierry.reding@gmail.com
parents 633aa7e5 84db889e
......@@ -9,7 +9,7 @@ config DRM_TEGRA
select DRM_MIPI_DSI
select DRM_PANEL
select TEGRA_HOST1X
select IOMMU_IOVA if IOMMU_SUPPORT
select IOMMU_IOVA
select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have an NVIDIA Tegra SoC.
......
......@@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
window.swap = state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(fb, i);
window.base[i] = bo->paddr + fb->offsets[i];
window.base[i] = state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
......@@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_plane_atomic_check,
.atomic_disable = tegra_plane_atomic_disable,
.atomic_update = tegra_plane_atomic_update,
......@@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
return;
}
value |= (bo->paddr >> 10) & 0x3fffff;
value |= (bo->iova >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
value = (bo->paddr >> 32) & 0x3;
value = (bo->iova >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
......@@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_cursor_atomic_check,
.atomic_update = tegra_cursor_atomic_update,
.atomic_disable = tegra_cursor_atomic_disable,
......@@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client)
if (!dc->syncpt)
dev_warn(dc->dev, "failed to allocate syncpoint\n");
dc->group = host1x_client_iommu_attach(client, true);
if (IS_ERR(dc->group)) {
err = PTR_ERR(dc->group);
err = host1x_client_iommu_attach(client);
if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
return err;
}
......@@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client)
goto cleanup;
}
/*
* Inherit the DMA parameters (such as maximum segment size) from the
* parent device.
*/
client->dev->dma_parms = client->parent->dma_parms;
return 0;
cleanup:
......@@ -2083,7 +2090,7 @@ static int tegra_dc_init(struct host1x_client *client)
if (!IS_ERR(primary))
drm_plane_cleanup(primary);
host1x_client_iommu_detach(client, dc->group);
host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return err;
......@@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client)
if (!tegra_dc_has_window_groups(dc))
return 0;
/* avoid a dangling pointer just in case this disappears */
client->dev->dma_parms = NULL;
devm_free_irq(dc->dev, dc->irq, dc);
err = tegra_dc_rgb_exit(dc);
......@@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
host1x_client_iommu_detach(client, dc->group);
host1x_client_iommu_detach(client);
host1x_syncpt_free(dc->syncpt);
return 0;
......
......@@ -90,8 +90,6 @@ struct tegra_dc {
struct drm_info_list *debugfs_files;
const struct tegra_dc_soc_info *soc;
struct iommu_group *group;
};
static inline struct tegra_dc *
......
......@@ -4,10 +4,158 @@
* Copyright (C) 2015 Rob Clark
*/
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_print.h>
#include "dp.h"
static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 };
static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps)
{
caps->enhanced_framing = false;
caps->tps3_supported = false;
caps->fast_training = false;
caps->channel_coding = false;
caps->alternate_scrambler_reset = false;
}
/*
 * Copy one set of link capabilities into another. A plain structure
 * assignment copies every flag field-for-field.
 */
void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
			   const struct drm_dp_link_caps *src)
{
	*dest = *src;
}
static void drm_dp_link_reset(struct drm_dp_link *link)
{
unsigned int i;
if (!link)
return;
link->revision = 0;
link->max_rate = 0;
link->max_lanes = 0;
drm_dp_link_caps_reset(&link->caps);
link->aux_rd_interval.cr = 0;
link->aux_rd_interval.ce = 0;
link->edp = 0;
link->rate = 0;
link->lanes = 0;
for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++)
link->rates[i] = 0;
link->num_rates = 0;
}
/**
 * drm_dp_link_add_rate() - add a rate to the list of supported rates
 * @link: the link to add the rate to
 * @rate: the rate to add
 *
 * Add a link rate to the list of supported link rates. The list is kept
 * sorted in ascending order.
 *
 * Returns:
 * 0 on success or one of the following negative error codes on failure:
 * - ENOSPC if the maximum number of supported rates has been reached
 * - EEXIST if the link already supports this rate
 *
 * See also:
 * drm_dp_link_remove_rate()
 */
int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate)
{
	unsigned int i, pivot;

	if (link->num_rates == DP_MAX_SUPPORTED_RATES)
		return -ENOSPC;

	/* find the insertion point that keeps the array sorted */
	for (pivot = 0; pivot < link->num_rates; pivot++)
		if (rate <= link->rates[pivot])
			break;

	if (pivot != link->num_rates && rate == link->rates[pivot])
		return -EEXIST;

	/* shift higher rates up to make room at the insertion point */
	for (i = link->num_rates; i > pivot; i--)
		link->rates[i] = link->rates[i - 1];

	link->rates[pivot] = rate;
	link->num_rates++;

	return 0;
}
/**
 * drm_dp_link_remove_rate() - remove a rate from the list of supported rates
 * @link: the link from which to remove the rate
 * @rate: the rate to remove
 *
 * Removes a link rate from the list of supported link rates and compacts
 * the remaining entries.
 *
 * Returns:
 * 0 on success or one of the following negative error codes on failure:
 * - EINVAL if the specified rate is not among the supported rates
 *
 * See also:
 * drm_dp_link_add_rate()
 */
int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate)
{
	unsigned int pos, idx;

	/* locate the rate in the list */
	for (pos = 0; pos < link->num_rates; pos++)
		if (link->rates[pos] == rate)
			break;

	if (pos == link->num_rates)
		return -EINVAL;

	link->num_rates--;

	/* close the gap by shifting the remaining entries down */
	for (idx = pos; idx < link->num_rates; idx++)
		link->rates[idx] = link->rates[idx + 1];

	return 0;
}
/**
 * drm_dp_link_update_rates() - normalize the supported link rates array
 * @link: the link for which to normalize the supported link rates
 *
 * Compacts the array of supported link rates by dropping zeroed (stale)
 * entries, clears the now-unused tail and refreshes the rate count. Call
 * this after modifying the rates array by hand; note that
 * drm_dp_link_remove_rate() already performs this janitorial work itself.
 *
 * See also:
 * drm_dp_link_add_rate(), drm_dp_link_remove_rate()
 */
void drm_dp_link_update_rates(struct drm_dp_link *link)
{
	unsigned int src, dst = 0;

	/* move every non-zero entry to the front, preserving order */
	for (src = 0; src < link->num_rates; src++)
		if (link->rates[src] != 0)
			link->rates[dst++] = link->rates[src];

	/* zero out whatever is left behind the compacted entries */
	for (src = dst; src < link->num_rates; src++)
		link->rates[src] = 0;

	link->num_rates = dst;
}
/**
* drm_dp_link_probe() - probe a DisplayPort link for capabilities
* @aux: DisplayPort AUX channel
......@@ -21,21 +169,88 @@
*/
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 values[3];
u8 dpcd[DP_RECEIVER_CAP_SIZE], value;
unsigned int rd_interval;
int err;
memset(link, 0, sizeof(*link));
drm_dp_link_reset(link);
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values));
err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd));
if (err < 0)
return err;
link->revision = values[0];
link->rate = drm_dp_bw_code_to_link_rate(values[1]);
link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK;
link->revision = dpcd[DP_DPCD_REV];
link->max_rate = drm_dp_max_link_rate(dpcd);
link->max_lanes = drm_dp_max_lane_count(dpcd);
link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd);
link->caps.tps3_supported = drm_dp_tps3_supported(dpcd);
link->caps.fast_training = drm_dp_fast_training_cap(dpcd);
link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd);
if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
link->caps.alternate_scrambler_reset = true;
err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value);
if (err < 0)
return err;
if (value >= ARRAY_SIZE(drm_dp_edp_revisions))
DRM_ERROR("unsupported eDP version: %02x\n", value);
else
link->edp = drm_dp_edp_revisions[value];
}
/*
* The DPCD stores the AUX read interval in units of 4 ms. There are
* two special cases:
*
* 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery
* and channel equalization should use 100 us or 400 us AUX read
* intervals, respectively
*
* 2) for DP v1.4 and above, clock recovery should always use 100 us
* AUX read intervals
*/
rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4) {
DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n",
rd_interval);
rd_interval = 4;
}
rd_interval *= 4 * USEC_PER_MSEC;
if (values[2] & DP_ENHANCED_FRAME_CAP)
link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14)
link->aux_rd_interval.cr = 100;
if (rd_interval == 0)
link->aux_rd_interval.ce = 400;
link->rate = link->max_rate;
link->lanes = link->max_lanes;
/* Parse SUPPORTED_LINK_RATES from eDP 1.4 */
if (link->edp >= 0x14) {
u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2];
unsigned int i;
u16 rate;
err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES,
supported_rates,
sizeof(supported_rates));
if (err < 0)
return err;
for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) {
rate = supported_rates[i * 2 + 1] << 8 |
supported_rates[i * 2 + 0];
drm_dp_link_add_rate(link, rate * 200);
}
}
return 0;
}
......@@ -116,18 +331,546 @@ int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
*/
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 values[2];
u8 values[2], value;
int err;
if (link->ops && link->ops->configure) {
err = link->ops->configure(link);
if (err < 0) {
DRM_ERROR("failed to configure DP link: %d\n", err);
return err;
}
}
values[0] = drm_dp_link_rate_to_bw_code(link->rate);
values[1] = link->num_lanes;
values[1] = link->lanes;
if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
if (link->caps.enhanced_framing)
values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
if (err < 0)
return err;
if (link->caps.channel_coding)
value = DP_SET_ANSI_8B10B;
else
value = 0;
err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
if (err < 0)
return err;
if (link->caps.alternate_scrambler_reset) {
err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET,
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
if (err < 0)
return err;
}
return 0;
}
/**
 * drm_dp_link_choose() - choose the lowest possible configuration for a mode
 * @link: DRM DP link object
 * @mode: DRM display mode
 * @info: DRM display information
 *
 * According to the eDP specification, a source should select a configuration
 * with the lowest number of lanes and the lowest possible link rate that can
 * match the bitrate requirements of a video mode. However it must ensure not
 * to exceed the capabilities of the sink.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int drm_dp_link_choose(struct drm_dp_link *link,
		       const struct drm_display_mode *mode,
		       const struct drm_display_info *info)
{
	/* available link symbol clock rates */
	static const unsigned int rates[3] = { 162000, 270000, 540000 };
	/* available number of lanes */
	static const unsigned int lanes[3] = { 1, 2, 4 };
	unsigned long requirement, capacity;
	unsigned int rate = link->max_rate;
	unsigned int i, j;

	/*
	 * Bandwidth requirement in kbps: pixel clock (kHz) times bits per
	 * component times three components. NOTE(review): assumes RGB
	 * output — confirm behaviour for YUV 4:2:x modes.
	 */
	requirement = mode->clock * info->bpc * 3;

	/* try lane counts from fewest to most, rates from lowest to highest */
	for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) {
		for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) {
			/*
			 * Capacity for this combination of lanes and rate,
			 * factoring in the ANSI 8B/10B encoding.
			 *
			 * Link rates in the DRM DP helpers are really link
			 * symbol frequencies, so a tenth of the actual rate
			 * of the link.
			 */
			capacity = lanes[i] * (rates[j] * 10) * 8 / 10;

			if (capacity >= requirement) {
				DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n",
					      lanes[i], rates[j], requirement,
					      capacity);
				link->lanes = lanes[i];
				link->rate = rates[j];
				return 0;
			}
		}
	}

	/* no combination within the sink's limits can carry this mode */
	return -ERANGE;
}
/**
* DOC: Link training
*
* These functions contain common logic and helpers to implement DisplayPort
* link training.
*/
/**
 * drm_dp_link_train_init() - initialize DisplayPort link training state
 * @train: DisplayPort link training state
 *
 * Clears the requested and sink-adjusted drive settings for all four lanes,
 * disables the training pattern and marks both training phases as not yet
 * completed.
 */
void drm_dp_link_train_init(struct drm_dp_link_train *train)
{
	struct drm_dp_link_train_set *req = &train->request;
	struct drm_dp_link_train_set *adj = &train->adjust;
	unsigned int lane;

	/* start every lane at the lowest drive settings */
	for (lane = 0; lane < 4; lane++) {
		req->voltage_swing[lane] = 0;
		req->pre_emphasis[lane] = 0;
		req->post_cursor[lane] = 0;

		adj->voltage_swing[lane] = 0;
		adj->pre_emphasis[lane] = 0;
		adj->post_cursor[lane] = 0;
	}

	train->pattern = DP_TRAINING_PATTERN_DISABLE;
	train->clock_recovered = false;
	train->channel_equalized = false;
}
static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train)
{
return train->clock_recovered && train->channel_equalized;
}
/*
 * Program the currently requested drive settings and training pattern into
 * both the source (via the driver's ->apply_training() hook) and the sink
 * (via DPCD writes). Returns 0 on success or a negative error code.
 *
 * NOTE(review): link->ops and link->ops->apply_training are dereferenced
 * unconditionally — confirm that every caller registers this hook.
 */
static int drm_dp_link_apply_training(struct drm_dp_link *link)
{
	struct drm_dp_link_train_set *request = &link->train.request;
	unsigned int lanes = link->lanes, *vs, *pe, *pc, i;
	struct drm_dp_aux *aux = link->aux;
	u8 values[4], pattern = 0;
	int err;

	/* let the source hardware apply the settings first */
	err = link->ops->apply_training(link);
	if (err < 0) {
		DRM_ERROR("failed to apply link training: %d\n", err);
		return err;
	}

	vs = request->voltage_swing;
	pe = request->pre_emphasis;
	pc = request->post_cursor;

	/* write currently selected voltage-swing and pre-emphasis levels */
	for (i = 0; i < lanes; i++)
		values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
			    DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);

	err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes);
	if (err < 0) {
		DRM_ERROR("failed to set training parameters: %d\n", err);
		return err;
	}

	/* write currently selected post-cursor level (if supported) */
	if (link->revision >= 0x12 && link->rate == 540000) {
		/* two lanes share each DP_TRAINING_LANEx_y_SET2 byte */
		values[0] = values[1] = 0;

		for (i = 0; i < lanes; i++)
			values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]);

		err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values,
					DIV_ROUND_UP(lanes, 2));
		if (err < 0) {
			DRM_ERROR("failed to set post-cursor: %d\n", err);
			return err;
		}
	}

	/* write link pattern (scrambling is disabled while training) */
	if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE)
		pattern |= DP_LINK_SCRAMBLING_DISABLE;

	pattern |= link->train.pattern;

	err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
	if (err < 0) {
		DRM_ERROR("failed to set training pattern: %d\n", err);
		return err;
	}

	return 0;
}
/*
 * Sleep for the AUX read interval that matches the training phase of the
 * currently selected pattern: clock recovery (TPS1) or channel
 * equalization (TPS2/TPS3). No wait when training is disabled.
 */
static void drm_dp_link_train_wait(struct drm_dp_link *link)
{
	unsigned long min = 0;

	if (link->train.pattern == DP_TRAINING_PATTERN_1)
		min = link->aux_rd_interval.cr;
	else if (link->train.pattern == DP_TRAINING_PATTERN_2 ||
		 link->train.pattern == DP_TRAINING_PATTERN_3)
		min = link->aux_rd_interval.ce;

	if (min > 0)
		usleep_range(min, 2 * min);
}
/*
 * Extract the per-lane drive-setting adjustments requested by the sink from
 * the raw DPCD link status and store them in link->train.adjust.
 */
static void drm_dp_link_get_adjustments(struct drm_dp_link *link,
					u8 status[DP_LINK_STATUS_SIZE])
{
	struct drm_dp_link_train_set *adjust = &link->train.adjust;
	unsigned int i;

	for (i = 0; i < link->lanes; i++) {
		/* shift the raw DPCD fields down to plain level numbers */
		adjust->voltage_swing[i] =
			drm_dp_get_adjust_request_voltage(status, i) >>
				DP_TRAIN_VOLTAGE_SWING_SHIFT;

		adjust->pre_emphasis[i] =
			drm_dp_get_adjust_request_pre_emphasis(status, i) >>
				DP_TRAIN_PRE_EMPHASIS_SHIFT;

		adjust->post_cursor[i] =
			drm_dp_get_adjust_request_post_cursor(status, i);
	}
}
/*
 * Adopt the drive settings that the sink requested (train->adjust) as the
 * new requested settings (train->request) for the next training iteration.
 */
static void drm_dp_link_train_adjust(struct drm_dp_link_train *train)
{
	struct drm_dp_link_train_set *req = &train->request;
	struct drm_dp_link_train_set *adj = &train->adjust;
	unsigned int lane;

	for (lane = 0; lane < 4; lane++) {
		req->voltage_swing[lane] = adj->voltage_swing[lane];
		req->pre_emphasis[lane] = adj->pre_emphasis[lane];
		req->post_cursor[lane] = adj->post_cursor[lane];
	}
}
/*
 * Run one clock-recovery iteration: apply the current drive settings and
 * pattern, wait the mandated AUX read interval, then read back the link
 * status. On failure to lock, record the sink's requested adjustments so
 * the caller can retry; on success, latch train.clock_recovered.
 *
 * Returns 0 (even when the sink did not lock) or a negative error code on
 * AUX communication failure.
 */
static int drm_dp_link_recover_clock(struct drm_dp_link *link)
{
	u8 status[DP_LINK_STATUS_SIZE];
	int err;

	err = drm_dp_link_apply_training(link);
	if (err < 0)
		return err;

	drm_dp_link_train_wait(link);

	err = drm_dp_dpcd_read_link_status(link->aux, status);
	if (err < 0) {
		DRM_ERROR("failed to read link status: %d\n", err);
		return err;
	}

	if (!drm_dp_clock_recovery_ok(status, link->lanes))
		drm_dp_link_get_adjustments(link, status);
	else
		link->train.clock_recovered = true;

	return 0;
}
/*
 * Clock-recovery phase of link training: iterate drm_dp_link_recover_clock()
 * up to four times, adopting the sink's adjustments between attempts.
 *
 * Returns 0 when the loop completes (check train.clock_recovered for the
 * actual outcome) or a negative error code on AUX failure.
 */
static int drm_dp_link_clock_recovery(struct drm_dp_link *link)
{
	unsigned int repeat;
	int err;

	/* start clock recovery using training pattern 1 */
	link->train.pattern = DP_TRAINING_PATTERN_1;

	for (repeat = 1; repeat < 5; repeat++) {
		err = drm_dp_link_recover_clock(link);
		if (err < 0) {
			DRM_ERROR("failed to recover clock: %d\n", err);
			return err;
		}

		if (link->train.clock_recovered)
			break;

		drm_dp_link_train_adjust(&link->train);
	}

	return 0;
}
/*
 * Run one channel-equalization iteration: apply settings, wait, read back
 * status. Clears train.clock_recovered if the link lost its clock lock
 * during equalization; otherwise records adjustments or latches
 * train.channel_equalized on success.
 *
 * Returns 0 (inspect the train flags for the outcome) or a negative error
 * code on AUX communication failure.
 */
static int drm_dp_link_equalize_channel(struct drm_dp_link *link)
{
	struct drm_dp_aux *aux = link->aux;
	u8 status[DP_LINK_STATUS_SIZE];
	int err;

	err = drm_dp_link_apply_training(link);
	if (err < 0)
		return err;

	drm_dp_link_train_wait(link);

	err = drm_dp_dpcd_read_link_status(aux, status);
	if (err < 0) {
		DRM_ERROR("failed to read link status: %d\n", err);
		return err;
	}

	/* equalization is meaningless if the clock lock was lost */
	if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
		DRM_ERROR("clock recovery lost while equalizing channel\n");
		link->train.clock_recovered = false;
		return 0;
	}

	if (!drm_dp_channel_eq_ok(status, link->lanes))
		drm_dp_link_get_adjustments(link, status);
	else
		link->train.channel_equalized = true;

	return 0;
}
/*
 * Channel-equalization phase of link training: iterate
 * drm_dp_link_equalize_channel() up to four times, adopting the sink's
 * adjustments between attempts. Uses TPS3 when the sink supports it,
 * TPS2 otherwise.
 *
 * Returns 0 when the loop completes (check train.channel_equalized for the
 * actual outcome) or a negative error code on AUX failure.
 */
static int drm_dp_link_channel_equalization(struct drm_dp_link *link)
{
	unsigned int repeat;
	int err;

	/* start channel equalization using pattern 2 or 3 */
	if (link->caps.tps3_supported)
		link->train.pattern = DP_TRAINING_PATTERN_3;
	else
		link->train.pattern = DP_TRAINING_PATTERN_2;

	for (repeat = 1; repeat < 5; repeat++) {
		err = drm_dp_link_equalize_channel(link);
		if (err < 0) {
			DRM_ERROR("failed to equalize channel: %d\n", err);
			return err;
		}

		if (link->train.channel_equalized)
			break;

		drm_dp_link_train_adjust(&link->train);
	}

	return 0;
}
/*
 * Step the configured link rate down to the next lower standard rate
 * (HBR2 -> HBR -> RBR).
 *
 * Returns 0 on success or -EINVAL when no lower rate is available. The
 * default case matters: link->rate may hold a non-standard rate taken from
 * the eDP 1.4 SUPPORTED_LINK_RATES table, and silently returning 0 without
 * changing it would make the retry loop in drm_dp_link_train_full() spin
 * forever.
 */
static int drm_dp_link_downgrade(struct drm_dp_link *link)
{
	switch (link->rate) {
	case 162000:
		/* already at RBR, nowhere left to go */
		return -EINVAL;

	case 270000:
		link->rate = 162000;
		break;

	case 540000:
		link->rate = 270000;
		break;

	default:
		/* no defined downgrade path for non-standard rates */
		return -EINVAL;
	}

	return 0;
}
/*
 * Turn link training off by programming DP_TRAINING_PATTERN_DISABLE.
 * A failure here is logged but deliberately not propagated.
 */
static void drm_dp_link_train_disable(struct drm_dp_link *link)
{
	int ret;

	link->train.pattern = DP_TRAINING_PATTERN_DISABLE;

	ret = drm_dp_link_apply_training(link);
	if (ret < 0)
		DRM_ERROR("failed to disable link training: %d\n", ret);
}
/*
 * Full link training with AUX channel handshake: configure the link, run
 * clock recovery, then channel equalization. If either phase fails without
 * an AUX error, downgrade the link rate and start over. Training is always
 * disabled on the way out, whether training succeeded or not.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_link_train_full(struct drm_dp_link *link)
{
	int err;

retry:
	DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n",
		      link->lanes, (link->lanes > 1) ? "s" : "",
		      link->rate / 100);

	err = drm_dp_link_configure(link->aux, link);
	if (err < 0) {
		DRM_ERROR("failed to configure DP link: %d\n", err);
		return err;
	}

	err = drm_dp_link_clock_recovery(link);
	if (err < 0) {
		DRM_ERROR("clock recovery failed: %d\n", err);
		goto out;
	}

	if (!link->train.clock_recovered) {
		DRM_ERROR("clock recovery failed, downgrading link\n");

		err = drm_dp_link_downgrade(link);
		if (err < 0)
			goto out;

		/* restart training at the lower rate */
		goto retry;
	}

	DRM_DEBUG_KMS("clock recovery succeeded\n");

	err = drm_dp_link_channel_equalization(link);
	if (err < 0) {
		DRM_ERROR("channel equalization failed: %d\n", err);
		goto out;
	}

	if (!link->train.channel_equalized) {
		DRM_ERROR("channel equalization failed, downgrading link\n");

		err = drm_dp_link_downgrade(link);
		if (err < 0)
			goto out;

		/* restart training at the lower rate */
		goto retry;
	}

	DRM_DEBUG_KMS("channel equalization succeeded\n");

out:
	drm_dp_link_train_disable(link);
	return err;
}
/*
 * Fast link training (no AUX handshake during the pattern phases): transmit
 * TPS1 and then TPS2/TPS3 for a fixed 500 us each, then verify the result
 * with a single link-status read. Training is always disabled on the way
 * out.
 *
 * Returns 0 on success, -EIO when the sink failed to lock or equalize, or
 * another negative error code on AUX failure.
 */
static int drm_dp_link_train_fast(struct drm_dp_link *link)
{
	u8 status[DP_LINK_STATUS_SIZE];
	int err;

	DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n",
		      link->lanes, (link->lanes > 1) ? "s" : "",
		      link->rate / 100);

	err = drm_dp_link_configure(link->aux, link);
	if (err < 0) {
		DRM_ERROR("failed to configure DP link: %d\n", err);
		return err;
	}

	/* transmit training pattern 1 for 500 microseconds */
	link->train.pattern = DP_TRAINING_PATTERN_1;

	err = drm_dp_link_apply_training(link);
	if (err < 0)
		goto out;

	usleep_range(500, 1000);

	/* transmit training pattern 2 or 3 for 500 microseconds */
	if (link->caps.tps3_supported)
		link->train.pattern = DP_TRAINING_PATTERN_3;
	else
		link->train.pattern = DP_TRAINING_PATTERN_2;

	err = drm_dp_link_apply_training(link);
	if (err < 0)
		goto out;

	usleep_range(500, 1000);

	err = drm_dp_dpcd_read_link_status(link->aux, status);
	if (err < 0) {
		DRM_ERROR("failed to read link status: %d\n", err);
		goto out;
	}

	/* both checks run so each failure is reported separately */
	if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
		DRM_ERROR("clock recovery failed\n");
		err = -EIO;
	}

	if (!drm_dp_channel_eq_ok(status, link->lanes)) {
		DRM_ERROR("channel equalization failed\n");
		err = -EIO;
	}

out:
	drm_dp_link_train_disable(link);
	return err;
}
/**
 * drm_dp_link_train() - perform DisplayPort link training
 * @link: a DP link object
 *
 * Uses the context stored in the DP link object to perform link training. It
 * is expected that drivers will call drm_dp_link_probe() to obtain the link
 * capabilities before performing link training.
 *
 * If the sink supports fast link training (no AUX CH handshake) and valid
 * training settings are available, this function will try to perform fast
 * link training and fall back to full link training on failure.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int drm_dp_link_train(struct drm_dp_link *link)
{
	int err;

	drm_dp_link_train_init(&link->train);

	if (link->caps.fast_training) {
		/*
		 * NOTE(review): drm_dp_link_train_init() above clears
		 * clock_recovered/channel_equalized, so this check appears
		 * to always fail and the fast path looks unreachable —
		 * confirm whether that is intended.
		 */
		if (drm_dp_link_train_valid(&link->train)) {
			err = drm_dp_link_train_fast(link);
			if (err < 0)
				DRM_ERROR("fast link training failed: %d\n",
					  err);
			else
				return 0;
		} else {
			DRM_DEBUG_KMS("training parameters not available\n");
		}
	} else {
		DRM_DEBUG_KMS("fast link training not supported\n");
	}

	/* fall back to full training with AUX handshake */
	err = drm_dp_link_train_full(link);
	if (err < 0)
		DRM_ERROR("full link training failed: %d\n", err);

	return err;
}
......@@ -7,20 +7,171 @@
#ifndef DRM_TEGRA_DP_H
#define DRM_TEGRA_DP_H 1
#include <linux/types.h>
struct drm_display_info;
struct drm_display_mode;
struct drm_dp_aux;
struct drm_dp_link;
/**
 * struct drm_dp_link_caps - DP link capabilities
 */
struct drm_dp_link_caps {
	/**
	 * @enhanced_framing:
	 *
	 * enhanced framing capability (mandatory as of DP 1.2)
	 */
	bool enhanced_framing;

	/**
	 * @tps3_supported:
	 *
	 * training pattern sequence 3 supported for equalization
	 */
	bool tps3_supported;

	/**
	 * @fast_training:
	 *
	 * AUX CH handshake not required for link training
	 */
	bool fast_training;

	/**
	 * @channel_coding:
	 *
	 * ANSI 8B/10B channel coding capability
	 */
	bool channel_coding;

	/**
	 * @alternate_scrambler_reset:
	 *
	 * eDP alternate scrambler reset capability
	 */
	bool alternate_scrambler_reset;
};
void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest,
const struct drm_dp_link_caps *src);
/**
* struct drm_dp_link_ops - DP link operations
*/
struct drm_dp_link_ops {
/**
* @apply_training:
*/
int (*apply_training)(struct drm_dp_link *link);
/**
* @configure:
*/
int (*configure)(struct drm_dp_link *link);
};
/* pack per-lane drive levels into the DP_TRAINING_LANEx_SET byte layout */
#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0)
#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3)
/* two lanes share each DP_TRAINING_LANEx_y_SET2 byte, 2 bits per lane */
#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2))

/**
 * struct drm_dp_link_train_set - link training settings
 * @voltage_swing: per-lane voltage swing
 * @pre_emphasis: per-lane pre-emphasis
 * @post_cursor: per-lane post-cursor
 */
struct drm_dp_link_train_set {
	unsigned int voltage_swing[4];
	unsigned int pre_emphasis[4];
	unsigned int post_cursor[4];
};
/**
* struct drm_dp_link_train - link training state information
* @request: currently requested settings
* @adjust: adjustments requested by sink
* @pattern: currently requested training pattern
* @clock_recovered: flag to track if clock recovery has completed
* @channel_equalized: flag to track if channel equalization has completed
*/
struct drm_dp_link_train {
struct drm_dp_link_train_set request;
struct drm_dp_link_train_set adjust;
unsigned int pattern;
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
bool clock_recovered;
bool channel_equalized;
};
/**
* struct drm_dp_link - DP link capabilities and configuration
* @revision: DP specification revision supported on the link
* @max_rate: maximum clock rate supported on the link
* @max_lanes: maximum number of lanes supported on the link
* @caps: capabilities supported on the link (see &drm_dp_link_caps)
* @aux_rd_interval: AUX read interval to use for training (in microseconds)
* @edp: eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...)
* @rate: currently configured link rate
* @lanes: currently configured number of lanes
* @rates: additional supported link rates in kHz (eDP 1.4)
* @num_rates: number of additional supported link rates (eDP 1.4)
*/
struct drm_dp_link {
unsigned char revision;
unsigned int max_rate;
unsigned int max_lanes;
struct drm_dp_link_caps caps;
/**
* @cr: clock recovery read interval
* @ce: channel equalization read interval
*/
struct {
unsigned int cr;
unsigned int ce;
} aux_rd_interval;
unsigned char edp;
unsigned int rate;
unsigned int num_lanes;
unsigned long capabilities;
unsigned int lanes;
unsigned long rates[DP_MAX_SUPPORTED_RATES];
unsigned int num_rates;
/**
* @ops: DP link operations
*/
const struct drm_dp_link_ops *ops;
/**
* @aux: DP AUX channel
*/
struct drm_dp_aux *aux;
/**
* @train: DP link training state
*/
struct drm_dp_link_train train;
};
int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate);
int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate);
void drm_dp_link_update_rates(struct drm_dp_link *link);
int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_link_choose(struct drm_dp_link *link,
const struct drm_display_mode *mode,
const struct drm_display_info *info);
void drm_dp_link_train_init(struct drm_dp_link_train *train);
int drm_dp_link_train(struct drm_dp_link *link);
#endif
......@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
......@@ -30,10 +31,18 @@
static DEFINE_MUTEX(dpaux_lock);
static LIST_HEAD(dpaux_list);
/*
 * Per-SoC AUX pad control values, programmed into the corresponding
 * DPAUX_HYBRID_PADCTL_AUX_CMH/DRVZ/DRVI register fields by
 * tegra_dpaux_pad_config(). Exact electrical meaning of each value is
 * SoC-specific — see the Tegra TRM for the DPAUX pad controls.
 */
struct tegra_dpaux_soc {
	unsigned int cmh;
	unsigned int drvz;
	unsigned int drvi;
};
struct tegra_dpaux {
struct drm_dp_aux aux;
struct device *dev;
const struct tegra_dpaux_soc *soc;
void __iomem *regs;
int irq;
......@@ -121,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
struct tegra_dpaux *dpaux = to_dpaux(aux);
unsigned long status;
ssize_t ret = 0;
u8 reply = 0;
u32 value;
/* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. */
......@@ -215,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) {
case 0x00:
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
reply = DP_AUX_NATIVE_REPLY_ACK;
break;
case 0x01:
msg->reply = DP_AUX_NATIVE_REPLY_NACK;
reply = DP_AUX_NATIVE_REPLY_NACK;
break;
case 0x02:
msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
reply = DP_AUX_NATIVE_REPLY_DEFER;
break;
case 0x04:
msg->reply = DP_AUX_I2C_REPLY_NACK;
reply = DP_AUX_I2C_REPLY_NACK;
break;
case 0x08:
msg->reply = DP_AUX_I2C_REPLY_DEFER;
reply = DP_AUX_I2C_REPLY_DEFER;
break;
}
......@@ -239,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux,
if (msg->request & DP_AUX_I2C_READ) {
size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK;
if (WARN_ON(count != msg->size))
count = min_t(size_t, count, msg->size);
/*
* There might be a smarter way to do this, but since
* the DP helpers will already retry transactions for
* an -EBUSY return value, simply reuse that instead.
*/
if (count != msg->size) {
ret = -EBUSY;
goto out;
}
tegra_dpaux_read_fifo(dpaux, msg->buffer, count);
ret = count;
}
}
msg->reply = reply;
out:
return ret;
}
......@@ -311,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
switch (function) {
case DPAUX_PADCTL_FUNC_AUX:
value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
DPAUX_HYBRID_PADCTL_MODE_AUX;
break;
......@@ -321,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
case DPAUX_PADCTL_FUNC_I2C:
value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) |
DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) |
DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) |
DPAUX_HYBRID_PADCTL_MODE_I2C;
break;
......@@ -437,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
if (!dpaux)
return -ENOMEM;
dpaux->soc = of_device_get_match_data(&pdev->dev);
INIT_WORK(&dpaux->work, tegra_dpaux_hotplug);
init_completion(&dpaux->complete);
INIT_LIST_HEAD(&dpaux->list);
......@@ -494,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
return PTR_ERR(dpaux->vdd);
}
dpaux->vdd = NULL;
}
platform_set_drvdata(pdev, dpaux);
......@@ -642,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
.cmh = 0x02,
.drvz = 0x04,
.drvi = 0x18,
};
static const struct tegra_dpaux_soc tegra210_dpaux_soc = {
.cmh = 0x02,
.drvz = 0x04,
.drvi = 0x30,
};
static const struct tegra_dpaux_soc tegra194_dpaux_soc = {
.cmh = 0x02,
.drvz = 0x04,
.drvi = 0x2c,
};
static const struct of_device_id tegra_dpaux_of_match[] = {
{ .compatible = "nvidia,tegra194-dpaux", },
{ .compatible = "nvidia,tegra186-dpaux", },
{ .compatible = "nvidia,tegra210-dpaux", },
{ .compatible = "nvidia,tegra124-dpaux", },
{ .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc },
{ .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc },
{ .compatible = "nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc },
{ .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match);
......@@ -687,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
output->connector.polled = DRM_CONNECTOR_POLL_HPD;
dpaux->output = output;
err = regulator_enable(dpaux->vdd);
if (err < 0)
return err;
if (output->panel) {
enum drm_connector_status status;
timeout = jiffies + msecs_to_jiffies(250);
if (dpaux->vdd) {
err = regulator_enable(dpaux->vdd);
if (err < 0)
return err;
}
while (time_before(jiffies, timeout)) {
enum drm_connector_status status;
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
status = drm_dp_aux_detect(aux);
if (status == connector_status_connected)
break;
status = drm_dp_aux_detect(aux);
if (status == connector_status_connected) {
enable_irq(dpaux->irq);
return 0;
usleep_range(1000, 2000);
}
usleep_range(1000, 2000);
if (status != connector_status_connected)
return -ETIMEDOUT;
}
return -ETIMEDOUT;
enable_irq(dpaux->irq);
return 0;
}
int drm_dp_aux_detach(struct drm_dp_aux *aux)
......@@ -716,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux)
disable_irq(dpaux->irq);
err = regulator_disable(dpaux->vdd);
if (err < 0)
return err;
if (dpaux->output->panel) {
enum drm_connector_status status;
timeout = jiffies + msecs_to_jiffies(250);
if (dpaux->vdd) {
err = regulator_disable(dpaux->vdd);
if (err < 0)
return err;
}
while (time_before(jiffies, timeout)) {
enum drm_connector_status status;
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
status = drm_dp_aux_detect(aux);
if (status == connector_status_disconnected)
break;
status = drm_dp_aux_detect(aux);
if (status == connector_status_disconnected) {
dpaux->output = NULL;
return 0;
usleep_range(1000, 2000);
}
usleep_range(1000, 2000);
if (status != connector_status_disconnected)
return -ETIMEDOUT;
dpaux->output = NULL;
}
return -ETIMEDOUT;
return 0;
}
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
......@@ -765,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux)
return 0;
}
int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding)
{
int err;
err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
encoding);
if (err < 0)
return err;
return 0;
}
int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
u8 pattern)
{
u8 tp = pattern & DP_TRAINING_PATTERN_MASK;
u8 status[DP_LINK_STATUS_SIZE], values[4];
unsigned int i;
int err;
err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern);
if (err < 0)
return err;
if (tp == DP_TRAINING_PATTERN_DISABLE)
return 0;
for (i = 0; i < link->num_lanes; i++)
values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED |
DP_TRAIN_PRE_EMPH_LEVEL_0 |
DP_TRAIN_MAX_SWING_REACHED |
DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values,
link->num_lanes);
if (err < 0)
return err;
usleep_range(500, 1000);
err = drm_dp_dpcd_read_link_status(aux, status);
if (err < 0)
return err;
switch (tp) {
case DP_TRAINING_PATTERN_1:
if (!drm_dp_clock_recovery_ok(status, link->num_lanes))
return -EAGAIN;
break;
case DP_TRAINING_PATTERN_2:
if (!drm_dp_channel_eq_ok(status, link->num_lanes))
return -EAGAIN;
break;
default:
dev_err(aux->dev, "unsupported training pattern %u\n", tp);
return -EINVAL;
}
err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0);
if (err < 0)
return err;
return 0;
}
......@@ -20,10 +20,6 @@
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
#include "drm.h"
#include "gem.h"
......@@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = {
.atomic_commit_tail = tegra_atomic_commit_tail,
};
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
struct host1x_device *device = to_host1x_device(drm->dev);
struct tegra_drm *tegra;
int err;
tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
if (iommu_present(&platform_bus_type)) {
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
err = -ENOMEM;
goto free;
}
err = iova_cache_get();
if (err < 0)
goto domain;
}
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
drm->dev_private = tegra;
tegra->drm = drm;
drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
err = tegra_drm_fb_prepare(drm);
if (err < 0)
goto config;
drm_kms_helper_poll_init(drm);
err = host1x_device_init(device);
if (err < 0)
goto fbdev;
if (tegra->domain) {
u64 carveout_start, carveout_end, gem_start, gem_end;
u64 dma_mask = dma_get_mask(&device->dev);
dma_addr_t start, end;
unsigned long order;
start = tegra->domain->geometry.aperture_start & dma_mask;
end = tegra->domain->geometry.aperture_end & dma_mask;
gem_start = start;
gem_end = end - CARVEOUT_SZ;
carveout_start = gem_end + 1;
carveout_end = end;
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
mutex_init(&tegra->mm_lock);
DRM_DEBUG("IOMMU apertures:\n");
DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end);
DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start,
carveout_end);
}
if (tegra->hub) {
err = tegra_display_hub_prepare(tegra->hub);
if (err < 0)
goto device;
}
/*
* We don't use the drm_irq_install() helpers provided by the DRM
* core, so we need to set this manually in order to allow the
* DRM_IOCTL_WAIT_VBLANK to operate correctly.
*/
drm->irq_enabled = true;
/* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->max_vblank_count = 0xffffffff;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto hub;
drm_mode_config_reset(drm);
err = tegra_drm_fb_init(drm);
if (err < 0)
goto hub;
return 0;
hub:
if (tegra->hub)
tegra_display_hub_cleanup(tegra->hub);
device:
host1x_device_exit(device);
fbdev:
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_free(drm);
config:
drm_mode_config_cleanup(drm);
if (tegra->domain) {
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
}
domain:
if (tegra->domain)
iommu_domain_free(tegra->domain);
free:
kfree(tegra);
return err;
}
static void tegra_drm_unload(struct drm_device *drm)
{
struct host1x_device *device = to_host1x_device(drm->dev);
struct tegra_drm *tegra = drm->dev_private;
int err;
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_exit(drm);
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
err = host1x_device_exit(device);
if (err < 0)
return;
if (tegra->domain) {
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
iommu_domain_free(tegra->domain);
}
kfree(tegra);
}
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
struct tegra_drm_file *fpriv;
......@@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
if (err < 0)
return err;
dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;
dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
if (!dest->cmdbuf.bo)
return -ENOENT;
......@@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor)
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC | DRIVER_RENDER,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
.postclose = tegra_drm_postclose,
.lastclose = drm_fb_helper_lastclose,
......@@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra,
return 0;
}
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
bool shared)
int host1x_client_iommu_attach(struct host1x_client *client)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_group *group = NULL;
int err;
/*
* If the host1x client is already attached to an IOMMU domain that is
* not the shared IOMMU domain, don't try to attach it to a different
* domain. This allows using the IOMMU-backed DMA API.
*/
if (domain && domain != tegra->domain)
return 0;
if (tegra->domain) {
group = iommu_group_get(client->dev);
if (!group) {
dev_err(client->dev, "failed to get IOMMU group\n");
return ERR_PTR(-ENODEV);
return -ENODEV;
}
if (!shared || (shared && (group != tegra->group))) {
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
if (client->dev->archdata.mapping) {
struct dma_iommu_mapping *mapping =
to_dma_iommu_mapping(client->dev);
arm_iommu_detach_device(client->dev);
arm_iommu_release_mapping(mapping);
}
#endif
if (domain != tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
iommu_group_put(group);
return ERR_PTR(err);
return err;
}
if (shared && !tegra->group)
tegra->group = group;
}
tegra->use_explicit_iommu = true;
}
return group;
client->group = group;
return 0;
}
void host1x_client_iommu_detach(struct host1x_client *client,
struct iommu_group *group)
void host1x_client_iommu_detach(struct host1x_client *client)
{
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
struct iommu_domain *domain;
if (group) {
if (group == tegra->group) {
iommu_detach_group(tegra->domain, group);
tegra->group = NULL;
}
if (client->group) {
/*
* Devices that are part of the same group may no longer be
* attached to a domain at this point because their group may
* have been detached by an earlier client.
*/
domain = iommu_get_domain_for_dev(client->dev);
if (domain)
iommu_detach_group(tegra->domain, client->group);
iommu_group_put(group);
iommu_group_put(client->group);
client->group = NULL;
}
}
......@@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
struct iommu_domain *domain;
struct tegra_drm *tegra;
struct drm_device *drm;
int err;
......@@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev)
if (IS_ERR(drm))
return PTR_ERR(drm);
tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
if (!tegra) {
err = -ENOMEM;
goto put;
}
/*
* If the Tegra DRM clients are backed by an IOMMU, push buffers are
* likely to be allocated beyond the 32-bit boundary if sufficient
* system memory is available. This is problematic on earlier Tegra
* generations where host1x supports a maximum of 32 address bits in
* the GATHER opcode. In this case, unless host1x is behind an IOMMU
* as well it won't be able to process buffers allocated beyond the
* 32-bit boundary.
*
* The DMA API will use bounce buffers in this case, so that could
* perhaps still be made to work, even if less efficient, but there
* is another catch: in order to perform cache maintenance on pages
* allocated for discontiguous buffers we need to map and unmap the
* SG table representing these buffers. This is fine for something
* small like a push buffer, but it exhausts the bounce buffer pool
* (typically on the order of a few MiB) for framebuffers (many MiB
* for any modern resolution).
*
* Work around this by making sure that Tegra DRM clients only use
* an IOMMU if the parent host1x also uses an IOMMU.
*
* Note that there's still a small gap here that we don't cover: if
* the DMA API is backed by an IOMMU there's no way to control which
* device is attached to an IOMMU and which isn't, except via wiring
* up the device tree appropriately. This is considered an problem
* of integration, so care must be taken for the DT to be consistent.
*/
domain = iommu_get_domain_for_dev(drm->dev->parent);
if (domain && iommu_present(&platform_bus_type)) {
tegra->domain = iommu_domain_alloc(&platform_bus_type);
if (!tegra->domain) {
err = -ENOMEM;
goto free;
}
err = iova_cache_get();
if (err < 0)
goto domain;
}
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
dev_set_drvdata(&dev->dev, drm);
drm->dev_private = tegra;
tegra->drm = drm;
drm_mode_config_init(drm);
err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
drm->mode_config.helper_private = &tegra_drm_mode_config_helpers;
err = tegra_drm_fb_prepare(drm);
if (err < 0)
goto put;
goto config;
drm_kms_helper_poll_init(drm);
err = host1x_device_init(dev);
if (err < 0)
goto fbdev;
if (tegra->use_explicit_iommu) {
u64 carveout_start, carveout_end, gem_start, gem_end;
u64 dma_mask = dma_get_mask(&dev->dev);
dma_addr_t start, end;
unsigned long order;
start = tegra->domain->geometry.aperture_start & dma_mask;
end = tegra->domain->geometry.aperture_end & dma_mask;
gem_start = start;
gem_end = end - CARVEOUT_SZ;
carveout_start = gem_end + 1;
carveout_end = end;
order = __ffs(tegra->domain->pgsize_bitmap);
init_iova_domain(&tegra->carveout.domain, 1UL << order,
carveout_start >> order);
tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
mutex_init(&tegra->mm_lock);
DRM_DEBUG_DRIVER("IOMMU apertures:\n");
DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end);
DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start,
carveout_end);
} else if (tegra->domain) {
iommu_domain_free(tegra->domain);
tegra->domain = NULL;
iova_cache_put();
}
if (tegra->hub) {
err = tegra_display_hub_prepare(tegra->hub);
if (err < 0)
goto device;
}
/*
* We don't use the drm_irq_install() helpers provided by the DRM
* core, so we need to set this manually in order to allow the
* DRM_IOCTL_WAIT_VBLANK to operate correctly.
*/
drm->irq_enabled = true;
/* syncpoints are used for full 32-bit hardware VBLANK counters */
drm->max_vblank_count = 0xffffffff;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
if (err < 0)
goto hub;
drm_mode_config_reset(drm);
err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
false);
if (err < 0)
goto hub;
err = tegra_drm_fb_init(drm);
if (err < 0)
goto hub;
err = drm_dev_register(drm, 0);
if (err < 0)
goto put;
goto fb;
return 0;
fb:
tegra_drm_fb_exit(drm);
hub:
if (tegra->hub)
tegra_display_hub_cleanup(tegra->hub);
device:
if (tegra->domain) {
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
}
host1x_device_exit(dev);
fbdev:
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_free(drm);
config:
drm_mode_config_cleanup(drm);
domain:
if (tegra->domain)
iommu_domain_free(tegra->domain);
free:
kfree(tegra);
put:
drm_dev_put(drm);
return err;
......@@ -1229,8 +1233,29 @@ static int host1x_drm_probe(struct host1x_device *dev)
static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);
struct tegra_drm *tegra = drm->dev_private;
int err;
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
tegra_drm_fb_exit(drm);
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
err = host1x_device_exit(dev);
if (err < 0)
dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err);
if (tegra->domain) {
mutex_destroy(&tegra->mm_lock);
drm_mm_takedown(&tegra->mm);
put_iova_domain(&tegra->carveout.domain);
iova_cache_put();
iommu_domain_free(tegra->domain);
}
kfree(tegra);
drm_dev_put(drm);
return 0;
......
......@@ -36,7 +36,7 @@ struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
struct iommu_group *group;
bool use_explicit_iommu;
struct mutex mm_lock;
struct drm_mm mm;
......@@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client);
struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client,
bool shared);
void host1x_client_iommu_detach(struct host1x_client *client,
struct iommu_group *group);
int host1x_client_iommu_attach(struct host1x_client *client);
void host1x_client_iommu_detach(struct host1x_client *client);
int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm);
int tegra_drm_exit(struct tegra_drm *tegra);
......@@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector);
void tegra_output_encoder_destroy(struct drm_encoder *encoder);
/* from dpaux.c */
struct drm_dp_link;
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output);
int drm_dp_aux_detach(struct drm_dp_aux *aux);
int drm_dp_aux_enable(struct drm_dp_aux *aux);
int drm_dp_aux_disable(struct drm_dp_aux *aux);
int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding);
int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link,
u8 pattern);
/* from fb.c */
struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer,
......
......@@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon,
static void falcon_copy_firmware_image(struct falcon *falcon,
const struct firmware *firmware)
{
u32 *firmware_vaddr = falcon->firmware.vaddr;
dma_addr_t daddr;
u32 *virt = falcon->firmware.virt;
size_t i;
int err;
/* copy the whole thing taking into account endianness */
for (i = 0; i < firmware->size / sizeof(u32); i++)
firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
/* ensure that caches are flushed and falcon can see the firmware */
daddr = dma_map_single(falcon->dev, firmware_vaddr,
falcon->firmware.size, DMA_TO_DEVICE);
err = dma_mapping_error(falcon->dev, daddr);
if (err) {
dev_err(falcon->dev, "failed to map firmware: %d\n", err);
return;
}
dma_sync_single_for_device(falcon->dev, daddr,
falcon->firmware.size, DMA_TO_DEVICE);
dma_unmap_single(falcon->dev, daddr, falcon->firmware.size,
DMA_TO_DEVICE);
virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]);
}
static int falcon_parse_firmware_image(struct falcon *falcon)
{
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr;
struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt;
struct falcon_fw_os_header_v1 *os;
/* endian problems would show up right here */
......@@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon)
return -EINVAL;
}
os = falcon->firmware.vaddr + bin->os_header_offset;
os = falcon->firmware.virt + bin->os_header_offset;
falcon->firmware.bin_data.size = bin->os_size;
falcon->firmware.bin_data.offset = bin->os_data_offset;
......@@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name)
if (err < 0)
return err;
falcon->firmware.size = falcon->firmware.firmware->size;
return 0;
}
......@@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon)
const struct firmware *firmware = falcon->firmware.firmware;
int err;
falcon->firmware.size = firmware->size;
/* allocate iova space for the firmware */
falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size,
&falcon->firmware.paddr);
if (IS_ERR(falcon->firmware.vaddr)) {
dev_err(falcon->dev, "DMA memory mapping failed\n");
return PTR_ERR(falcon->firmware.vaddr);
}
/* copy firmware image into local area. this also ensures endianness */
falcon_copy_firmware_image(falcon, firmware);
......@@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon)
err = falcon_parse_firmware_image(falcon);
if (err < 0) {
dev_err(falcon->dev, "failed to parse firmware image\n");
goto err_setup_firmware_image;
return err;
}
release_firmware(firmware);
falcon->firmware.firmware = NULL;
return 0;
err_setup_firmware_image:
falcon->ops->free(falcon, falcon->firmware.size,
falcon->firmware.paddr, falcon->firmware.vaddr);
return err;
}
int falcon_init(struct falcon *falcon)
{
/* check mandatory ops */
if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free)
return -EINVAL;
falcon->firmware.vaddr = NULL;
falcon->firmware.virt = NULL;
return 0;
}
void falcon_exit(struct falcon *falcon)
{
if (falcon->firmware.firmware) {
if (falcon->firmware.firmware)
release_firmware(falcon->firmware.firmware);
falcon->firmware.firmware = NULL;
}
if (falcon->firmware.vaddr) {
falcon->ops->free(falcon, falcon->firmware.size,
falcon->firmware.paddr,
falcon->firmware.vaddr);
falcon->firmware.vaddr = NULL;
}
}
int falcon_boot(struct falcon *falcon)
......@@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon)
u32 value;
int err;
if (!falcon->firmware.vaddr)
if (!falcon->firmware.virt)
return -EINVAL;
err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value,
......@@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon)
falcon_writel(falcon, 0, FALCON_DMACTL);
/* setup the address of the binary data so Falcon can access it later */
falcon_writel(falcon, (falcon->firmware.paddr +
falcon_writel(falcon, (falcon->firmware.iova +
falcon->firmware.bin_data.offset) >> 8,
FALCON_DMATRFBASE);
......
......@@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 {
u32 data_size;
};
struct falcon;
struct falcon_ops {
void *(*alloc)(struct falcon *falcon, size_t size,
dma_addr_t *paddr);
void (*free)(struct falcon *falcon, size_t size,
dma_addr_t paddr, void *vaddr);
};
struct falcon_firmware_section {
unsigned long offset;
size_t size;
......@@ -93,8 +84,9 @@ struct falcon_firmware {
const struct firmware *firmware;
/* Raw firmware data */
dma_addr_t paddr;
void *vaddr;
dma_addr_t iova;
dma_addr_t phys;
void *virt;
size_t size;
/* Parsed firmware information */
......@@ -107,8 +99,6 @@ struct falcon {
/* Set by falcon client */
struct device *dev;
void __iomem *regs;
const struct falcon_ops *ops;
void *data;
struct falcon_firmware firmware;
};
......
......@@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->paddr + offset);
info->fix.smem_start = (unsigned long)(bo->iova + offset);
info->fix.smem_len = size;
return 0;
......
......@@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo)
drm_gem_object_put_unlocked(&obj->gem);
}
static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt)
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
dma_addr_t *phys)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
struct sg_table *sgt;
int err;
/*
* If we've manually mapped the buffer object through the IOMMU, make
* sure to return the IOVA address of our mapping.
*/
if (phys && obj->mm) {
*phys = obj->iova;
return NULL;
}
/*
* If we don't have a mapping for this buffer yet, return an SG table
* so that host1x can do the mapping for us via the DMA API.
*/
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
*sgt = obj->sgt;
if (obj->pages) {
err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
0, obj->gem.size, GFP_KERNEL);
if (err < 0)
goto free;
} else {
err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
obj->gem.size);
if (err < 0)
goto free;
}
return obj->paddr;
return sgt;
free:
kfree(sgt);
return ERR_PTR(err);
}
static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
if (sgt) {
sg_free_table(sgt);
kfree(sgt);
}
}
static void *tegra_bo_mmap(struct host1x_bo *bo)
......@@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
goto unlock;
}
bo->paddr = bo->mm->start;
bo->iova = bo->mm->start;
bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
bo->sgt->nents, prot);
if (!bo->size) {
dev_err(tegra->drm->dev, "failed to map buffer\n");
......@@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
mutex_lock(&tegra->mm_lock);
iommu_unmap(tegra->domain, bo->paddr, bo->size);
iommu_unmap(tegra->domain, bo->iova, bo->size);
drm_mm_remove_node(bo->mm);
mutex_unlock(&tegra->mm_lock);
......@@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
sg_free_table(bo->sgt);
kfree(bo->sgt);
} else if (bo->vaddr) {
dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
}
}
......@@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
} else {
size_t size = bo->gem.size;
bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr,
bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
......@@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
bo->paddr = sg_dma_address(bo->sgt->sgl);
bo->iova = sg_dma_address(bo->sgt->sgl);
}
bo->gem.import_attach = attach;
......@@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr,
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
gem->size);
if (err < 0) {
drm_gem_vm_close(vma);
......@@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
return NULL;
if (bo->pages) {
struct scatterlist *sg;
unsigned int i;
if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
goto free;
for_each_sg(sgt->sgl, sg, bo->num_pages, i)
sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
0, gem->size, GFP_KERNEL) < 0)
goto free;
} else {
if (sg_alloc_table(sgt, 1, GFP_KERNEL))
if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
gem->size) < 0)
goto free;
sg_dma_address(sgt->sgl) = bo->paddr;
sg_dma_len(sgt->sgl) = gem->size;
}
if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
goto free;
return sgt;
free:
......
......@@ -31,7 +31,7 @@ struct tegra_bo {
struct host1x_bo base;
unsigned long flags;
struct sg_table *sgt;
dma_addr_t paddr;
dma_addr_t iova;
void *vaddr;
struct drm_mm_node *mm;
......
......@@ -17,7 +17,6 @@ struct gr2d_soc {
};
struct gr2d {
struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk;
......@@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client)
struct gr2d *gr2d = to_gr2d(drm);
int err;
gr2d->channel = host1x_channel_request(client->dev);
gr2d->channel = host1x_channel_request(client);
if (!gr2d->channel)
return -ENOMEM;
......@@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client)
goto put;
}
gr2d->group = host1x_client_iommu_attach(client, false);
if (IS_ERR(gr2d->group)) {
err = PTR_ERR(gr2d->group);
err = host1x_client_iommu_attach(client);
if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
......@@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client)
return 0;
detach:
host1x_client_iommu_detach(client, gr2d->group);
host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
......@@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client)
if (err < 0)
return err;
host1x_client_iommu_detach(client, gr2d->group);
host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr2d->channel);
......
......@@ -23,7 +23,6 @@ struct gr3d_soc {
};
struct gr3d {
struct iommu_group *group;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct clk *clk_secondary;
......@@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client)
struct gr3d *gr3d = to_gr3d(drm);
int err;
gr3d->channel = host1x_channel_request(client->dev);
gr3d->channel = host1x_channel_request(client);
if (!gr3d->channel)
return -ENOMEM;
......@@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client)
goto put;
}
gr3d->group = host1x_client_iommu_attach(client, false);
if (IS_ERR(gr3d->group)) {
err = PTR_ERR(gr3d->group);
err = host1x_client_iommu_attach(client);
if (err < 0) {
dev_err(client->dev, "failed to attach to domain: %d\n", err);
goto free;
}
......@@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client)
return 0;
detach:
host1x_client_iommu_detach(client, gr3d->group);
host1x_client_iommu_detach(client);
free:
host1x_syncpt_free(client->syncpts[0]);
put:
......@@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client)
if (err < 0)
return err;
host1x_client_iommu_detach(client, gr3d->group);
host1x_client_iommu_detach(client);
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(gr3d->channel);
......
......@@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
unsigned int zpos = plane->state->normalized_zpos;
struct drm_framebuffer *fb = plane->state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_bo *bo;
dma_addr_t base;
u32 value;
......@@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
bo = tegra_fb_get_plane(fb, 0);
base = bo->paddr;
base = state->iova[0] + fb->offsets[0];
tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
......@@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
.prepare_fb = tegra_plane_prepare_fb,
.cleanup_fb = tegra_plane_cleanup_fb,
.atomic_check = tegra_shared_plane_atomic_check,
.atomic_update = tegra_shared_plane_atomic_update,
.atomic_disable = tegra_shared_plane_atomic_disable,
......
......@@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force)
void tegra_output_connector_destroy(struct drm_connector *connector)
{
struct tegra_output *output = connector_to_output(connector);
if (output->cec)
cec_notifier_conn_unregister(output->cec);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
......@@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output)
disable_irq(output->hpd_irq);
}
output->cec = cec_notifier_get(output->dev);
if (!output->cec)
return -ENOMEM;
return 0;
}
void tegra_output_remove(struct tegra_output *output)
{
if (output->cec)
cec_notifier_put(output->cec);
if (output->hpd_gpio)
free_irq(output->hpd_irq, output);
......@@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output)
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
int connector_type;
int err;
if (output->panel) {
......@@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
if (output->hpd_gpio)
enable_irq(output->hpd_irq);
connector_type = output->connector.connector_type;
/*
* Create a CEC notifier for HDMI connector.
*/
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_HDMIB) {
struct cec_connector_info conn_info;
cec_fill_conn_info_from_drm(&conn_info, &output->connector);
output->cec = cec_notifier_conn_register(output->dev, NULL,
&conn_info);
if (!output->cec)
return -ENOMEM;
}
return 0;
}
......
......@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
......@@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane)
{
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_plane_state *state;
unsigned int i;
if (plane->state)
__drm_atomic_helper_plane_destroy_state(plane->state);
......@@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane)
plane->state->plane = plane;
plane->state->zpos = p->index;
plane->state->normalized_zpos = p->index;
for (i = 0; i < 3; i++)
state->iova[i] = DMA_MAPPING_ERROR;
}
}
......@@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane)
for (i = 0; i < 2; i++)
copy->blending[i] = state->blending[i];
for (i = 0; i < 3; i++) {
copy->iova[i] = DMA_MAPPING_ERROR;
copy->sgt[i] = NULL;
}
return &copy->base;
}
......@@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.format_mod_supported = tegra_plane_format_mod_supported,
};
static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
unsigned int i;
int err;
for (i = 0; i < state->base.fb->format->num_planes; i++) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
if (!dc->client.group) {
struct sg_table *sgt;
sgt = host1x_bo_pin(dc->dev, &bo->base, NULL);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
goto unpin;
}
err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents,
DMA_TO_DEVICE);
if (err == 0) {
err = -ENOMEM;
goto unpin;
}
state->iova[i] = sg_dma_address(sgt->sgl);
state->sgt[i] = sgt;
} else {
state->iova[i] = bo->iova;
}
}
return 0;
unpin:
dev_err(dc->dev, "failed to map plane %u: %d\n", i, err);
while (i--) {
struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
struct sg_table *sgt = state->sgt[i];
dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
host1x_bo_unpin(dc->dev, &bo->base, sgt);
state->iova[i] = DMA_MAPPING_ERROR;
state->sgt[i] = NULL;
}
return err;
}
/*
 * Release every framebuffer plane previously pinned by tegra_dc_pin():
 * unmap and unpin each per-plane sg_table (non-IOMMU-group case only)
 * and reset the cached IOVA/sgt entries in @state.
 */
static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state)
{
	unsigned int plane;

	for (plane = 0; plane < state->base.fb->format->num_planes; plane++) {
		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, plane);

		if (!dc->client.group) {
			struct sg_table *sgt = state->sgt[plane];

			/* sgt may be NULL if the plane was never mapped */
			if (sgt != NULL) {
				dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents,
					     DMA_TO_DEVICE);
				host1x_bo_unpin(dc->dev, &bo->base, sgt);
			}
		}

		state->iova[plane] = DMA_MAPPING_ERROR;
		state->sgt[plane] = NULL;
	}
}
int tegra_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_dc *dc = to_tegra_dc(state->crtc);
if (!state->fb)
return 0;
drm_gem_fb_prepare_fb(plane, state);
return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
void tegra_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct tegra_dc *dc = to_tegra_dc(state->crtc);
if (dc)
tegra_dc_unpin(dc, to_tegra_plane_state(state));
}
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state)
{
......
......@@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state {
struct tegra_plane_state {
struct drm_plane_state base;
struct sg_table *sgt[3];
dma_addr_t iova[3];
struct tegra_bo_tiling tiling;
u32 format;
u32 swap;
......@@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state)
extern const struct drm_plane_funcs tegra_plane_funcs;
int tegra_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state);
void tegra_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *state);
int tegra_plane_state_add(struct tegra_plane *plane,
struct drm_plane_state *state);
......
......@@ -371,10 +371,11 @@ struct tegra_sor_regs {
};
struct tegra_sor_soc {
bool supports_edp;
bool supports_lvds;
bool supports_hdmi;
bool supports_dp;
bool supports_audio;
bool supports_hdcp;
const struct tegra_sor_regs *regs;
bool has_nvdisplay;
......@@ -383,6 +384,12 @@ struct tegra_sor_soc {
unsigned int num_settings;
const u8 *xbar_cfg;
const u8 *lane_map;
const u8 (*voltage_swing)[4][4];
const u8 (*pre_emphasis)[4][4];
const u8 (*post_cursor)[4][4];
const u8 (*tx_pu)[4][4];
};
struct tegra_sor;
......@@ -391,6 +398,8 @@ struct tegra_sor_ops {
const char *name;
int (*probe)(struct tegra_sor *sor);
int (*remove)(struct tegra_sor *sor);
void (*audio_enable)(struct tegra_sor *sor);
void (*audio_disable)(struct tegra_sor *sor);
};
struct tegra_sor {
......@@ -413,6 +422,7 @@ struct tegra_sor {
u8 xbar_cfg[5];
struct drm_dp_link link;
struct drm_dp_aux *aux;
struct drm_info_list *debugfs_files;
......@@ -515,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
return container_of(hw, struct tegra_clk_sor_pad, hw);
}
static const char * const tegra_clk_sor_pad_parents[] = {
"pll_d2_out0", "pll_dp"
static const char * const tegra_clk_sor_pad_parents[2][2] = {
{ "pll_d_out0", "pll_dp" },
{ "pll_d2_out0", "pll_dp" },
};
/*
* Implementing ->set_parent() here isn't really required because the parent
* will be explicitly selected in the driver code via the DP_CLK_SEL mux in
* the SOR_CLK_CNTRL register. This is primarily for compatibility with the
* Tegra186 and later SoC generations where the BPMP implements this clock
* and doesn't expose the mux via the common clock framework.
*/
static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
struct tegra_clk_sor_pad *pad = to_pad(hw);
......@@ -587,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
init.name = name;
init.flags = 0;
init.parent_names = tegra_clk_sor_pad_parents;
init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
init.parent_names = tegra_clk_sor_pad_parents[sor->index];
init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]);
init.ops = &tegra_clk_sor_pad_ops;
pad->hw.init = &init;
......@@ -598,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
return clk;
}
static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
struct drm_dp_link *link)
/*
 * Zero out every link rate reported by the sink that the SOR cannot
 * drive, then compact the rate table via drm_dp_link_update_rates().
 * Tegra supports only RBR (1.62 GHz), HBR (2.7 GHz) and HBR2 (5.4 GHz).
 *
 * Fix: dropped the stray, unused "u8 pattern;" local that was left over
 * from the removed fast-link-training code.
 */
static void tegra_sor_filter_rates(struct tegra_sor *sor)
{
	struct drm_dp_link *link = &sor->link;
	unsigned int i;

	/* Tegra only supports RBR, HBR and HBR2 */
	for (i = 0; i < link->num_rates; i++) {
		switch (link->rates[i]) {
		case 1620000:
		case 2700000:
		case 5400000:
			break;

		default:
			DRM_DEBUG_KMS("link rate %lu kHz not supported\n",
				      link->rates[i]);
			link->rates[i] = 0;
			break;
		}
	}

	drm_dp_link_update_rates(link);
}
static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes)
{
unsigned long timeout;
u32 value;
int err;
/* setup lane parameters */
value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE2(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE1(0x40) |
SOR_LANE_DRIVE_CURRENT_LANE0(0x40);
tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
/*
* Clear or set the PD_TXD bit corresponding to each lane, depending
* on whether it is used or not.
*/
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) |
SOR_LANE_PREEMPHASIS_LANE2(0x0f) |
SOR_LANE_PREEMPHASIS_LANE1(0x0f) |
SOR_LANE_PREEMPHASIS_LANE0(0x0f);
tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
if (lanes <= 2)
value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]));
else
value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]);
value = SOR_LANE_POSTCURSOR_LANE3(0x00) |
SOR_LANE_POSTCURSOR_LANE2(0x00) |
SOR_LANE_POSTCURSOR_LANE1(0x00) |
SOR_LANE_POSTCURSOR_LANE0(0x00);
tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0);
if (lanes <= 1)
value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
else
value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]);
/* disable LVDS mode */
tegra_sor_writel(sor, 0, SOR_LVDS);
if (lanes == 0)
value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
else
value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
SOR_LANE_SEQ_CTL_POWER_STATE_UP;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(250, 1000);
}
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
return 0;
}
static int tegra_sor_power_down_lanes(struct tegra_sor *sor)
{
unsigned long timeout;
u32 value;
/* power down all lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(25, 100);
}
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
return 0;
}
static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes)
{
u32 value;
/* pre-charge all used lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0;
if (lanes <= 2)
value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]));
else
value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) |
SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]);
if (lanes <= 1)
value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
else
value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]);
if (lanes == 0)
value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
else
value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
usleep_range(10, 100);
usleep_range(15, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 |
SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
}
err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B);
if (err < 0)
return err;
static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor)
{
u32 mask = 0x08, adj = 0, value;
/* enable pad calibration logic */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value |= SOR_PLL1_TMDS_TERM;
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
while (mask) {
adj |= mask;
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_TMDS_TERMADJ(adj);
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
usleep_range(100, 200);
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
if (value & SOR_PLL1_TERM_COMPOUT)
adj &= ~mask;
for (i = 0, value = 0; i < link->num_lanes; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN1;
value = (value << 8) | lane;
mask >>= 1;
}
tegra_sor_writel(sor, value, SOR_DP_TPG);
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_TMDS_TERMADJ(adj);
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
pattern = DP_TRAINING_PATTERN_1;
/* disable pad calibration logic */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
}
err = drm_dp_aux_train(sor->aux, link, pattern);
if (err < 0)
return err;
static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link)
{
struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0;
const struct tegra_sor_soc *soc = sor->soc;
u32 pattern = 0, tx_pu = 0, value;
unsigned int i;
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value |= SOR_DP_SPARE_SEQ_ENABLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value |= SOR_DP_SPARE_MACRO_SOR_CLK;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
for (value = 0, i = 0; i < link->lanes; i++) {
u8 vs = link->train.request.voltage_swing[i];
u8 pe = link->train.request.pre_emphasis[i];
u8 pc = link->train.request.post_cursor[i];
u8 shift = sor->soc->lane_map[i] << 3;
voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift;
pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift;
post_cursor |= soc->post_cursor[pc][vs][pe] << shift;
if (sor->soc->tx_pu[pc][vs][pe] > tx_pu)
tx_pu = sor->soc->tx_pu[pc][vs][pe];
switch (link->train.pattern) {
case DP_TRAINING_PATTERN_DISABLE:
value = SOR_DP_TPG_SCRAMBLER_GALIOS |
SOR_DP_TPG_PATTERN_NONE;
break;
case DP_TRAINING_PATTERN_1:
value = SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN1;
break;
case DP_TRAINING_PATTERN_2:
value = SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN2;
break;
case DP_TRAINING_PATTERN_3:
value = SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN3;
break;
default:
return -EINVAL;
}
if (link->caps.channel_coding)
value |= SOR_DP_TPG_CHANNEL_CODING;
for (i = 0, value = 0; i < link->num_lanes; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
SOR_DP_TPG_SCRAMBLER_NONE |
SOR_DP_TPG_PATTERN_TRAIN2;
value = (value << 8) | lane;
pattern = pattern << 8 | value;
}
tegra_sor_writel(sor, value, SOR_DP_TPG);
tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0);
tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0);
pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2;
if (link->caps.tps3_supported)
tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0);
err = drm_dp_aux_train(sor->aux, link, pattern);
if (err < 0)
return err;
tegra_sor_writel(sor, pattern, SOR_DP_TPG);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value |= SOR_DP_PADCTL_TX_PU(tx_pu);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
usleep_range(20, 100);
return 0;
}
static int tegra_sor_dp_link_configure(struct drm_dp_link *link)
{
struct tegra_sor *sor = container_of(link, struct tegra_sor, link);
unsigned int rate, lanes;
u32 value;
int err;
rate = drm_dp_link_rate_to_bw_code(link->rate);
lanes = link->lanes;
/* configure link speed and lane count */
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
if (link->caps.enhanced_framing)
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
for (i = 0, value = 0; i < link->num_lanes; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
SOR_DP_TPG_SCRAMBLER_GALIOS |
SOR_DP_TPG_PATTERN_NONE;
value = (value << 8) | lane;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
usleep_range(400, 1000);
/* configure load pulse position adjustment */
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_LOADADJ_MASK;
switch (rate) {
case DP_LINK_BW_1_62:
value |= SOR_PLL1_LOADADJ(0x3);
break;
case DP_LINK_BW_2_7:
value |= SOR_PLL1_LOADADJ(0x4);
break;
case DP_LINK_BW_5_4:
value |= SOR_PLL1_LOADADJ(0x6);
break;
}
tegra_sor_writel(sor, value, SOR_DP_TPG);
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
/* use alternate scrambler reset for eDP */
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
pattern = DP_TRAINING_PATTERN_DISABLE;
if (link->edp == 0)
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
else
value |= SOR_DP_SPARE_PANEL_INTERNAL;
err = drm_dp_aux_train(sor->aux, link, pattern);
if (err < 0)
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
err = tegra_sor_power_down_lanes(sor);
if (err < 0) {
dev_err(sor->dev, "failed to power down lanes: %d\n", err);
return err;
}
/* power up and pre-charge lanes */
err = tegra_sor_power_up_lanes(sor, lanes);
if (err < 0) {
dev_err(sor->dev, "failed to power up %u lane%s: %d\n",
lanes, (lanes != 1) ? "s" : "", err);
return err;
}
tegra_sor_dp_precharge(sor, lanes);
return 0;
}
/*
 * Callbacks invoked by the DRM DP link helpers: ->apply_training pushes
 * the requested drive settings/training pattern to the SOR lanes and
 * ->configure programs link rate and lane count before training.
 */
static const struct drm_dp_link_ops tegra_sor_dp_link_ops = {
	.apply_training = tegra_sor_dp_link_apply_training,
	.configure = tegra_sor_dp_link_configure,
};
static void tegra_sor_super_update(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_SUPER_STATE0);
......@@ -913,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
u32 num_syms_per_line;
unsigned int i;
if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel)
if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel)
return -EINVAL;
output = link_rate * 8 * link->num_lanes;
input = pclk * config->bits_per_pixel;
output = link_rate * 8 * link->lanes;
if (input >= output)
return -ERANGE;
......@@ -960,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
watermark = div_u64(watermark + params.error, f);
config->watermark = watermark + (config->bits_per_pixel / 8) + 2;
num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) *
(link->num_lanes * 8);
(link->lanes * 8);
if (config->watermark > 30) {
config->watermark = 30;
......@@ -977,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor,
num = ((mode->htotal - mode->hdisplay) - 7) * link_rate;
config->hblank_symbols = div_u64(num, pclk);
if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
if (link->caps.enhanced_framing)
config->hblank_symbols -= 3;
config->hblank_symbols -= 12 / link->num_lanes;
config->hblank_symbols -= 12 / link->lanes;
/* compute the number of symbols per vertical blanking interval */
num = (mode->hdisplay - 25) * link_rate;
config->vblank_symbols = div_u64(num, pclk);
config->vblank_symbols -= 36 / link->num_lanes + 4;
config->vblank_symbols -= 36 / link->lanes + 4;
dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols,
config->vblank_symbols);
......@@ -1201,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
return err;
}
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* stop lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP |
SOR_LANE_SEQ_CTL_POWER_STATE_DOWN;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(25, 100);
}
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0)
return -ETIMEDOUT;
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
......@@ -1585,800 +1809,859 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
.destroy = tegra_output_encoder_destroy,
};
static void tegra_sor_edp_disable(struct drm_encoder *encoder)
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor_state *state = to_sor_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
struct tegra_sor *sor = to_sor(output);
u32 value;
struct drm_display_info *info;
int err;
if (output->panel)
drm_panel_disable(output->panel);
err = tegra_sor_detach(sor);
if (err < 0)
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
tegra_sor_writel(sor, 0, SOR_STATE1);
tegra_sor_update(sor);
info = &output->connector.display_info;
/*
* The following accesses registers of the display controller, so make
* sure it's only executed when the output is attached to one.
* For HBR2 modes, the SOR brick needs to use the x20 multiplier, so
* the pixel clock must be corrected accordingly.
*/
if (dc) {
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value &= ~SOR_ENABLE(0);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
if (pclk >= 340000000) {
state->link_speed = 20;
state->pclk = pclk / 2;
} else {
state->link_speed = 10;
state->pclk = pclk;
}
err = tegra_sor_power_down(sor);
if (err < 0)
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
if (sor->aux) {
err = drm_dp_aux_disable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed to disable DP: %d\n", err);
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
if (err < 0) {
dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
}
err = tegra_io_pad_power_disable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
if (output->panel)
drm_panel_unprepare(output->panel);
pm_runtime_put(sor->dev);
switch (info->bpc) {
case 8:
case 6:
state->bpc = info->bpc;
break;
default:
DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
state->bpc = 8;
break;
}
return 0;
}
#if 0
static int calc_h_ref_to_sync(const struct drm_display_mode *mode,
unsigned int *value)
/*
 * Pack up to @size bytes from @ptr into a 32-bit word in little-endian
 * order, as expected by the SOR HDMI infoframe subpack registers:
 * ptr[0] ends up in bits 7:0, ptr[1] in bits 15:8, and so on.
 *
 * Fix: dropped the stray, unused "unsigned int hfp, hsw, hbp, a = 0, b;"
 * declaration left over from the removed calc_h_ref_to_sync() helper.
 */
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
	u32 value = 0;
	size_t i;

	for (i = size; i > 0; i--)
		value = (value << 8) | ptr[i - 1];

	return value;
}
hfp = mode->hsync_start - mode->hdisplay;
hsw = mode->hsync_end - mode->hsync_start;
hbp = mode->htotal - mode->hsync_end;
static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
const void *data, size_t size)
{
const u8 *ptr = data;
unsigned long offset;
size_t i, j;
u32 value;
pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp);
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
break;
b = hfp - 1;
case HDMI_INFOFRAME_TYPE_AUDIO:
offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
break;
pr_info("a: %u, b: %u\n", a, b);
pr_info("a + hsw + hbp = %u\n", a + hsw + hbp);
case HDMI_INFOFRAME_TYPE_VENDOR:
offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
break;
if (a + hsw + hbp <= 11) {
a = 1 + 11 - hsw - hbp;
pr_info("a: %u\n", a);
default:
dev_err(sor->dev, "unsupported infoframe type: %02x\n",
ptr[0]);
return;
}
if (a > b)
return -EINVAL;
value = INFOFRAME_HEADER_TYPE(ptr[0]) |
INFOFRAME_HEADER_VERSION(ptr[1]) |
INFOFRAME_HEADER_LEN(ptr[2]);
tegra_sor_writel(sor, value, offset);
offset++;
if (hsw < 1)
return -EINVAL;
/*
* Each subpack contains 7 bytes, divided into:
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
for (i = 3, j = 0; i < size; i += 7, j += 8) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
if (mode->hdisplay < 16)
return -EINVAL;
value = tegra_sor_hdmi_subpack(&ptr[i], num);
tegra_sor_writel(sor, value, offset++);
if (value) {
if (b > a && a % 2)
*value = a + 1;
else
*value = a;
}
num = min_t(size_t, rem - num, 3);
return 0;
value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
tegra_sor_writel(sor, value, offset++);
}
}
#endif
static void tegra_sor_edp_enable(struct drm_encoder *encoder)
static int
tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
const struct drm_display_mode *mode)
{
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
struct tegra_sor_state *state;
struct drm_dp_link link;
u8 rate, lanes;
unsigned int i;
int err = 0;
u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u32 value;
int err;
state = to_sor_state(output->connector.state);
pm_runtime_get_sync(sor->dev);
if (output->panel)
drm_panel_prepare(output->panel);
err = drm_dp_aux_enable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed to enable DP: %d\n", err);
/* disable AVI infoframe */
value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_SINGLE;
value &= ~INFOFRAME_CTRL_OTHER;
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
err = drm_dp_link_probe(sor->aux, &link);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&sor->output.connector, mode);
if (err < 0) {
dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
return;
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
return err;
}
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
memset(&config, 0, sizeof(config));
config.bits_per_pixel = state->bpc * 3;
err = tegra_sor_compute_config(sor, mode, &config, &link);
if (err < 0)
dev_err(sor->dev, "failed to compute configuration: %d\n", err);
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
return err;
}
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value |= SOR_PLL3_PLL_VDD_MODE_3V3;
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
tegra_sor_hdmi_write_infopack(sor, buffer, err);
value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST |
SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
/* enable AVI infoframe */
value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
value |= INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_SEQ_PLLCAPPD;
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
value |= SOR_PLL2_LVDS_ENABLE;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
return 0;
}
value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM;
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
static void tegra_sor_write_eld(struct tegra_sor *sor)
{
size_t length = drm_eld_size(sor->output.connector.eld), i;
while (true) {
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0)
break;
for (i = 0; i < length; i++)
tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
SOR_AUDIO_HDA_ELD_BUFWR);
usleep_range(250, 1000);
}
/*
* The HDA codec will always report an ELD buffer size of 96 bytes and
* the HDA codec driver will check that each byte read from the buffer
* is valid. Therefore every byte must be written, even if no 96 bytes
* were parsed from EDID.
*/
for (i = length; i < 96; i++)
tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
}
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
/*
* power up
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
* is used for interoperability between the HDA codec driver and the
* HDMI/DP driver.
*/
value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
tegra_sor_writel(sor, value, SOR_INT_ENABLE);
tegra_sor_writel(sor, value, SOR_INT_MASK);
/* set safe link bandwidth (1.62 Gbps) */
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
tegra_sor_write_eld(sor);
/* step 1 */
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN |
SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
}
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
tegra_sor_writel(sor, 0, SOR_INT_MASK);
tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
}
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
static void tegra_sor_audio_enable(struct tegra_sor *sor)
{
u32 value;
/* step 2 */
err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
usleep_range(5, 100);
/* select HDA audio input */
value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
/* step 3 */
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
/* inject null samples */
if (sor->format.channels != 2)
value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
else
value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
usleep_range(20, 100);
value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
/* step 4 */
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_VCOPD;
value &= ~SOR_PLL0_PWR;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
/* enable advertising HBR capability */
tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
}
usleep_range(200, 1000);
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
{
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
struct hdmi_audio_infoframe frame;
u32 value;
int err;
/* step 5 */
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
err = hdmi_audio_infoframe_init(&frame);
if (err < 0) {
dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
return err;
}
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
SOR_XBAR_CTRL_LINK1_XSEL(i, i);
frame.channels = sor->format.channels;
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
return err;
}
/* switch to DP parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
if (err < 0)
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
tegra_sor_hdmi_write_infopack(sor, buffer, err);
/* power DP lanes */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
value |= INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
if (link.num_lanes <= 2)
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2);
else
value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2;
return 0;
}
if (link.num_lanes <= 1)
value &= ~SOR_DP_PADCTL_PD_TXD_1;
else
value |= SOR_DP_PADCTL_PD_TXD_1;
static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
if (link.num_lanes == 0)
value &= ~SOR_DP_PADCTL_PD_TXD_0;
else
value |= SOR_DP_PADCTL_PD_TXD_0;
tegra_sor_audio_enable(sor);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes);
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
SOR_HDMI_SPARE_CTS_RESET(1) |
SOR_HDMI_SPARE_HW_CTS_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
/* start lane sequencer */
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
SOR_LANE_SEQ_CTL_POWER_STATE_UP;
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
/* enable HW CTS */
value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
/* allow packet to be sent */
value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
usleep_range(250, 1000);
}
/* reset N counter and enable lookup */
value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
/* set link bandwidth */
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
value = (24000 * 4096) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
tegra_sor_apply_config(sor, &config);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
/* enable link */
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
for (i = 0, value = 0; i < 4; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
SOR_DP_TPG_SCRAMBLER_GALIOS |
SOR_DP_TPG_PATTERN_NONE;
value = (value << 8) | lane;
}
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
tegra_sor_writel(sor, value, SOR_DP_TPG);
value = (24000 * 6144) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
/* enable pad calibration logic */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
value = (24000 * 12288) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
err = drm_dp_link_probe(sor->aux, &link);
if (err < 0)
dev_err(sor->dev, "failed to probe eDP link: %d\n", err);
value = (24000 * 24576) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
err = drm_dp_link_power_up(sor->aux, &link);
if (err < 0)
dev_err(sor->dev, "failed to power up eDP link: %d\n", err);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
value &= ~SOR_HDMI_AUDIO_N_RESET;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
err = drm_dp_link_configure(sor->aux, &link);
if (err < 0)
dev_err(sor->dev, "failed to configure eDP link: %d\n", err);
tegra_sor_hdmi_enable_audio_infoframe(sor);
}
rate = drm_dp_link_rate_to_bw_code(link.rate);
lanes = link.num_lanes;
static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
{
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate);
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(lanes);
/* Disable HDMI audio output by stopping transmission of the audio infoframe. */
static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
{
tegra_sor_hdmi_disable_audio_infoframe(sor);
}
if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
static struct tegra_sor_hdmi_settings *
tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
{
unsigned int i;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
for (i = 0; i < sor->num_settings; i++)
if (frequency <= sor->settings[i].frequency)
return &sor->settings[i];
return NULL;
}
/*
 * Turn off HDMI 2.0 scrambling in the SOR: clear both the scramble enable
 * and the divide-by-4 clock mode bits in the SOR_HDMI2_CTRL register.
 */
static void tegra_sor_hdmi_disable_scrambling(struct tegra_sor *sor)
{
	u32 ctrl;

	ctrl = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
	ctrl &= ~(SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4 |
		  SOR_HDMI2_CTRL_SCRAMBLE);
	tegra_sor_writel(sor, ctrl, SOR_HDMI2_CTRL);
}
/*
 * Disable scrambling on both ends of the HDMI link: instruct the sink via
 * SCDC (over DDC/I2C) to stop descrambling and to use a 1:1 TMDS clock
 * ratio, then stop scrambling on the source (SOR) side.
 */
static void tegra_sor_hdmi_scdc_disable(struct tegra_sor *sor)
{
struct i2c_adapter *ddc = sor->output.ddc;
drm_scdc_set_high_tmds_clock_ratio(ddc, false);
drm_scdc_set_scrambling(ddc, false);
/* stop scrambling on the source side as well */
tegra_sor_hdmi_disable_scrambling(sor);
}
for (i = 0; i < link.num_lanes; i++) {
unsigned long lane = SOR_DP_TPG_CHANNEL_CODING |
SOR_DP_TPG_SCRAMBLER_GALIOS |
SOR_DP_TPG_PATTERN_NONE;
value = (value << 8) | lane;
static void tegra_sor_hdmi_scdc_stop(struct tegra_sor *sor)
{
if (sor->scdc_enabled) {
cancel_delayed_work_sync(&sor->scdc);
tegra_sor_hdmi_scdc_disable(sor);
}
}
tegra_sor_writel(sor, value, SOR_DP_TPG);
static void tegra_sor_hdmi_enable_scrambling(struct tegra_sor *sor)
{
u32 value;
err = tegra_sor_dp_train_fast(sor, &link);
if (err < 0)
dev_err(sor->dev, "DP fast link training failed: %d\n", err);
value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
value |= SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
value |= SOR_HDMI2_CTRL_SCRAMBLE;
tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
}
dev_dbg(sor->dev, "fast link training succeeded\n");
static void tegra_sor_hdmi_scdc_enable(struct tegra_sor *sor)
{
struct i2c_adapter *ddc = sor->output.ddc;
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
drm_scdc_set_high_tmds_clock_ratio(ddc, true);
drm_scdc_set_scrambling(ddc, true);
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
tegra_sor_hdmi_enable_scrambling(sor);
}
/* use DP-A protocol */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value |= SOR_STATE_ASY_PROTOCOL_DP_A;
tegra_sor_writel(sor, value, SOR_STATE1);
static void tegra_sor_hdmi_scdc_work(struct work_struct *work)
{
struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work);
struct i2c_adapter *ddc = sor->output.ddc;
tegra_sor_mode_set(sor, mode, state);
if (!drm_scdc_get_scrambling_status(ddc)) {
DRM_DEBUG_KMS("SCDC not scrambled\n");
tegra_sor_hdmi_scdc_enable(sor);
}
schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
}
/*
 * Start SCDC maintenance for HDMI 2.0 modes: if the sink advertises SCDC
 * support and the mode requires scrambling (pixel clock >= 340 MHz),
 * enable scrambling and schedule the periodic monitor work.
 */
static void tegra_sor_hdmi_scdc_start(struct tegra_sor *sor)
{
struct drm_scdc *scdc = &sor->output.connector.display_info.hdmi.scdc;
struct drm_display_mode *mode;
mode = &sor->output.encoder.crtc->state->adjusted_mode;
/* scrambling is only needed for TMDS character rates above 340 MHz */
if (mode->clock >= 340000 && scdc->supported) {
/* recheck the sink's scrambling status every five seconds */
schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
tegra_sor_hdmi_scdc_enable(sor);
sor->scdc_enabled = true;
}
}
static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor *sor = to_sor(output);
u32 value;
int err;
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
tegra_sor_audio_unprepare(sor);
tegra_sor_hdmi_scdc_stop(sor);
err = tegra_sor_detach(sor);
if (err < 0)
dev_err(sor->dev, "failed to setup PWM: %d\n", err);
dev_err(sor->dev, "failed to detach SOR: %d\n", err);
tegra_sor_writel(sor, 0, SOR_STATE1);
tegra_sor_update(sor);
/* disable display to SOR clock */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= SOR_ENABLE(0);
if (!sor->soc->has_nvdisplay)
value &= ~SOR1_TIMING_CYA;
value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
err = tegra_sor_attach(sor);
err = tegra_sor_power_down(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
err = tegra_sor_wakeup(sor);
err = tegra_io_pad_power_disable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to enable DC: %d\n", err);
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
if (output->panel)
drm_panel_enable(output->panel);
pm_runtime_put(sor->dev);
}
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_sor_state *state = to_sor_state(conn_state);
struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
unsigned long pclk = crtc_state->mode.clock * 1000;
unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor_hdmi_settings *settings;
struct tegra_sor *sor = to_sor(output);
struct drm_display_info *info;
struct tegra_sor_state *state;
struct drm_display_mode *mode;
unsigned long rate, pclk;
unsigned int div, i;
u32 value;
int err;
info = &output->connector.display_info;
state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
pclk = mode->clock * 1000;
/*
* For HBR2 modes, the SOR brick needs to use the x20 multiplier, so
* the pixel clock must be corrected accordingly.
*/
if (pclk >= 340000000) {
state->link_speed = 20;
state->pclk = pclk / 2;
} else {
state->link_speed = 10;
state->pclk = pclk;
}
pm_runtime_get_sync(sor->dev);
err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
pclk, 0);
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0) {
dev_err(output->dev, "failed to setup CRTC state: %d\n", err);
return err;
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
return;
}
switch (info->bpc) {
case 8:
case 6:
state->bpc = info->bpc;
break;
default:
DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
state->bpc = 8;
break;
}
return 0;
}
/* Encoder helper callbacks used when the SOR drives an eDP panel. */
static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = {
.disable = tegra_sor_edp_disable,
.enable = tegra_sor_edp_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
div = clk_get_rate(sor->clk) / 1000000 * 4;
static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size)
{
u32 value = 0;
size_t i;
err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
for (i = size; i > 0; i--)
value = (value << 8) | ptr[i - 1];
usleep_range(20, 100);
return value;
}
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
const void *data, size_t size)
{
const u8 *ptr = data;
unsigned long offset;
size_t i, j;
u32 value;
usleep_range(20, 100);
switch (ptr[0]) {
case HDMI_INFOFRAME_TYPE_AVI:
offset = SOR_HDMI_AVI_INFOFRAME_HEADER;
break;
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
case HDMI_INFOFRAME_TYPE_AUDIO:
offset = SOR_HDMI_AUDIO_INFOFRAME_HEADER;
break;
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_VCOPD;
value &= ~SOR_PLL0_PWR;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
case HDMI_INFOFRAME_TYPE_VENDOR:
offset = SOR_HDMI_VSI_INFOFRAME_HEADER;
break;
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
default:
dev_err(sor->dev, "unsupported infoframe type: %02x\n",
ptr[0]);
return;
}
usleep_range(200, 400);
value = INFOFRAME_HEADER_TYPE(ptr[0]) |
INFOFRAME_HEADER_VERSION(ptr[1]) |
INFOFRAME_HEADER_LEN(ptr[2]);
tegra_sor_writel(sor, value, offset);
offset++;
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_POWERDOWN_OVERRIDE;
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
/*
* Each subpack contains 7 bytes, divided into:
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
for (i = 3, j = 0; i < size; i += 7, j += 8) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
usleep_range(20, 100);
value = tegra_sor_hdmi_subpack(&ptr[i], num);
tegra_sor_writel(sor, value, offset++);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
num = min_t(size_t, rem - num, 3);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
break;
value = tegra_sor_hdmi_subpack(&ptr[i + 4], num);
tegra_sor_writel(sor, value, offset++);
usleep_range(250, 1000);
}
}
static int
tegra_sor_hdmi_setup_avi_infoframe(struct tegra_sor *sor,
const struct drm_display_mode *mode)
{
u8 buffer[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u32 value;
int err;
/* disable AVI infoframe */
value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_SINGLE;
value &= ~INFOFRAME_CTRL_OTHER;
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
&sor->output.connector, mode);
if (err < 0) {
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
return err;
}
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
dev_err(sor->dev, "failed to pack AVI infoframe: %d\n", err);
return err;
usleep_range(250, 1000);
}
tegra_sor_hdmi_write_infopack(sor, buffer, err);
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
/* enable AVI infoframe */
value = tegra_sor_readl(sor, SOR_HDMI_AVI_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
value |= INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AVI_INFOFRAME_CTRL);
if (mode->clock < 340000) {
DRM_DEBUG_KMS("setting 2.7 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
} else {
DRM_DEBUG_KMS("setting 5.4 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
}
return 0;
}
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
static void tegra_sor_write_eld(struct tegra_sor *sor)
{
size_t length = drm_eld_size(sor->output.connector.eld), i;
/* SOR pad PLL stabilization time */
usleep_range(250, 1000);
for (i = 0; i < length; i++)
tegra_sor_writel(sor, i << 8 | sor->output.connector.eld[i],
SOR_AUDIO_HDA_ELD_BUFWR);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(4);
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
/*
* The HDA codec will always report an ELD buffer size of 96 bytes and
* the HDA codec driver will check that each byte read from the buffer
* is valid. Therefore every byte must be written, even if no 96 bytes
* were parsed from EDID.
*/
for (i = length; i < 96; i++)
tegra_sor_writel(sor, i << 8 | 0, SOR_AUDIO_HDA_ELD_BUFWR);
}
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value &= ~SOR_DP_SPARE_SEQ_ENABLE;
value &= ~SOR_DP_SPARE_MACRO_SOR_CLK;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
static void tegra_sor_audio_prepare(struct tegra_sor *sor)
{
u32 value;
value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
tegra_sor_writel(sor, value, SOR_SEQ_CTL);
tegra_sor_write_eld(sor);
value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD;
tegra_sor_writel(sor, value, SOR_AUDIO_HDA_PRESENSE);
}
if (!sor->soc->has_nvdisplay) {
/* program the reference clock */
value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
tegra_sor_writel(sor, value, SOR_REFCLK);
}
/*
 * Signal to the HDA codec that audio is going away: clear the
 * SOR_AUDIO_HDA_PRESENSE register (the prepare path sets the ELDV and PD
 * bits in it, see tegra_sor_audio_prepare()).
 */
static void tegra_sor_audio_unprepare(struct tegra_sor *sor)
{
tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE);
}
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
SOR_XBAR_CTRL_LINK1_XSEL(i, i);
static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor)
{
u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
struct hdmi_audio_infoframe frame;
u32 value;
int err;
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
err = hdmi_audio_infoframe_init(&frame);
/*
* Switch the pad clock to the DP clock. Note that we cannot actually
* do this because Tegra186 and later don't support clk_set_parent()
* on the sorX_pad_clkout clocks. We already do the equivalent above
* using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
*/
#if 0
err = clk_set_parent(sor->clk_pad, sor->clk_dp);
if (err < 0) {
dev_err(sor->dev, "failed to setup audio infoframe: %d\n", err);
return err;
dev_err(sor->dev, "failed to select pad parent clock: %d\n",
err);
return;
}
#endif
frame.channels = sor->format.channels;
/* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
err);
return;
}
err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
/* switch the output clock to the parent pixel clock */
err = clk_set_parent(sor->clk, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to pack audio infoframe: %d\n", err);
return err;
dev_err(sor->dev, "failed to select output parent clock: %d\n",
err);
return;
}
tegra_sor_hdmi_write_infopack(sor, buffer, err);
/* adjust clock rate for HDMI 2.0 modes */
rate = clk_get_rate(sor->clk_parent);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
value |= INFOFRAME_CTRL_CHECKSUM_ENABLE;
value |= INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
if (mode->clock >= 340000)
rate /= 2;
return 0;
}
DRM_DEBUG_KMS("setting clock to %lu Hz, mode: %lu Hz\n", rate, pclk);
static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor)
{
u32 value;
clk_set_rate(sor->clk, rate);
value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL);
if (!sor->soc->has_nvdisplay) {
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
/* select HDA audio input */
value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK);
value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA);
/* XXX is this the proper check? */
if (mode->clock < 75000)
value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
/* inject null samples */
if (sor->format.channels != 2)
value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
else
value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL;
tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
}
value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH;
max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL);
value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
/* enable advertising HBR capability */
tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE);
if (!dc->soc->has_nvdisplay) {
/* H_PULSE2 setup */
pulse_start = h_ref_to_sync +
(mode->hsync_end - mode->hsync_start) +
(mode->htotal - mode->hsync_end) - 10;
tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL);
value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
value = SOR_HDMI_SPARE_ACR_PRIORITY_HIGH |
SOR_HDMI_SPARE_CTS_RESET(1) |
SOR_HDMI_SPARE_HW_CTS_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_SPARE);
value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
/* enable HW CTS */
value = SOR_HDMI_ACR_SUBPACK_LOW_SB1(0);
tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_LOW);
value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
value |= H_PULSE2_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
}
/* allow packet to be sent */
value = SOR_HDMI_ACR_SUBPACK_HIGH_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_ACR_0441_SUBPACK_HIGH);
/* infoframe setup */
err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
if (err < 0)
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
/* reset N counter and enable lookup */
value = SOR_HDMI_AUDIO_N_RESET | SOR_HDMI_AUDIO_N_LOOKUP;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
/* XXX HDMI audio support not implemented yet */
tegra_sor_hdmi_disable_audio_infoframe(sor);
value = (24000 * 4096) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0320);
tegra_sor_writel(sor, 4096, SOR_AUDIO_NVAL_0320);
/* use single TMDS protocol */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
tegra_sor_writel(sor, value, SOR_STATE1);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0441);
tegra_sor_writel(sor, 4704, SOR_AUDIO_NVAL_0441);
/* power up pad calibration */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_0882);
tegra_sor_writel(sor, 9408, SOR_AUDIO_NVAL_0882);
/* production settings */
settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
if (!settings) {
dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
mode->clock * 1000);
return;
}
tegra_sor_writel(sor, 20000, SOR_AUDIO_AVAL_1764);
tegra_sor_writel(sor, 18816, SOR_AUDIO_NVAL_1764);
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_ICHPMP_MASK;
value &= ~SOR_PLL0_FILTER_MASK;
value &= ~SOR_PLL0_VCOCAP_MASK;
value |= SOR_PLL0_ICHPMP(settings->ichpmp);
value |= SOR_PLL0_FILTER(settings->filter);
value |= SOR_PLL0_VCOCAP(settings->vcocap);
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
value = (24000 * 6144) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0480);
tegra_sor_writel(sor, 6144, SOR_AUDIO_NVAL_0480);
/* XXX not in TRM */
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_LOADADJ_MASK;
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_LOADADJ(settings->loadadj);
value |= SOR_PLL1_TMDS_TERMADJ(settings->tmds_termadj);
value |= SOR_PLL1_TMDS_TERM;
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
value = (24000 * 12288) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_0960);
tegra_sor_writel(sor, 12288, SOR_AUDIO_NVAL_0960);
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_BG_TEMP_COEF_MASK;
value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
value &= ~SOR_PLL3_AVDD10_LEVEL_MASK;
value &= ~SOR_PLL3_AVDD14_LEVEL_MASK;
value |= SOR_PLL3_BG_TEMP_COEF(settings->bg_temp_coef);
value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref_level);
value |= SOR_PLL3_AVDD10_LEVEL(settings->avdd10_level);
value |= SOR_PLL3_AVDD14_LEVEL(settings->avdd14_level);
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = (24000 * 24576) / (128 * sor->format.sample_rate / 1000);
tegra_sor_writel(sor, value, SOR_AUDIO_AVAL_1920);
tegra_sor_writel(sor, 24576, SOR_AUDIO_NVAL_1920);
value = settings->drive_current[3] << 24 |
settings->drive_current[2] << 16 |
settings->drive_current[1] << 8 |
settings->drive_current[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_N);
value &= ~SOR_HDMI_AUDIO_N_RESET;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_N);
value = settings->preemphasis[3] << 24 |
settings->preemphasis[2] << 16 |
settings->preemphasis[1] << 8 |
settings->preemphasis[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
tegra_sor_hdmi_enable_audio_infoframe(sor);
}
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu_value);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl2);
value &= ~SOR_DP_PADCTL_SPAREPLL_MASK;
value |= SOR_DP_PADCTL_SPAREPLL(settings->sparepll);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl2);
/* power down pad calibration */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
if (!dc->soc->has_nvdisplay) {
/* miscellaneous display controller settings */
value = VSYNC_H_POSITION(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
}
static void tegra_sor_hdmi_disable_audio_infoframe(struct tegra_sor *sor)
{
u32 value;
value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
value &= ~DITHER_CONTROL_MASK;
value &= ~BASE_COLOR_SIZE_MASK;
value = tegra_sor_readl(sor, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
value &= ~INFOFRAME_CTRL_ENABLE;
tegra_sor_writel(sor, value, SOR_HDMI_AUDIO_INFOFRAME_CTRL);
}
switch (state->bpc) {
case 6:
value |= BASE_COLOR_SIZE_666;
break;
/* Disable HDMI audio output by stopping transmission of the audio infoframe. */
static void tegra_sor_hdmi_audio_disable(struct tegra_sor *sor)
{
tegra_sor_hdmi_disable_audio_infoframe(sor);
}
case 8:
value |= BASE_COLOR_SIZE_888;
break;
static struct tegra_sor_hdmi_settings *
tegra_sor_hdmi_find_settings(struct tegra_sor *sor, unsigned long frequency)
{
unsigned int i;
case 10:
value |= BASE_COLOR_SIZE_101010;
break;
for (i = 0; i < sor->num_settings; i++)
if (frequency <= sor->settings[i].frequency)
return &sor->settings[i];
case 12:
value |= BASE_COLOR_SIZE_121212;
break;
return NULL;
}
default:
WARN(1, "%u bits-per-color not supported\n", state->bpc);
value |= BASE_COLOR_SIZE_888;
break;
}
static void tegra_sor_hdmi_disable_scrambling(struct tegra_sor *sor)
{
u32 value;
tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
value &= ~SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
value &= ~SOR_HDMI2_CTRL_SCRAMBLE;
tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
}
/* XXX set display head owner */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_OWNER_MASK;
value |= SOR_STATE_ASY_OWNER(1 + dc->pipe);
tegra_sor_writel(sor, value, SOR_STATE1);
static void tegra_sor_hdmi_scdc_disable(struct tegra_sor *sor)
{
struct i2c_adapter *ddc = sor->output.ddc;
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
drm_scdc_set_high_tmds_clock_ratio(ddc, false);
drm_scdc_set_scrambling(ddc, false);
/* configure dynamic range of output */
value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
tegra_sor_hdmi_disable_scrambling(sor);
}
/* configure colorspace */
value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
static void tegra_sor_hdmi_scdc_stop(struct tegra_sor *sor)
{
if (sor->scdc_enabled) {
cancel_delayed_work_sync(&sor->scdc);
tegra_sor_hdmi_scdc_disable(sor);
}
}
tegra_sor_mode_set(sor, mode, state);
static void tegra_sor_hdmi_enable_scrambling(struct tegra_sor *sor)
{
u32 value;
tegra_sor_update(sor);
value = tegra_sor_readl(sor, SOR_HDMI2_CTRL);
value |= SOR_HDMI2_CTRL_CLOCK_MODE_DIV_BY_4;
value |= SOR_HDMI2_CTRL_SCRAMBLE;
tegra_sor_writel(sor, value, SOR_HDMI2_CTRL);
}
/* program preamble timing in SOR (XXX) */
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
static void tegra_sor_hdmi_scdc_enable(struct tegra_sor *sor)
{
struct i2c_adapter *ddc = sor->output.ddc;
err = tegra_sor_attach(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
drm_scdc_set_high_tmds_clock_ratio(ddc, true);
drm_scdc_set_scrambling(ddc, true);
/* enable display to SOR clock and generate HDMI preamble */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
tegra_sor_hdmi_enable_scrambling(sor);
}
if (!sor->soc->has_nvdisplay)
value |= SOR1_TIMING_CYA;
static void tegra_sor_hdmi_scdc_work(struct work_struct *work)
{
struct tegra_sor *sor = container_of(work, struct tegra_sor, scdc.work);
struct i2c_adapter *ddc = sor->output.ddc;
value |= SOR_ENABLE(sor->index);
if (!drm_scdc_get_scrambling_status(ddc)) {
DRM_DEBUG_KMS("SCDC not scrambled\n");
tegra_sor_hdmi_scdc_enable(sor);
}
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
}
if (dc->soc->has_nvdisplay) {
value = tegra_dc_readl(dc, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
value &= ~PROTOCOL_MASK;
value |= PROTOCOL_SINGLE_TMDS_A;
tegra_dc_writel(dc, value, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
}
static void tegra_sor_hdmi_scdc_start(struct tegra_sor *sor)
{
struct drm_scdc *scdc = &sor->output.connector.display_info.hdmi.scdc;
struct drm_display_mode *mode;
tegra_dc_commit(dc);
mode = &sor->output.encoder.crtc->state->adjusted_mode;
err = tegra_sor_wakeup(sor);
if (err < 0)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
if (mode->clock >= 340000 && scdc->supported) {
schedule_delayed_work(&sor->scdc, msecs_to_jiffies(5000));
tegra_sor_hdmi_scdc_enable(sor);
sor->scdc_enabled = true;
}
tegra_sor_hdmi_scdc_start(sor);
tegra_sor_audio_prepare(sor);
}
static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
/* Encoder helper callbacks used when the SOR drives an HDMI display. */
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.disable = tegra_sor_hdmi_disable,
.enable = tegra_sor_hdmi_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
static void tegra_sor_dp_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
......@@ -2386,8 +2669,19 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
u32 value;
int err;
tegra_sor_audio_unprepare(sor);
tegra_sor_hdmi_scdc_stop(sor);
if (output->panel)
drm_panel_disable(output->panel);
/*
* Do not attempt to power down a DP link if we're not connected since
* the AUX transactions would just be timing out.
*/
if (output->connector.status != connector_status_disconnected) {
err = drm_dp_link_power_down(sor->aux, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to power down link: %d\n",
err);
}
err = tegra_sor_detach(sor);
if (err < 0)
......@@ -2396,18 +2690,23 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
tegra_sor_writel(sor, 0, SOR_STATE1);
tegra_sor_update(sor);
/* disable display to SOR clock */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
if (!sor->soc->has_nvdisplay)
value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1));
else
value &= ~SOR_ENABLE(sor->index);
value &= ~SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
tegra_dc_commit(dc);
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value &= ~SOR_STATE_ASY_SUBOWNER_MASK;
value &= ~SOR_STATE_ASY_OWNER_MASK;
tegra_sor_writel(sor, value, SOR_STATE1);
tegra_sor_update(sor);
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0)
dev_err(sor->dev, "failed to set safe clock: %d\n", err);
err = tegra_sor_power_down(sor);
if (err < 0)
dev_err(sor->dev, "failed to power down SOR: %d\n", err);
......@@ -2416,61 +2715,80 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
if (err < 0)
dev_err(sor->dev, "failed to power off I/O pad: %d\n", err);
err = drm_dp_aux_disable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed disable DPAUX: %d\n", err);
if (output->panel)
drm_panel_unprepare(output->panel);
pm_runtime_put(sor->dev);
}
static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
static void tegra_sor_dp_enable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
struct tegra_sor_hdmi_settings *settings;
struct tegra_sor *sor = to_sor(output);
struct tegra_sor_config config;
struct tegra_sor_state *state;
struct drm_display_mode *mode;
unsigned long rate, pclk;
unsigned int div, i;
struct drm_display_info *info;
unsigned int i;
u32 value;
int err;
state = to_sor_state(output->connector.state);
mode = &encoder->crtc->state->adjusted_mode;
pclk = mode->clock * 1000;
info = &output->connector.display_info;
pm_runtime_get_sync(sor->dev);
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
if (err < 0) {
if (err < 0)
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
return;
}
div = clk_get_rate(sor->clk) / 1000000 * 4;
err = tegra_io_pad_power_enable(sor->pad);
if (err < 0)
dev_err(sor->dev, "failed to power on I/O pad: %d\n", err);
dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err);
usleep_range(20, 100);
err = drm_dp_aux_enable(sor->aux);
if (err < 0)
dev_err(sor->dev, "failed to enable DPAUX: %d\n", err);
err = drm_dp_link_probe(sor->aux, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to probe DP link: %d\n", err);
tegra_sor_filter_rates(sor);
err = drm_dp_link_choose(&sor->link, mode, info);
if (err < 0)
dev_err(sor->dev, "failed to choose link: %d\n", err);
if (output->panel)
drm_panel_prepare(output->panel);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_BANDGAP_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
usleep_range(20, 40);
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_PLL_VDD_MODE_3V3;
value |= SOR_PLL3_PLL_VDD_MODE_3V3;
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_VCOPD;
value &= ~SOR_PLL0_PWR;
value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR);
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
value = tegra_sor_readl(sor, sor->soc->regs->pll2);
value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE;
value |= SOR_PLL2_SEQ_PLLCAPPD;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(200, 400);
......@@ -2480,328 +2798,257 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
value &= ~SOR_PLL2_PORT_POWERDOWN;
tegra_sor_writel(sor, value, sor->soc->regs->pll2);
usleep_range(20, 100);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_STATE_BUSY) == 0)
break;
usleep_range(250, 1000);
}
value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN |
SOR_LANE_SEQ_CTL_POWER_STATE_UP | SOR_LANE_SEQ_CTL_DELAY(5);
tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL);
while (true) {
value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL);
if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0)
break;
usleep_range(250, 1000);
}
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK;
value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
if (mode->clock < 340000) {
DRM_DEBUG_KMS("setting 2.7 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G2_70;
} else {
DRM_DEBUG_KMS("setting 5.4 GHz link speed\n");
value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G5_40;
}
if (output->panel)
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
else
value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK;
value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
/* SOR pad PLL stabilization time */
usleep_range(250, 1000);
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK;
value |= SOR_DP_LINKCTL_LANE_COUNT(4);
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
usleep_range(200, 400);
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value &= ~SOR_DP_SPARE_SEQ_ENABLE;
value &= ~SOR_DP_SPARE_MACRO_SOR_CLK;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
/* XXX not in TRM */
if (output->panel)
value |= SOR_DP_SPARE_PANEL_INTERNAL;
else
value &= ~SOR_DP_SPARE_PANEL_INTERNAL;
value = SOR_SEQ_CTL_PU_PC(0) | SOR_SEQ_CTL_PU_PC_ALT(0) |
SOR_SEQ_CTL_PD_PC(8) | SOR_SEQ_CTL_PD_PC_ALT(8);
tegra_sor_writel(sor, value, SOR_SEQ_CTL);
value |= SOR_DP_SPARE_SEQ_ENABLE;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
value = SOR_SEQ_INST_DRIVE_PWM_OUT_LO | SOR_SEQ_INST_HALT |
SOR_SEQ_INST_WAIT_VSYNC | SOR_SEQ_INST_WAIT(1);
tegra_sor_writel(sor, value, SOR_SEQ_INST(0));
tegra_sor_writel(sor, value, SOR_SEQ_INST(8));
/* XXX not in TRM */
tegra_sor_writel(sor, 0, SOR_LVDS);
if (!sor->soc->has_nvdisplay) {
/* program the reference clock */
value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
tegra_sor_writel(sor, value, SOR_REFCLK);
}
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_ICHPMP_MASK;
value &= ~SOR_PLL0_VCOCAP_MASK;
value |= SOR_PLL0_ICHPMP(0x1);
value |= SOR_PLL0_VCOCAP(0x3);
value |= SOR_PLL0_RESISTOR_EXT;
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
/* XXX not in TRM */
for (value = 0, i = 0; i < 5; i++)
value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) |
value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
SOR_XBAR_CTRL_LINK1_XSEL(i, i);
tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/* switch to parent clock */
err = clk_set_parent(sor->clk, sor->clk_parent);
/*
* Switch the pad clock to the DP clock. Note that we cannot actually
* do this because Tegra186 and later don't support clk_set_parent()
* on the sorX_pad_clkout clocks. We already do the equivalent above
* using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register.
*/
#if 0
err = clk_set_parent(sor->clk_pad, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
dev_err(sor->dev, "failed to select pad parent clock: %d\n",
err);
return;
}
#endif
/* switch the SOR clock to the pad clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
if (err < 0) {
dev_err(sor->dev, "failed to set pad clock: %d\n", err);
dev_err(sor->dev, "failed to select SOR parent clock: %d\n",
err);
return;
}
/* adjust clock rate for HDMI 2.0 modes */
rate = clk_get_rate(sor->clk_parent);
if (mode->clock >= 340000)
rate /= 2;
DRM_DEBUG_KMS("setting clock to %lu Hz, mode: %lu Hz\n", rate, pclk);
clk_set_rate(sor->clk, rate);
if (!sor->soc->has_nvdisplay) {
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
/* XXX is this the proper check? */
if (mode->clock < 75000)
value |= SOR_INPUT_CONTROL_ARM_VIDEO_RANGE_LIMITED;
tegra_sor_writel(sor, value, SOR_INPUT_CONTROL);
/* switch the output clock to the parent pixel clock */
err = clk_set_parent(sor->clk, sor->clk_parent);
if (err < 0) {
dev_err(sor->dev, "failed to select output parent clock: %d\n",
err);
return;
}
max_ac = ((mode->htotal - mode->hdisplay) - SOR_REKEY - 18) / 32;
/* use DP-A protocol */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value |= SOR_STATE_ASY_PROTOCOL_DP_A;
tegra_sor_writel(sor, value, SOR_STATE1);
value = SOR_HDMI_CTRL_ENABLE | SOR_HDMI_CTRL_MAX_AC_PACKET(max_ac) |
SOR_HDMI_CTRL_AUDIO_LAYOUT | SOR_HDMI_CTRL_REKEY(SOR_REKEY);
tegra_sor_writel(sor, value, SOR_HDMI_CTRL);
/* enable port */
value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
value |= SOR_DP_LINKCTL_ENABLE;
tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
if (!dc->soc->has_nvdisplay) {
/* H_PULSE2 setup */
pulse_start = h_ref_to_sync +
(mode->hsync_end - mode->hsync_start) +
(mode->htotal - mode->hsync_end) - 10;
tegra_sor_dp_term_calibrate(sor);
value = PULSE_LAST_END_A | PULSE_QUAL_VACTIVE |
PULSE_POLARITY_HIGH | PULSE_MODE_NORMAL;
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
err = drm_dp_link_train(&sor->link);
if (err < 0)
dev_err(sor->dev, "link training failed: %d\n", err);
else
dev_dbg(sor->dev, "link training succeeded\n");
value = PULSE_END(pulse_start + 8) | PULSE_START(pulse_start);
tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
err = drm_dp_link_power_up(sor->aux, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to power up DP link: %d\n", err);
value = tegra_dc_readl(dc, DC_DISP_DISP_SIGNAL_OPTIONS0);
value |= H_PULSE2_ENABLE;
tegra_dc_writel(dc, value, DC_DISP_DISP_SIGNAL_OPTIONS0);
}
/* compute configuration */
memset(&config, 0, sizeof(config));
config.bits_per_pixel = state->bpc * 3;
/* infoframe setup */
err = tegra_sor_hdmi_setup_avi_infoframe(sor, mode);
err = tegra_sor_compute_config(sor, mode, &config, &sor->link);
if (err < 0)
dev_err(sor->dev, "failed to setup AVI infoframe: %d\n", err);
/* XXX HDMI audio support not implemented yet */
tegra_sor_hdmi_disable_audio_infoframe(sor);
dev_err(sor->dev, "failed to compute configuration: %d\n", err);
/* use single TMDS protocol */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
value |= SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A;
tegra_sor_writel(sor, value, SOR_STATE1);
tegra_sor_apply_config(sor, &config);
tegra_sor_mode_set(sor, mode, state);
/* power up pad calibration */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
if (output->panel) {
/* CSTM (LVDS, link A/B, upper) */
value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
SOR_CSTM_UPPER;
tegra_sor_writel(sor, value, SOR_CSTM);
/* production settings */
settings = tegra_sor_hdmi_find_settings(sor, mode->clock * 1000);
if (!settings) {
dev_err(sor->dev, "no settings for pixel clock %d Hz\n",
mode->clock * 1000);
return;
/* PWM setup */
err = tegra_sor_setup_pwm(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to setup PWM: %d\n", err);
}
value = tegra_sor_readl(sor, sor->soc->regs->pll0);
value &= ~SOR_PLL0_ICHPMP_MASK;
value &= ~SOR_PLL0_FILTER_MASK;
value &= ~SOR_PLL0_VCOCAP_MASK;
value |= SOR_PLL0_ICHPMP(settings->ichpmp);
value |= SOR_PLL0_FILTER(settings->filter);
value |= SOR_PLL0_VCOCAP(settings->vcocap);
tegra_sor_writel(sor, value, sor->soc->regs->pll0);
/* XXX not in TRM */
value = tegra_sor_readl(sor, sor->soc->regs->pll1);
value &= ~SOR_PLL1_LOADADJ_MASK;
value &= ~SOR_PLL1_TMDS_TERMADJ_MASK;
value |= SOR_PLL1_LOADADJ(settings->loadadj);
value |= SOR_PLL1_TMDS_TERMADJ(settings->tmds_termadj);
value |= SOR_PLL1_TMDS_TERM;
tegra_sor_writel(sor, value, sor->soc->regs->pll1);
value = tegra_sor_readl(sor, sor->soc->regs->pll3);
value &= ~SOR_PLL3_BG_TEMP_COEF_MASK;
value &= ~SOR_PLL3_BG_VREF_LEVEL_MASK;
value &= ~SOR_PLL3_AVDD10_LEVEL_MASK;
value &= ~SOR_PLL3_AVDD14_LEVEL_MASK;
value |= SOR_PLL3_BG_TEMP_COEF(settings->bg_temp_coef);
value |= SOR_PLL3_BG_VREF_LEVEL(settings->bg_vref_level);
value |= SOR_PLL3_AVDD10_LEVEL(settings->avdd10_level);
value |= SOR_PLL3_AVDD14_LEVEL(settings->avdd14_level);
tegra_sor_writel(sor, value, sor->soc->regs->pll3);
value = settings->drive_current[3] << 24 |
settings->drive_current[2] << 16 |
settings->drive_current[1] << 8 |
settings->drive_current[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0);
value = settings->preemphasis[3] << 24 |
settings->preemphasis[2] << 16 |
settings->preemphasis[1] << 8 |
settings->preemphasis[0] << 0;
tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value &= ~SOR_DP_PADCTL_TX_PU_MASK;
value |= SOR_DP_PADCTL_TX_PU_ENABLE;
value |= SOR_DP_PADCTL_TX_PU(settings->tx_pu_value);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
tegra_sor_update(sor);
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl2);
value &= ~SOR_DP_PADCTL_SPAREPLL_MASK;
value |= SOR_DP_PADCTL_SPAREPLL(settings->sparepll);
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl2);
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
/* power down pad calibration */
value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0);
value |= SOR_DP_PADCTL_PAD_CAL_PD;
tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0);
/* attach and wake up */
err = tegra_sor_attach(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
if (!dc->soc->has_nvdisplay) {
/* miscellaneous display controller settings */
value = VSYNC_H_POSITION(1);
tegra_dc_writel(dc, value, DC_DISP_DISP_TIMING_OPTIONS);
}
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
value |= SOR_ENABLE(sor->index);
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
value = tegra_dc_readl(dc, DC_DISP_DISP_COLOR_CONTROL);
value &= ~DITHER_CONTROL_MASK;
value &= ~BASE_COLOR_SIZE_MASK;
tegra_dc_commit(dc);
switch (state->bpc) {
case 6:
value |= BASE_COLOR_SIZE_666;
break;
err = tegra_sor_wakeup(sor);
if (err < 0)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
case 8:
value |= BASE_COLOR_SIZE_888;
break;
if (output->panel)
drm_panel_enable(output->panel);
}
case 10:
value |= BASE_COLOR_SIZE_101010;
break;
static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = {
.disable = tegra_sor_dp_disable,
.enable = tegra_sor_dp_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
};
case 12:
value |= BASE_COLOR_SIZE_121212;
break;
static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
{
int err;
default:
WARN(1, "%u bits-per-color not supported\n", state->bpc);
value |= BASE_COLOR_SIZE_888;
break;
sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
if (IS_ERR(sor->avdd_io_supply)) {
dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
PTR_ERR(sor->avdd_io_supply));
return PTR_ERR(sor->avdd_io_supply);
}
tegra_dc_writel(dc, value, DC_DISP_DISP_COLOR_CONTROL);
err = regulator_enable(sor->avdd_io_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
err);
return err;
}
/* XXX set display head owner */
value = tegra_sor_readl(sor, SOR_STATE1);
value &= ~SOR_STATE_ASY_OWNER_MASK;
value |= SOR_STATE_ASY_OWNER(1 + dc->pipe);
tegra_sor_writel(sor, value, SOR_STATE1);
sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
if (IS_ERR(sor->vdd_pll_supply)) {
dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
PTR_ERR(sor->vdd_pll_supply));
return PTR_ERR(sor->vdd_pll_supply);
}
err = tegra_sor_power_up(sor, 250);
if (err < 0)
dev_err(sor->dev, "failed to power up SOR: %d\n", err);
err = regulator_enable(sor->vdd_pll_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
err);
return err;
}
/* configure dynamic range of output */
value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
if (IS_ERR(sor->hdmi_supply)) {
dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
PTR_ERR(sor->hdmi_supply));
return PTR_ERR(sor->hdmi_supply);
}
/* configure colorspace */
value = tegra_sor_readl(sor, sor->soc->regs->head_state0 + dc->pipe);
value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
value |= SOR_HEAD_STATE_COLORSPACE_RGB;
tegra_sor_writel(sor, value, sor->soc->regs->head_state0 + dc->pipe);
err = regulator_enable(sor->hdmi_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
return err;
}
tegra_sor_mode_set(sor, mode, state);
INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
tegra_sor_update(sor);
return 0;
}
/* program preamble timing in SOR (XXX) */
value = tegra_sor_readl(sor, SOR_DP_SPARE0);
value &= ~SOR_DP_SPARE_DISP_VIDEO_PREAMBLE;
tegra_sor_writel(sor, value, SOR_DP_SPARE0);
static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
{
regulator_disable(sor->hdmi_supply);
regulator_disable(sor->vdd_pll_supply);
regulator_disable(sor->avdd_io_supply);
err = tegra_sor_attach(sor);
if (err < 0)
dev_err(sor->dev, "failed to attach SOR: %d\n", err);
return 0;
}
/* enable display to SOR clock and generate HDMI preamble */
value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.name = "HDMI",
.probe = tegra_sor_hdmi_probe,
.remove = tegra_sor_hdmi_remove,
.audio_enable = tegra_sor_hdmi_audio_enable,
.audio_disable = tegra_sor_hdmi_audio_disable,
};
if (!sor->soc->has_nvdisplay)
value |= SOR_ENABLE(1) | SOR1_TIMING_CYA;
else
value |= SOR_ENABLE(sor->index);
static int tegra_sor_dp_probe(struct tegra_sor *sor)
{
int err;
tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp");
if (IS_ERR(sor->avdd_io_supply))
return PTR_ERR(sor->avdd_io_supply);
if (dc->soc->has_nvdisplay) {
value = tegra_dc_readl(dc, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
value &= ~PROTOCOL_MASK;
value |= PROTOCOL_SINGLE_TMDS_A;
tegra_dc_writel(dc, value, DC_DISP_CORE_SOR_SET_CONTROL(sor->index));
}
err = regulator_enable(sor->avdd_io_supply);
if (err < 0)
return err;
tegra_dc_commit(dc);
sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll");
if (IS_ERR(sor->vdd_pll_supply))
return PTR_ERR(sor->vdd_pll_supply);
err = tegra_sor_wakeup(sor);
err = regulator_enable(sor->vdd_pll_supply);
if (err < 0)
dev_err(sor->dev, "failed to wakeup SOR: %d\n", err);
return err;
tegra_sor_hdmi_scdc_start(sor);
tegra_sor_audio_prepare(sor);
return 0;
}
static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = {
.disable = tegra_sor_hdmi_disable,
.enable = tegra_sor_hdmi_enable,
.atomic_check = tegra_sor_encoder_atomic_check,
static int tegra_sor_dp_remove(struct tegra_sor *sor)
{
regulator_disable(sor->vdd_pll_supply);
regulator_disable(sor->avdd_io_supply);
return 0;
}
static const struct tegra_sor_ops tegra_sor_dp_ops = {
.name = "DP",
.probe = tegra_sor_dp_probe,
.remove = tegra_sor_dp_remove,
};
static int tegra_sor_init(struct host1x_client *client)
......@@ -2811,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int connector = DRM_MODE_CONNECTOR_Unknown;
int encoder = DRM_MODE_ENCODER_NONE;
u32 value;
int err;
if (!sor->aux) {
if (sor->soc->supports_hdmi) {
if (sor->ops == &tegra_sor_hdmi_ops) {
connector = DRM_MODE_CONNECTOR_HDMIA;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_hdmi_helpers;
......@@ -2824,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client)
encoder = DRM_MODE_ENCODER_LVDS;
}
} else {
if (sor->soc->supports_edp) {
if (sor->output.panel) {
connector = DRM_MODE_CONNECTOR_eDP;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_edp_helpers;
} else if (sor->soc->supports_dp) {
helpers = &tegra_sor_dp_helpers;
} else {
connector = DRM_MODE_CONNECTOR_DisplayPort;
encoder = DRM_MODE_ENCODER_TMDS;
helpers = &tegra_sor_dp_helpers;
}
sor->link.ops = &tegra_sor_dp_link_ops;
sor->link.aux = sor->aux;
}
sor->output.dev = sor->dev;
......@@ -2914,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client)
if (err < 0)
return err;
/*
* Enable and unmask the HDA codec SCRATCH0 register interrupt. This
* is used for interoperability between the HDA codec driver and the
* HDMI/DP driver.
*/
value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0;
tegra_sor_writel(sor, value, SOR_INT_ENABLE);
tegra_sor_writel(sor, value, SOR_INT_MASK);
return 0;
}
......@@ -2931,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client)
struct tegra_sor *sor = host1x_client_to_sor(client);
int err;
tegra_sor_writel(sor, 0, SOR_INT_MASK);
tegra_sor_writel(sor, 0, SOR_INT_ENABLE);
tegra_output_exit(&sor->output);
if (sor->aux) {
......@@ -2956,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = {
.exit = tegra_sor_exit,
};
static const struct tegra_sor_ops tegra_sor_edp_ops = {
.name = "eDP",
};
static int tegra_sor_hdmi_probe(struct tegra_sor *sor)
{
int err;
sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io");
if (IS_ERR(sor->avdd_io_supply)) {
dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n",
PTR_ERR(sor->avdd_io_supply));
return PTR_ERR(sor->avdd_io_supply);
}
err = regulator_enable(sor->avdd_io_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n",
err);
return err;
}
sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll");
if (IS_ERR(sor->vdd_pll_supply)) {
dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n",
PTR_ERR(sor->vdd_pll_supply));
return PTR_ERR(sor->vdd_pll_supply);
}
err = regulator_enable(sor->vdd_pll_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n",
err);
return err;
}
sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi");
if (IS_ERR(sor->hdmi_supply)) {
dev_err(sor->dev, "cannot get HDMI supply: %ld\n",
PTR_ERR(sor->hdmi_supply));
return PTR_ERR(sor->hdmi_supply);
}
err = regulator_enable(sor->hdmi_supply);
if (err < 0) {
dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err);
return err;
}
INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work);
return 0;
}
static int tegra_sor_hdmi_remove(struct tegra_sor *sor)
{
regulator_disable(sor->hdmi_supply);
regulator_disable(sor->vdd_pll_supply);
regulator_disable(sor->avdd_io_supply);
return 0;
}
static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
.name = "HDMI",
.probe = tegra_sor_hdmi_probe,
.remove = tegra_sor_hdmi_remove,
};
static const u8 tegra124_sor_xbar_cfg[5] = {
0, 1, 2, 3, 4
};
......@@ -3044,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = {
.dp_padctl2 = 0x73,
};
/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. */
static const u8 tegra124_sor_lane_map[4] = {
2, 1, 0, 3,
};
static const u8 tegra124_sor_voltage_swing[4][4][4] = {
{
{ 0x13, 0x19, 0x1e, 0x28 },
{ 0x1e, 0x25, 0x2d, },
{ 0x28, 0x32, },
{ 0x3c, },
}, {
{ 0x12, 0x17, 0x1b, 0x25 },
{ 0x1c, 0x23, 0x2a, },
{ 0x25, 0x2f, },
{ 0x39, }
}, {
{ 0x12, 0x16, 0x1a, 0x22 },
{ 0x1b, 0x20, 0x27, },
{ 0x24, 0x2d, },
{ 0x36, },
}, {
{ 0x11, 0x14, 0x17, 0x1f },
{ 0x19, 0x1e, 0x24, },
{ 0x22, 0x2a, },
{ 0x32, },
},
};
static const u8 tegra124_sor_pre_emphasis[4][4][4] = {
{
{ 0x00, 0x09, 0x13, 0x25 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00, },
}, {
{ 0x00, 0x0a, 0x14, 0x28 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00 },
}, {
{ 0x00, 0x0a, 0x14, 0x28 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00, },
}, {
{ 0x00, 0x0a, 0x14, 0x28 },
{ 0x00, 0x0f, 0x1e, },
{ 0x00, 0x14, },
{ 0x00, },
},
};
static const u8 tegra124_sor_post_cursor[4][4][4] = {
{
{ 0x00, 0x00, 0x00, 0x00 },
{ 0x00, 0x00, 0x00, },
{ 0x00, 0x00, },
{ 0x00, },
}, {
{ 0x02, 0x02, 0x04, 0x05 },
{ 0x02, 0x04, 0x05, },
{ 0x04, 0x05, },
{ 0x05, },
}, {
{ 0x04, 0x05, 0x08, 0x0b },
{ 0x05, 0x09, 0x0b, },
{ 0x08, 0x0a, },
{ 0x0b, },
}, {
{ 0x05, 0x09, 0x0b, 0x12 },
{ 0x09, 0x0d, 0x12, },
{ 0x0b, 0x0f, },
{ 0x12, },
},
};
static const u8 tegra124_sor_tx_pu[4][4][4] = {
{
{ 0x20, 0x30, 0x40, 0x60 },
{ 0x30, 0x40, 0x60, },
{ 0x40, 0x60, },
{ 0x60, },
}, {
{ 0x20, 0x20, 0x30, 0x50 },
{ 0x30, 0x40, 0x50, },
{ 0x40, 0x50, },
{ 0x60, },
}, {
{ 0x20, 0x20, 0x30, 0x40, },
{ 0x30, 0x30, 0x40, },
{ 0x40, 0x50, },
{ 0x60, },
}, {
{ 0x20, 0x20, 0x20, 0x40, },
{ 0x30, 0x30, 0x40, },
{ 0x40, 0x40, },
{ 0x60, },
},
};
static const struct tegra_sor_soc tegra124_sor = {
.supports_edp = true,
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = false,
.supports_dp = true,
.supports_audio = false,
.supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra124_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const u8 tegra132_sor_pre_emphasis[4][4][4] = {
{
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x01, 0x0e, 0x1d, },
{ 0x01, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00 },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
},
};
static const struct tegra_sor_soc tegra132_sor = {
.supports_lvds = true,
.supports_hdmi = false,
.supports_dp = true,
.supports_audio = false,
.supports_hdcp = false,
.regs = &tegra124_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra132_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra210_sor_regs = {
......@@ -3069,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = {
.dp_padctl2 = 0x73,
};
static const u8 tegra210_sor_xbar_cfg[5] = {
2, 1, 0, 3, 4
};
static const u8 tegra210_sor_lane_map[4] = {
0, 1, 2, 3,
};
static const struct tegra_sor_soc tegra210_sor = {
.supports_edp = true,
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = false,
.supports_dp = true,
.supports_audio = false,
.supports_hdcp = false,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.xbar_cfg = tegra124_sor_xbar_cfg,
};
static const u8 tegra210_sor_xbar_cfg[5] = {
2, 1, 0, 3, 4
.xbar_cfg = tegra210_sor_xbar_cfg,
.lane_map = tegra210_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra124_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_soc tegra210_sor1 = {
.supports_edp = false,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
.supports_audio = true,
.supports_hdcp = true,
.regs = &tegra210_sor_regs,
.has_nvdisplay = false,
.num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
.settings = tegra210_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
.lane_map = tegra210_sor_lane_map,
.voltage_swing = tegra124_sor_voltage_swing,
.pre_emphasis = tegra124_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra186_sor_regs = {
......@@ -3113,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = {
.dp_padctl2 = 0x16a,
};
static const struct tegra_sor_soc tegra186_sor = {
.supports_edp = false,
.supports_lvds = false,
.supports_hdmi = false,
.supports_dp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
static const u8 tegra186_sor_voltage_swing[4][4][4] = {
{
{ 0x13, 0x19, 0x1e, 0x28 },
{ 0x1e, 0x25, 0x2d, },
{ 0x28, 0x32, },
{ 0x39, },
}, {
{ 0x12, 0x16, 0x1b, 0x25 },
{ 0x1c, 0x23, 0x2a, },
{ 0x25, 0x2f, },
{ 0x37, }
}, {
{ 0x12, 0x16, 0x1a, 0x22 },
{ 0x1b, 0x20, 0x27, },
{ 0x24, 0x2d, },
{ 0x35, },
}, {
{ 0x11, 0x14, 0x17, 0x1f },
{ 0x19, 0x1e, 0x24, },
{ 0x22, 0x2a, },
{ 0x32, },
},
};
.xbar_cfg = tegra124_sor_xbar_cfg,
static const u8 tegra186_sor_pre_emphasis[4][4][4] = {
{
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x01, 0x0e, 0x1d, },
{ 0x01, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00 },
}, {
{ 0x00, 0x08, 0x14, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
}, {
{ 0x00, 0x08, 0x12, 0x24 },
{ 0x00, 0x0e, 0x1d, },
{ 0x00, 0x13, },
{ 0x00, },
},
};
static const struct tegra_sor_soc tegra186_sor1 = {
.supports_edp = false,
static const struct tegra_sor_soc tegra186_sor = {
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
.supports_audio = true,
.supports_hdcp = true,
.regs = &tegra186_sor_regs,
.has_nvdisplay = true,
.num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults),
.settings = tegra186_sor_hdmi_defaults,
.xbar_cfg = tegra124_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra186_sor_voltage_swing,
.pre_emphasis = tegra186_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct tegra_sor_regs tegra194_sor_regs = {
......@@ -3156,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = {
};
static const struct tegra_sor_soc tegra194_sor = {
.supports_edp = true,
.supports_lvds = false,
.supports_hdmi = true,
.supports_dp = true,
.supports_audio = true,
.supports_hdcp = true,
.regs = &tegra194_sor_regs,
.has_nvdisplay = true,
......@@ -3168,14 +3543,19 @@ static const struct tegra_sor_soc tegra194_sor = {
.settings = tegra194_sor_hdmi_defaults,
.xbar_cfg = tegra210_sor_xbar_cfg,
.lane_map = tegra124_sor_lane_map,
.voltage_swing = tegra186_sor_voltage_swing,
.pre_emphasis = tegra186_sor_pre_emphasis,
.post_cursor = tegra124_sor_post_cursor,
.tx_pu = tegra124_sor_tx_pu,
};
static const struct of_device_id tegra_sor_of_match[] = {
{ .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor },
{ .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 },
{ .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor },
{ .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 },
{ .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor },
{ .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor },
{ .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor },
{ },
};
......@@ -3201,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor)
* earlier
*/
sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index;
} else {
if (!sor->soc->supports_audio)
sor->index = 0;
else
sor->index = 1;
}
err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5);
......@@ -3235,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data)
tegra_hda_parse_format(format, &sor->format);
tegra_sor_hdmi_audio_enable(sor);
if (sor->ops->audio_enable)
sor->ops->audio_enable(sor);
} else {
tegra_sor_hdmi_audio_disable(sor);
if (sor->ops->audio_disable)
sor->ops->audio_disable(sor);
}
}
......@@ -3274,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
if (!sor->aux)
return -EPROBE_DEFER;
sor->output.ddc = &sor->aux->ddc;
}
if (!sor->aux) {
......@@ -3288,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev)
return -ENODEV;
}
} else {
if (sor->soc->supports_edp) {
sor->ops = &tegra_sor_edp_ops;
sor->pad = TEGRA_IO_PAD_LVDS;
} else if (sor->soc->supports_dp) {
dev_err(&pdev->dev, "DisplayPort not supported yet\n");
return -ENODEV;
} else {
dev_err(&pdev->dev, "unknown (DP) support\n");
return -ENODEV;
}
np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0);
/*
* No need to keep this around since we only use it as a check
* to see if a panel is connected (eDP) or not (DP).
*/
of_node_put(np);
sor->ops = &tegra_sor_dp_ops;
sor->pad = TEGRA_IO_PAD_LVDS;
}
err = tegra_sor_parse_dt(sor);
......@@ -3452,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
* pad output clock.
*/
if (!sor->clk_pad) {
char *name;
err = pm_runtime_get_sync(&pdev->dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
......@@ -3459,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor,
"sor1_pad_clkout");
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index);
if (!name) {
err = -ENOMEM;
goto remove;
}
sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
pm_runtime_put(&pdev->dev);
}
......
......@@ -39,6 +39,7 @@
#define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6)
#define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6)
#define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6)
#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4)
#define SOR_STATE_ASY_OWNER_MASK 0xf
#define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0)
......@@ -283,10 +284,12 @@
#define SOR_DP_PADCTL_CM_TXD_2 (1 << 6)
#define SOR_DP_PADCTL_CM_TXD_1 (1 << 5)
#define SOR_DP_PADCTL_CM_TXD_0 (1 << 4)
#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x)))
#define SOR_DP_PADCTL_PD_TXD_3 (1 << 3)
#define SOR_DP_PADCTL_PD_TXD_0 (1 << 2)
#define SOR_DP_PADCTL_PD_TXD_1 (1 << 1)
#define SOR_DP_PADCTL_PD_TXD_2 (1 << 0)
#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x)))
#define SOR_DP_PADCTL1 0x5d
......
......@@ -34,7 +34,6 @@ struct vic {
void __iomem *regs;
struct tegra_drm_client client;
struct host1x_channel *channel;
struct iommu_domain *domain;
struct device *dev;
struct clk *clk;
struct reset_control *rst;
......@@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev)
static int vic_boot(struct vic *vic)
{
#ifdef CONFIG_IOMMU_API
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
#endif
u32 fce_ucode_size, fce_bin_data_offset;
void *hdr;
int err = 0;
......@@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic)
return 0;
#ifdef CONFIG_IOMMU_API
if (vic->config->supports_sid) {
struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
if (vic->config->supports_sid && spec) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
if (spec && spec->num_ids > 0) {
if (spec->num_ids > 0) {
value = spec->ids[0] & 0xffff;
vic_writel(vic, value, VIC_THI_STREAMID0);
......@@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic)
if (err < 0)
return err;
hdr = vic->falcon.firmware.vaddr;
hdr = vic->falcon.firmware.virt;
fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET);
hdr = vic->falcon.firmware.vaddr +
hdr = vic->falcon.firmware.virt +
*(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET);
fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET);
......@@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic)
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE,
fce_ucode_size);
falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET,
(vic->falcon.firmware.paddr + fce_bin_data_offset)
(vic->falcon.firmware.iova + fce_bin_data_offset)
>> 8);
err = falcon_wait_idle(&vic->falcon);
......@@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic)
return 0;
}
/*
 * vic_falcon_alloc() - allocation callback for the falcon core
 * @falcon: falcon instance; ->data holds the tegra_drm context set up by
 *          the driver before boot
 * @size: number of bytes to allocate
 * @iova: output parameter receiving the IO virtual address
 *
 * Allocates @size bytes through the shared tegra_drm allocator and returns
 * the CPU virtual address, with the IOVA reported via @iova.
 */
static void *vic_falcon_alloc(struct falcon *falcon, size_t size,
			      dma_addr_t *iova)
{
	struct tegra_drm *drm = falcon->data;

	return tegra_drm_alloc(drm, size, iova);
}
/*
 * vic_falcon_free() - free callback for the falcon core
 * @falcon: falcon instance; ->data holds the tegra_drm context
 * @size: size of the allocation in bytes
 * @iova: IO virtual address of the allocation
 * @va: CPU virtual address of the allocation
 *
 * Releases a buffer previously obtained via vic_falcon_alloc().
 *
 * Fix: the original wrote "return tegra_drm_free(...);" — a return
 * statement with an expression is not permitted in a function whose return
 * type is void (C11 6.8.6.4p1), so invoke the call as a plain statement.
 */
static void vic_falcon_free(struct falcon *falcon, size_t size,
			    dma_addr_t iova, void *va)
{
	struct tegra_drm *tegra = falcon->data;

	tegra_drm_free(tegra, size, va, iova);
}
/*
 * Memory-management callbacks handed to the falcon core so it can obtain
 * and release firmware buffers (see vic_falcon_alloc()/vic_falcon_free()).
 */
static const struct falcon_ops vic_falcon_ops = {
	.alloc = vic_falcon_alloc,
	.free = vic_falcon_free
};
static int vic_init(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
if (group && tegra->domain) {
err = iommu_attach_group(tegra->domain, group);
if (err < 0) {
dev_err(vic->dev, "failed to attach to domain: %d\n",
err);
return err;
}
vic->domain = tegra->domain;
err = host1x_client_iommu_attach(client);
if (err < 0) {
dev_err(vic->dev, "failed to attach to domain: %d\n", err);
return err;
}
vic->channel = host1x_channel_request(client->dev);
vic->channel = host1x_channel_request(client);
if (!vic->channel) {
err = -ENOMEM;
goto detach;
......@@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client)
if (err < 0)
goto free_syncpt;
/*
* Inherit the DMA parameters (such as maximum segment size) from the
* parent device.
*/
client->dev->dma_parms = client->parent->dma_parms;
return 0;
free_syncpt:
......@@ -221,8 +201,7 @@ static int vic_init(struct host1x_client *client)
free_channel:
host1x_channel_put(vic->channel);
detach:
if (group && tegra->domain)
iommu_detach_group(tegra->domain, group);
host1x_client_iommu_detach(client);
return err;
}
......@@ -230,22 +209,32 @@ static int vic_init(struct host1x_client *client)
static int vic_exit(struct host1x_client *client)
{
struct tegra_drm_client *drm = host1x_to_drm_client(client);
struct iommu_group *group = iommu_group_get(client->dev);
struct drm_device *dev = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = dev->dev_private;
struct vic *vic = to_vic(drm);
int err;
/* avoid a dangling pointer just in case this disappears */
client->dev->dma_parms = NULL;
err = tegra_drm_unregister_client(tegra, drm);
if (err < 0)
return err;
host1x_syncpt_free(client->syncpts[0]);
host1x_channel_put(vic->channel);
if (vic->domain) {
iommu_detach_group(vic->domain, group);
vic->domain = NULL;
host1x_client_iommu_detach(client);
if (client->group) {
dma_unmap_single(vic->dev, vic->falcon.firmware.phys,
vic->falcon.firmware.size, DMA_TO_DEVICE);
tegra_drm_free(tegra, vic->falcon.firmware.size,
vic->falcon.firmware.virt,
vic->falcon.firmware.iova);
} else {
dma_free_coherent(vic->dev, vic->falcon.firmware.size,
vic->falcon.firmware.virt,
vic->falcon.firmware.iova);
}
return 0;
......@@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = {
static int vic_load_firmware(struct vic *vic)
{
struct host1x_client *client = &vic->client.base;
struct tegra_drm *tegra = vic->client.drm;
dma_addr_t iova;
size_t size;
void *virt;
int err;
if (vic->falcon.data)
if (vic->falcon.firmware.virt)
return 0;
vic->falcon.data = vic->client.drm;
err = falcon_read_firmware(&vic->falcon, vic->config->firmware);
if (err < 0)
goto cleanup;
return err;
size = vic->falcon.firmware.size;
if (!client->group) {
virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL);
err = dma_mapping_error(vic->dev, iova);
if (err < 0)
return err;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
}
vic->falcon.firmware.virt = virt;
vic->falcon.firmware.iova = iova;
err = falcon_load_firmware(&vic->falcon);
if (err < 0)
goto cleanup;
/*
* In this case we have received an IOVA from the shared domain, so we
* need to make sure to get the physical address so that the DMA API
* knows what memory pages to flush the cache for.
*/
if (client->group) {
dma_addr_t phys;
phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE);
err = dma_mapping_error(vic->dev, phys);
if (err < 0)
goto cleanup;
vic->falcon.firmware.phys = phys;
}
return 0;
cleanup:
vic->falcon.data = NULL;
if (!client->group)
dma_free_coherent(vic->dev, size, virt, iova);
else
tegra_drm_free(tegra, size, virt, iova);
return err;
}
......@@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev)
struct vic *vic;
int err;
/* inherit DMA mask from host1x parent */
err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
if (err < 0) {
dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
return err;
}
vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL);
if (!vic)
return -ENOMEM;
......@@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev)
vic->falcon.dev = dev;
vic->falcon.regs = vic->regs;
vic->falcon.ops = &vic_falcon_ops;
err = falcon_init(&vic->falcon);
if (err < 0)
......
......@@ -2,7 +2,7 @@
config TEGRA_HOST1X
tristate "NVIDIA Tegra host1x driver"
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
select IOMMU_IOVA if IOMMU_SUPPORT
select IOMMU_IOVA
help
Driver for the NVIDIA Tegra host1x hardware.
......
......@@ -445,7 +445,7 @@ static int host1x_device_add(struct host1x *host1x,
of_dma_configure(&device->dev, host1x->dev->of_node, true);
device->dev.dma_parms = &device->dma_parms;
dma_set_max_seg_size(&device->dev, SZ_4M);
dma_set_max_seg_size(&device->dev, UINT_MAX);
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
......
......@@ -232,9 +232,9 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
*
* Must be called with the cdma lock held.
*/
int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
struct host1x_cdma *cdma,
unsigned int needed)
static int host1x_cdma_wait_pushbuffer_space(struct host1x *host1x,
struct host1x_cdma *cdma,
unsigned int needed)
{
while (true) {
struct push_buffer *pb = &cdma->push_buffer;
......
......@@ -115,14 +115,14 @@ static struct host1x_channel *acquire_unused_channel(struct host1x *host)
/**
* host1x_channel_request() - Allocate a channel
* @device: Host1x unit this channel will be used to send commands to
* @client: Host1x client this channel will be used to send commands to
*
* Allocates a new host1x channel for @device. May return NULL if CDMA
* Allocates a new host1x channel for @client. May return NULL if CDMA
* initialization fails.
*/
struct host1x_channel *host1x_channel_request(struct device *dev)
struct host1x_channel *host1x_channel_request(struct host1x_client *client)
{
struct host1x *host = dev_get_drvdata(dev->parent);
struct host1x *host = dev_get_drvdata(client->dev->parent);
struct host1x_channel_list *chlist = &host->channel_list;
struct host1x_channel *channel;
int err;
......@@ -133,7 +133,8 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
kref_init(&channel->refcount);
mutex_init(&channel->submitlock);
channel->dev = dev;
channel->client = client;
channel->dev = client->dev;
err = host1x_hw_channel_init(host, channel, channel->id);
if (err < 0)
......@@ -148,7 +149,7 @@ struct host1x_channel *host1x_channel_request(struct device *dev)
fail:
clear_bit(channel->id, chlist->allocated_channels);
dev_err(dev, "failed to initialize channel\n");
dev_err(client->dev, "failed to initialize channel\n");
return NULL;
}
......
......@@ -26,6 +26,7 @@ struct host1x_channel {
unsigned int id;
struct mutex submitlock;
void __iomem *regs;
struct host1x_client *client;
struct device *dev;
struct host1x_cdma cdma;
};
......
......@@ -18,10 +18,6 @@
#include <trace/events/host1x.h>
#undef CREATE_TRACE_POINTS
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
#include <asm/dma-iommu.h>
#endif
#include "bus.h"
#include "channel.h"
#include "debug.h"
......@@ -77,6 +73,10 @@ static const struct host1x_info host1x01_info = {
.init = host1x01_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
};
static const struct host1x_info host1x02_info = {
......@@ -87,6 +87,10 @@ static const struct host1x_info host1x02_info = {
.init = host1x02_init,
.sync_offset = 0x3000,
.dma_mask = DMA_BIT_MASK(32),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
};
static const struct host1x_info host1x04_info = {
......@@ -97,6 +101,10 @@ static const struct host1x_info host1x04_info = {
.init = host1x04_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
};
static const struct host1x_info host1x05_info = {
......@@ -107,6 +115,10 @@ static const struct host1x_info host1x05_info = {
.init = host1x05_init,
.sync_offset = 0x2100,
.dma_mask = DMA_BIT_MASK(34),
.has_wide_gather = false,
.has_hypervisor = false,
.num_sid_entries = 0,
.sid_table = NULL,
};
static const struct host1x_sid_entry tegra186_sid_table[] = {
......@@ -126,6 +138,7 @@ static const struct host1x_info host1x06_info = {
.init = host1x06_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
.has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra186_sid_table),
.sid_table = tegra186_sid_table,
......@@ -148,6 +161,7 @@ static const struct host1x_info host1x07_info = {
.init = host1x07_init,
.sync_offset = 0x0,
.dma_mask = DMA_BIT_MASK(40),
.has_wide_gather = true,
.has_hypervisor = true,
.num_sid_entries = ARRAY_SIZE(tegra194_sid_table),
.sid_table = tegra194_sid_table,
......@@ -178,6 +192,117 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
/*
 * host1x_iommu_attach() - attach host1x to an explicitly managed IOMMU
 *                         domain, if appropriate
 * @host: host1x instance
 *
 * Return: the domain host1x ends up using — the DMA API's preexisting
 * domain, a newly allocated and attached domain, NULL when no IOMMU is to
 * be used, or an ERR_PTR() on failure.
 */
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
	int err;

	/*
	 * If the host1x firewall is enabled, there's no need to enable IOMMU
	 * support. Similarly, if host1x is already attached to an IOMMU (via
	 * the DMA API), don't try to attach again.
	 */
	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
		return domain;

	host->group = iommu_group_get(host->dev);
	if (host->group) {
		struct iommu_domain_geometry *geometry;
		dma_addr_t start, end;
		unsigned long order;

		/* the IOVA allocator set up below relies on the IOVA cache */
		err = iova_cache_get();
		if (err < 0)
			goto put_group;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain) {
			err = -ENOMEM;
			goto put_cache;
		}

		err = iommu_attach_group(host->domain, host->group);
		if (err) {
			/*
			 * -ENODEV means there is no IOMMU to attach to;
			 * clear err so that ERR_PTR(err) below becomes
			 * NULL, i.e. "proceed without an IOMMU".
			 */
			if (err == -ENODEV)
				err = 0;

			goto free_domain;
		}

		/*
		 * Clamp the usable IOVA range to what both the IOMMU
		 * aperture and the host1x DMA mask can address.
		 */
		geometry = &host->domain->geometry;
		start = geometry->aperture_start & host->info->dma_mask;
		end = geometry->aperture_end & host->info->dma_mask;

		/* granule = smallest page size the IOMMU supports */
		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order, start >> order);
		host->iova_end = end;

		domain = host->domain;
	}

	return domain;

	/* unwind in reverse order of acquisition */
free_domain:
	iommu_domain_free(host->domain);
	host->domain = NULL;
put_cache:
	iova_cache_put();
put_group:
	iommu_group_put(host->group);
	host->group = NULL;

	return ERR_PTR(err);
}
/*
 * host1x_iommu_init() - set up DMA addressing for host1x
 * @host: host1x instance
 *
 * Attaches to an IOMMU where appropriate and programs the device's DMA
 * mask accordingly. Returns 0 on success or a negative error code.
 */
static int host1x_iommu_init(struct host1x *host)
{
	u64 mask = host->info->dma_mask;
	struct iommu_domain *domain = host1x_iommu_attach(host);
	int ret;

	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		dev_err(host->dev, "failed to attach to IOMMU: %d\n", ret);
		return ret;
	}

	/*
	 * If we're not behind an IOMMU make sure we don't get push buffers
	 * that are allocated outside of the range addressable by the GATHER
	 * opcode.
	 *
	 * Newer generations of Tegra (Tegra186 and later) support a wide
	 * variant of the GATHER opcode that allows addressing more bits.
	 */
	if (!domain && !host->info->has_wide_gather)
		mask = DMA_BIT_MASK(32);

	ret = dma_coerce_mask_and_coherent(host->dev, mask);
	if (ret < 0) {
		dev_err(host->dev, "failed to set DMA mask: %d\n", ret);
		return ret;
	}

	return 0;
}
/*
 * host1x_iommu_exit() - tear down the explicitly managed IOMMU setup
 * @host: host1x instance
 *
 * Undoes host1x_iommu_attach(): releases the IOVA domain, detaches and
 * frees the IOMMU domain, drops the IOVA cache and the group reference.
 * A no-op when host1x never attached its own domain.
 */
static void host1x_iommu_exit(struct host1x *host)
{
	if (!host->domain)
		return;

	put_iova_domain(&host->iova);
	iommu_detach_group(host->domain, host->group);
	iommu_domain_free(host->domain);
	host->domain = NULL;
	iova_cache_put();

	iommu_group_put(host->group);
	host->group = NULL;
}
static int host1x_probe(struct platform_device *pdev)
{
struct host1x *host;
......@@ -237,7 +362,8 @@ static int host1x_probe(struct platform_device *pdev)
return PTR_ERR(host->hv_regs);
}
dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
host->dev->dma_parms = &host->dma_parms;
dma_set_max_seg_size(host->dev, UINT_MAX);
if (host->info->init) {
err = host->info->init(host);
......@@ -261,87 +387,42 @@ static int host1x_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err;
}
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
if (host->dev->archdata.mapping) {
struct dma_iommu_mapping *mapping =
to_dma_iommu_mapping(host->dev);
arm_iommu_detach_device(host->dev);
arm_iommu_release_mapping(mapping);
}
#endif
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
goto skip_iommu;
host->group = iommu_group_get(&pdev->dev);
if (host->group) {
struct iommu_domain_geometry *geometry;
u64 mask = dma_get_mask(host->dev);
dma_addr_t start, end;
unsigned long order;
err = iova_cache_get();
if (err < 0)
goto put_group;
host->domain = iommu_domain_alloc(&platform_bus_type);
if (!host->domain) {
err = -ENOMEM;
goto put_cache;
}
err = iommu_attach_group(host->domain, host->group);
if (err) {
if (err == -ENODEV) {
iommu_domain_free(host->domain);
host->domain = NULL;
iova_cache_put();
iommu_group_put(host->group);
host->group = NULL;
goto skip_iommu;
}
goto fail_free_domain;
}
geometry = &host->domain->geometry;
start = geometry->aperture_start & mask;
end = geometry->aperture_end & mask;
order = __ffs(host->domain->pgsize_bitmap);
init_iova_domain(&host->iova, 1UL << order, start >> order);
host->iova_end = end;
err = host1x_iommu_init(host);
if (err < 0) {
dev_err(&pdev->dev, "failed to setup IOMMU: %d\n", err);
return err;
}
skip_iommu:
err = host1x_channel_list_init(&host->channel_list,
host->info->nb_channels);
if (err) {
dev_err(&pdev->dev, "failed to initialize channel list\n");
goto fail_detach_device;
goto iommu_exit;
}
err = clk_prepare_enable(host->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock\n");
goto fail_free_channels;
goto free_channels;
}
err = reset_control_deassert(host->rst);
if (err < 0) {
dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
goto fail_unprepare_disable;
goto unprepare_disable;
}
err = host1x_syncpt_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize syncpts\n");
goto fail_reset_assert;
goto reset_assert;
}
err = host1x_intr_init(host, syncpt_irq);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
goto fail_deinit_syncpt;
goto deinit_syncpt;
}
host1x_debug_init(host);
......@@ -351,33 +432,22 @@ static int host1x_probe(struct platform_device *pdev)
err = host1x_register(host);
if (err < 0)
goto fail_deinit_intr;
goto deinit_intr;
return 0;
fail_deinit_intr:
deinit_intr:
host1x_intr_deinit(host);
fail_deinit_syncpt:
deinit_syncpt:
host1x_syncpt_deinit(host);
fail_reset_assert:
reset_assert:
reset_control_assert(host->rst);
fail_unprepare_disable:
unprepare_disable:
clk_disable_unprepare(host->clk);
fail_free_channels:
free_channels:
host1x_channel_list_free(&host->channel_list);
fail_detach_device:
if (host->group && host->domain) {
put_iova_domain(&host->iova);
iommu_detach_group(host->domain, host->group);
}
fail_free_domain:
if (host->domain)
iommu_domain_free(host->domain);
put_cache:
if (host->group)
iova_cache_put();
put_group:
iommu_group_put(host->group);
iommu_exit:
host1x_iommu_exit(host);
return err;
}
......@@ -387,18 +457,12 @@ static int host1x_remove(struct platform_device *pdev)
struct host1x *host = platform_get_drvdata(pdev);
host1x_unregister(host);
host1x_debug_deinit(host);
host1x_intr_deinit(host);
host1x_syncpt_deinit(host);
reset_control_assert(host->rst);
clk_disable_unprepare(host->clk);
if (host->domain) {
put_iova_domain(&host->iova);
iommu_detach_group(host->domain, host->group);
iommu_domain_free(host->domain);
iova_cache_put();
iommu_group_put(host->group);
}
host1x_iommu_exit(host);
return 0;
}
......
......@@ -97,6 +97,7 @@ struct host1x_info {
int (*init)(struct host1x *host1x); /* initialize per SoC ops */
unsigned int sync_offset; /* offset of syncpoint registers */
u64 dma_mask; /* mask of addressable memory */
bool has_wide_gather; /* supports GATHER_W opcode */
bool has_hypervisor; /* has hypervisor registers */
unsigned int num_sid_entries;
const struct host1x_sid_entry *sid_table;
......@@ -140,6 +141,8 @@ struct host1x {
struct list_head devices;
struct list_head list;
struct device_dma_parameters dma_parms;
};
void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
......
......@@ -105,7 +105,6 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
/* Add nr_completed to trace */
trace_host1x_channel_submit_complete(dev_name(channel->dev),
waiter->count, waiter->thresh);
}
static void action_wakeup(struct host1x_waitlist *waiter)
......
......@@ -99,6 +99,8 @@ EXPORT_SYMBOL(host1x_job_add_gather);
static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
{
struct host1x_client *client = job->client;
struct device *dev = client->dev;
unsigned int i;
int err;
......@@ -106,8 +108,8 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
for (i = 0; i < job->num_relocs; i++) {
struct host1x_reloc *reloc = &job->relocs[i];
dma_addr_t phys_addr, *phys;
struct sg_table *sgt;
dma_addr_t phys_addr;
reloc->target.bo = host1x_bo_get(reloc->target.bo);
if (!reloc->target.bo) {
......@@ -115,7 +117,50 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
if (client->group)
phys = &phys_addr;
else
phys = NULL;
sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
goto unpin;
}
if (sgt) {
unsigned long mask = HOST1X_RELOC_READ |
HOST1X_RELOC_WRITE;
enum dma_data_direction dir;
switch (reloc->flags & mask) {
case HOST1X_RELOC_READ:
dir = DMA_TO_DEVICE;
break;
case HOST1X_RELOC_WRITE:
dir = DMA_FROM_DEVICE;
break;
case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
dir = DMA_BIDIRECTIONAL;
break;
default:
err = -EINVAL;
goto unpin;
}
err = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
if (!err) {
err = -ENOMEM;
goto unpin;
}
job->unpins[job->num_unpins].dev = dev;
job->unpins[job->num_unpins].dir = dir;
phys_addr = sg_dma_address(sgt->sgl);
}
job->addr_phys[job->num_unpins] = phys_addr;
job->unpins[job->num_unpins].bo = reloc->target.bo;
......@@ -139,7 +184,11 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
phys_addr = host1x_bo_pin(g->bo, &sgt);
sgt = host1x_bo_pin(host->dev, g->bo, NULL);
if (IS_ERR(sgt)) {
err = PTR_ERR(sgt);
goto unpin;
}
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
for_each_sg(sgt->sgl, sg, sgt->nents, j)
......@@ -163,15 +212,24 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
goto unpin;
}
job->addr_phys[job->num_unpins] =
iova_dma_addr(&host->iova, alloc);
job->unpins[job->num_unpins].size = gather_size;
phys_addr = iova_dma_addr(&host->iova, alloc);
} else {
job->addr_phys[job->num_unpins] = phys_addr;
err = dma_map_sg(host->dev, sgt->sgl, sgt->nents,
DMA_TO_DEVICE);
if (!err) {
err = -ENOMEM;
goto unpin;
}
job->unpins[job->num_unpins].dev = host->dev;
phys_addr = sg_dma_address(sgt->sgl);
}
job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];
job->addr_phys[job->num_unpins] = phys_addr;
job->gather_addr_phys[i] = phys_addr;
job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
job->unpins[job->num_unpins].bo = g->bo;
job->unpins[job->num_unpins].sgt = sgt;
job->num_unpins++;
......@@ -436,7 +494,8 @@ static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
return err;
}
static inline int copy_gathers(struct host1x_job *job, struct device *dev)
static inline int copy_gathers(struct device *host, struct host1x_job *job,
struct device *dev)
{
struct host1x_firewall fw;
size_t size = 0;
......@@ -459,12 +518,12 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
* Try a non-blocking allocation from a higher priority pools first,
* as awaiting for the allocation here is a major performance hit.
*/
job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
GFP_NOWAIT);
/* the higher priority allocation failed, try the generic-blocking */
if (!job->gather_copy_mapped)
job->gather_copy_mapped = dma_alloc_wc(dev, size,
job->gather_copy_mapped = dma_alloc_wc(host, size,
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped)
......@@ -512,7 +571,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
goto out;
if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
err = copy_gathers(job, dev);
err = copy_gathers(host->dev, job, dev);
if (err)
goto out;
}
......@@ -557,6 +616,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
struct device *dev = unpin->dev ?: host->dev;
struct sg_table *sgt = unpin->sgt;
if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
unpin->size && host->domain) {
......@@ -566,14 +627,18 @@ void host1x_job_unpin(struct host1x_job *job)
iova_pfn(&host->iova, job->addr_phys[i]));
}
host1x_bo_unpin(unpin->bo, unpin->sgt);
if (unpin->dev && sgt)
dma_unmap_sg(unpin->dev, sgt->sgl, sgt->nents,
unpin->dir);
host1x_bo_unpin(dev, unpin->bo, sgt);
host1x_bo_put(unpin->bo);
}
job->num_unpins = 0;
if (job->gather_copy_size)
dma_free_wc(job->channel->dev, job->gather_copy_size,
dma_free_wc(host->dev, job->gather_copy_size,
job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);
......
......@@ -8,6 +8,8 @@
#ifndef __HOST1X_JOB_H
#define __HOST1X_JOB_H
#include <linux/dma-direction.h>
struct host1x_job_gather {
unsigned int words;
dma_addr_t base;
......@@ -19,7 +21,9 @@ struct host1x_job_gather {
struct host1x_job_unpin_data {
struct host1x_bo *bo;
struct sg_table *sgt;
struct device *dev;
size_t size;
enum dma_data_direction dir;
};
/*
......
......@@ -18,6 +18,7 @@ enum host1x_class {
};
struct host1x_client;
struct iommu_group;
/**
* struct host1x_client_ops - host1x client operations
......@@ -34,6 +35,7 @@ struct host1x_client_ops {
* @list: list node for the host1x client
* @parent: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
* @group: IOMMU group that this client is a member of
* @ops: host1x client operations
* @class: host1x class represented by this client
* @channel: host1x channel associated with this client
......@@ -44,6 +46,7 @@ struct host1x_client {
struct list_head list;
struct device *parent;
struct device *dev;
struct iommu_group *group;
const struct host1x_client_ops *ops;
......@@ -64,8 +67,9 @@ struct sg_table;
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
dma_addr_t *phys);
void (*unpin)(struct device *dev, struct sg_table *sgt);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
......@@ -92,15 +96,17 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
struct sg_table **sgt)
static inline struct sg_table *host1x_bo_pin(struct device *dev,
struct host1x_bo *bo,
dma_addr_t *phys)
{
return bo->ops->pin(bo, sgt);
return bo->ops->pin(dev, bo, phys);
}
static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
struct sg_table *sgt)
{
bo->ops->unpin(bo, sgt);
bo->ops->unpin(dev, sgt);
}
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
......@@ -158,7 +164,7 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
struct host1x_channel;
struct host1x_job;
struct host1x_channel *host1x_channel_request(struct device *dev);
struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
......@@ -167,6 +173,9 @@ int host1x_job_submit(struct host1x_job *job);
* host1x job
*/
#define HOST1X_RELOC_READ (1 << 0)
#define HOST1X_RELOC_WRITE (1 << 1)
struct host1x_reloc {
struct {
struct host1x_bo *bo;
......@@ -177,6 +186,7 @@ struct host1x_reloc {
unsigned long offset;
} target;
unsigned long shift;
unsigned long flags;
};
struct host1x_job {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment