Commit 6295f1d8 authored by Dave Airlie


Merge tag 'drm-intel-fixes-2022-11-03' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Add locking around DKL PHY register accesses (Imre Deak)
- Stop abusing swiotlb_max_segment (Robert Beckett)
- Filter out invalid outputs more sensibly (Ville Syrjälä)
- Setup DDC fully before output init (Ville Syrjälä)
- Simplify intel_panel_add_edid_alt_fixed_modes() (Ville Syrjälä)
- Grab mode_config.mutex during LVDS init to avoid WARNs (Ville Syrjälä)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/Y2ODlCGM4nACmzsJ@tursulin-desk
parents 980a2ff2 12caf46c
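
Before the diff, a brief note on what the new Dekel PHY helpers protect: each Type-C Dekel PHY is reached through a bank-select register (HIP_INDEX_REG) followed by the actual register read/write, and nothing serialized those two steps until now, so concurrent users could retarget the bank between them. The intel_dkl_phy_*() helpers added below wrap both steps in a spinlock (display.dkl.phy_lock). What follows is a minimal stand-alone C sketch of the pattern, using a pthread mutex and plain arrays as stand-ins for the driver's spinlock and MMIO accessors; it is an illustration only, not the driver's API.

#include <pthread.h>
#include <stdint.h>

/* Stand-in for one Dekel PHY: a bank-select index plus banked registers. */
struct dkl_phy {
        pthread_mutex_t lock;   /* plays the role of display.dkl.phy_lock */
        int hip_index;          /* plays the role of HIP_INDEX_REG */
        uint32_t bank[3][64];   /* register banks selected by hip_index */
};

/*
 * Select the bank and access the register while holding the lock, so no
 * other thread can move hip_index between the two steps; this is the race
 * the real intel_dkl_phy_read()/write()/rmw() helpers close.
 */
static uint32_t dkl_phy_read(struct dkl_phy *phy, int ln, int reg)
{
        uint32_t val;

        pthread_mutex_lock(&phy->lock);
        phy->hip_index = ln;            /* bank select */
        val = phy->bank[ln][reg];       /* the actual register access */
        pthread_mutex_unlock(&phy->lock);

        return val;
}

The full driver implementation, using spin_lock() and the intel_de_*() MMIO layer, is the new intel_dkl_phy.c further down in the diff.
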
@@ -282,6 +282,7 @@ i915-y += \
 	display/intel_ddi.o \
 	display/intel_ddi_buf_trans.o \
 	display/intel_display_trace.o \
+	display/intel_dkl_phy.o \
 	display/intel_dp.o \
 	display/intel_dp_aux.o \
 	display/intel_dp_aux_backlight.o \
......
@@ -43,6 +43,7 @@
 #include "intel_de.h"
 #include "intel_display_power.h"
 #include "intel_display_types.h"
+#include "intel_dkl_phy.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
@@ -1262,33 +1263,30 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
 	for (ln = 0; ln < 2; ln++) {
 		int level;

-		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-			       HIP_INDEX_VAL(tc_port, ln));
-		intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
+		intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), ln, 0);

 		level = intel_ddi_level(encoder, crtc_state, 2*ln+0);

-		intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port),
+		intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), ln,
 			     DKL_TX_PRESHOOT_COEFF_MASK |
 			     DKL_TX_DE_EMPAHSIS_COEFF_MASK |
 			     DKL_TX_VSWING_CONTROL_MASK,
 			     DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
 			     DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
 			     DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));

 		level = intel_ddi_level(encoder, crtc_state, 2*ln+1);

-		intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port),
+		intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), ln,
 			     DKL_TX_PRESHOOT_COEFF_MASK |
 			     DKL_TX_DE_EMPAHSIS_COEFF_MASK |
 			     DKL_TX_VSWING_CONTROL_MASK,
 			     DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) |
 			     DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) |
 			     DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing));

-		intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
+		intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln,
 			     DKL_TX_DP20BITMODE, 0);

 		if (IS_ALDERLAKE_P(dev_priv)) {
 			u32 val;
@@ -1306,10 +1304,10 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
 				val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
 			}

-			intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
+			intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln,
 				     DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
 				     DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
 				     val);
 		}
 	}
 }
@@ -2019,12 +2017,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
 		return;

 	if (DISPLAY_VER(dev_priv) >= 12) {
-		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-			       HIP_INDEX_VAL(tc_port, 0x0));
-		ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
-		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-			       HIP_INDEX_VAL(tc_port, 0x1));
-		ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
+		ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 0);
+		ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 1);
 	} else {
 		ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
 		ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
@@ -2085,12 +2079,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
 	}

 	if (DISPLAY_VER(dev_priv) >= 12) {
-		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-			       HIP_INDEX_VAL(tc_port, 0x0));
-		intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
-		intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-			       HIP_INDEX_VAL(tc_port, 0x1));
-		intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1);
+		intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 0, ln0);
+		intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 1, ln1);
 	} else {
 		intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
 		intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
@@ -3094,10 +3084,8 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder)
 	enum tc_port tc_port = intel_port_to_tc(i915, encoder->port);
 	int ln;

-	for (ln = 0; ln < 2; ln++) {
-		intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
-		intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0);
-	}
+	for (ln = 0; ln < 2; ln++)
+		intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port), ln, DKL_PCS_DW5_CORE_SOFTRESET, 0);
 }

 static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp,
......
@@ -315,6 +315,14 @@ struct intel_display {
 		struct intel_global_obj obj;
 	} dbuf;

+	struct {
+		/*
+		 * dkl.phy_lock protects against concurrent access of the
+		 * Dekel TypeC PHYs.
+		 */
+		spinlock_t phy_lock;
+	} dkl;
+
 	struct {
 		/* VLV/CHV/BXT/GLK DSI MMIO register base address */
 		u32 mmio_base;
......
@@ -12,6 +12,7 @@
 #include "intel_de.h"
 #include "intel_display_power_well.h"
 #include "intel_display_types.h"
+#include "intel_dkl_phy.h"
 #include "intel_dmc.h"
 #include "intel_dpio_phy.h"
 #include "intel_dpll.h"
@@ -529,11 +530,9 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
 	enum tc_port tc_port;

 	tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
-	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-		       HIP_INDEX_VAL(tc_port, 0x2));

-	if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
-				  DKL_CMN_UC_DW27_UC_HEALTH, 1))
+	if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port), 2) &
+		     DKL_CMN_UC_DW27_UC_HEALTH, 1))
 		drm_warn(&dev_priv->drm,
 			 "Timeout waiting TC uC health\n");
 }
......
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_reg.h"

#include "intel_de.h"
#include "intel_display.h"
#include "intel_dkl_phy.h"

static void
dkl_phy_set_hip_idx(struct drm_i915_private *i915, i915_reg_t reg, int idx)
{
	enum tc_port tc_port = DKL_REG_TC_PORT(reg);

	drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS);

	intel_de_write(i915,
		       HIP_INDEX_REG(tc_port),
		       HIP_INDEX_VAL(tc_port, idx));
}

/**
 * intel_dkl_phy_read - read a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 * @ln: lane instance of @reg
 *
 * Read the @reg Dekel PHY register.
 *
 * Returns the read value.
 */
u32
intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln)
{
	u32 val;

	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg, ln);
	val = intel_de_read(i915, reg);

	spin_unlock(&i915->display.dkl.phy_lock);

	return val;
}

/**
 * intel_dkl_phy_write - write a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 * @ln: lane instance of @reg
 * @val: value to write
 *
 * Write @val to the @reg Dekel PHY register.
 */
void
intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val)
{
	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg, ln);
	intel_de_write(i915, reg, val);

	spin_unlock(&i915->display.dkl.phy_lock);
}

/**
 * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 * @ln: lane instance of @reg
 * @clear: mask to clear
 * @set: mask to set
 *
 * Read the @reg Dekel PHY register, clearing then setting the @clear/@set bits in it, and writing
 * this value back to the register if the value differs from the read one.
 */
void
intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set)
{
	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg, ln);
	intel_de_rmw(i915, reg, clear, set);

	spin_unlock(&i915->display.dkl.phy_lock);
}

/**
 * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register
 * @i915: i915 device instance
 * @reg: Dekel PHY register
 * @ln: lane instance of @reg
 *
 * Read the @reg Dekel PHY register without returning the read value.
 */
void
intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln)
{
	spin_lock(&i915->display.dkl.phy_lock);

	dkl_phy_set_hip_idx(i915, reg, ln);
	intel_de_posting_read(i915, reg);

	spin_unlock(&i915->display.dkl.phy_lock);
}
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef __INTEL_DKL_PHY_H__
#define __INTEL_DKL_PHY_H__

#include <linux/types.h>

#include "i915_reg_defs.h"

struct drm_i915_private;

u32
intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln);
void
intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val);
void
intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set);
void
intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln);

#endif /* __INTEL_DKL_PHY_H__ */
@@ -5276,7 +5276,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 					encoder->devdata, IS_ERR(edid) ? NULL : edid);
 	intel_panel_add_edid_fixed_modes(intel_connector,
-					 intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
+					 intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE ||
 					 intel_vrr_is_capable(intel_connector));

 	/* MSO requires information from the EDID */
......
@@ -25,6 +25,7 @@
 #include "intel_de.h"
 #include "intel_display_types.h"
+#include "intel_dkl_phy.h"
 #include "intel_dpio_phy.h"
 #include "intel_dpll.h"
 #include "intel_dpll_mgr.h"
@@ -3508,15 +3509,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 	 * All registers read here have the same HIP_INDEX_REG even though
 	 * they are on different building blocks
 	 */
-	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-		       HIP_INDEX_VAL(tc_port, 0x2));
-	hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
-						  DKL_REFCLKIN_CTL(tc_port));
+	hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv,
+						       DKL_REFCLKIN_CTL(tc_port), 2);
 	hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;

 	hw_state->mg_clktop2_hsclkctl =
-		intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
+		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2);
 	hw_state->mg_clktop2_hsclkctl &=
 		MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
 		MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3524,32 +3522,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
 		MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;

 	hw_state->mg_clktop2_coreclkctl1 =
-		intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
+		intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2);
 	hw_state->mg_clktop2_coreclkctl1 &=
 		MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;

-	hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
+	hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port), 2);
 	val = DKL_PLL_DIV0_MASK;
 	if (dev_priv->display.vbt.override_afc_startup)
 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
 	hw_state->mg_pll_div0 &= val;

-	hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
+	hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2);
 	hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
 				  DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);

-	hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
+	hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2);
 	hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
 				 DKL_PLL_SSC_STEP_LEN_MASK |
 				 DKL_PLL_SSC_STEP_NUM_MASK |
 				 DKL_PLL_SSC_EN);

-	hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
+	hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2);
 	hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
 				  DKL_PLL_BIAS_FBDIV_FRAC_MASK);

 	hw_state->mg_pll_tdc_coldst_bias =
-		intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+		intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
 	hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
 					     DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
@@ -3737,61 +3735,58 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
 	 * All registers programmed here have the same HIP_INDEX_REG even
 	 * though on different building block
 	 */
-	intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
-		       HIP_INDEX_VAL(tc_port, 0x2));

 	/* All the registers are RMW */
-	val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2);
 	val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
 	val |= hw_state->mg_refclkin_ctl;
-	intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2, val);

-	val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2);
 	val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
 	val |= hw_state->mg_clktop2_coreclkctl1;
-	intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2, val);

-	val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2);
 	val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
 		 MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
 		 MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
 		 MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
 	val |= hw_state->mg_clktop2_hsclkctl;
-	intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2, val);

 	val = DKL_PLL_DIV0_MASK;
 	if (dev_priv->display.vbt.override_afc_startup)
 		val |= DKL_PLL_DIV0_AFC_STARTUP_MASK;
-	intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val,
-		     hw_state->mg_pll_div0);
+	intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), 2, val,
+			  hw_state->mg_pll_div0);

-	val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2);
 	val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
 		 DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
 	val |= hw_state->mg_pll_div1;
-	intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), 2, val);

-	val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2);
 	val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
 		 DKL_PLL_SSC_STEP_LEN_MASK |
 		 DKL_PLL_SSC_STEP_NUM_MASK |
 		 DKL_PLL_SSC_EN);
 	val |= hw_state->mg_pll_ssc;
-	intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), 2, val);

-	val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2);
 	val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
 		 DKL_PLL_BIAS_FBDIV_FRAC_MASK);
 	val |= hw_state->mg_pll_bias;
-	intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), 2, val);

-	val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+	val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
 	val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
 		 DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
 	val |= hw_state->mg_pll_tdc_coldst_bias;
-	intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+	intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2, val);

-	intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
+	intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2);
 }

 static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
......
@@ -972,8 +972,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	/* Try EDID first */
 	intel_panel_add_edid_fixed_modes(intel_connector,
-					 intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE,
-					 false);
+					 intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE);

 	/* Failed to get EDID, what about VBT? */
 	if (!intel_panel_preferred_fixed_mode(intel_connector))
......
@@ -254,10 +254,10 @@ static void intel_panel_destroy_probed_modes(struct intel_connector *connector)
 }

 void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
-				      bool has_drrs, bool has_vrr)
+				      bool use_alt_fixed_modes)
 {
 	intel_panel_add_edid_preferred_mode(connector);
-	if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr))
+	if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes)
 		intel_panel_add_edid_alt_fixed_modes(connector);
 	intel_panel_destroy_probed_modes(connector);
 }
......
@@ -44,7 +44,7 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state,
 int intel_panel_compute_config(struct intel_connector *connector,
 			       struct drm_display_mode *adjusted_mode);
 void intel_panel_add_edid_fixed_modes(struct intel_connector *connector,
-				      bool has_drrs, bool has_vrr);
+				      bool use_alt_fixed_modes);
 void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector);
 void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
 void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
......
@@ -2747,13 +2747,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	if (!intel_sdvo_connector)
 		return false;

-	if (device == 0) {
-		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+	if (device == 0)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
-	} else if (device == 1) {
-		intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+	else if (device == 1)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
-	}

 	intel_connector = &intel_sdvo_connector->base;
 	connector = &intel_connector->base;
@@ -2808,7 +2805,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
 	encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
 	connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;

-	intel_sdvo->controlled_output |= type;
 	intel_sdvo_connector->output_flag = type;

 	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
@@ -2849,13 +2845,10 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 	encoder->encoder_type = DRM_MODE_ENCODER_DAC;
 	connector->connector_type = DRM_MODE_CONNECTOR_VGA;

-	if (device == 0) {
-		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+	if (device == 0)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
-	} else if (device == 1) {
-		intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+	else if (device == 1)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
-	}

 	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
 		kfree(intel_sdvo_connector);
@@ -2885,13 +2878,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 	encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_LVDS;

-	if (device == 0) {
-		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+	if (device == 0)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
-	} else if (device == 1) {
-		intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+	else if (device == 1)
 		intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
-	}

 	if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) {
 		kfree(intel_sdvo_connector);
@@ -2910,8 +2900,12 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 		intel_panel_add_vbt_sdvo_fixed_mode(intel_connector);

 	if (!intel_panel_preferred_fixed_mode(intel_connector)) {
+		mutex_lock(&i915->drm.mode_config.mutex);
+
 		intel_ddc_get_modes(connector, &intel_sdvo->ddc);
-		intel_panel_add_edid_fixed_modes(intel_connector, false, false);
+		intel_panel_add_edid_fixed_modes(intel_connector, false);
+
+		mutex_unlock(&i915->drm.mode_config.mutex);
 	}

 	intel_panel_init(intel_connector);
@@ -2926,16 +2920,39 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
 	return false;
 }

+static u16 intel_sdvo_filter_output_flags(u16 flags)
+{
+	flags &= SDVO_OUTPUT_MASK;
+
+	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
+	if (!(flags & SDVO_OUTPUT_TMDS0))
+		flags &= ~SDVO_OUTPUT_TMDS1;
+
+	if (!(flags & SDVO_OUTPUT_RGB0))
+		flags &= ~SDVO_OUTPUT_RGB1;
+
+	if (!(flags & SDVO_OUTPUT_LVDS0))
+		flags &= ~SDVO_OUTPUT_LVDS1;
+
+	return flags;
+}
+
 static bool
 intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
 {
-	/* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/
+	struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+
+	flags = intel_sdvo_filter_output_flags(flags);
+
+	intel_sdvo->controlled_output = flags;
+
+	intel_sdvo_select_ddc_bus(i915, intel_sdvo);
+
 	if (flags & SDVO_OUTPUT_TMDS0)
 		if (!intel_sdvo_dvi_init(intel_sdvo, 0))
 			return false;

-	if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+	if (flags & SDVO_OUTPUT_TMDS1)
 		if (!intel_sdvo_dvi_init(intel_sdvo, 1))
 			return false;
@@ -2956,7 +2973,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
 		if (!intel_sdvo_analog_init(intel_sdvo, 0))
 			return false;

-	if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+	if (flags & SDVO_OUTPUT_RGB1)
 		if (!intel_sdvo_analog_init(intel_sdvo, 1))
 			return false;
@@ -2964,14 +2981,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags)
 		if (!intel_sdvo_lvds_init(intel_sdvo, 0))
 			return false;

-	if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+	if (flags & SDVO_OUTPUT_LVDS1)
 		if (!intel_sdvo_lvds_init(intel_sdvo, 1))
 			return false;

-	if ((flags & SDVO_OUTPUT_MASK) == 0) {
+	if (flags == 0) {
 		unsigned char bytes[2];

-		intel_sdvo->controlled_output = 0;
 		memcpy(bytes, &intel_sdvo->caps.output_flags, 2);
 		DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
 			      SDVO_NAME(intel_sdvo),
@@ -3383,8 +3399,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
 	 */
 	intel_sdvo->base.cloneable = 0;

-	intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
-
 	/* Set the input timing to the screen. Assume always input 0. */
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		goto err_output;
......
@@ -6,7 +6,6 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <linux/swiotlb.h>

 #include "i915_drv.h"
 #include "i915_gem.h"
@@ -38,22 +37,12 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
 	unsigned int npages;
-	int max_order;
+	int max_order = MAX_ORDER;
+	unsigned int max_segment;
 	gfp_t gfp;

-	max_order = MAX_ORDER;
-#ifdef CONFIG_SWIOTLB
-	if (is_swiotlb_active(obj->base.dev->dev)) {
-		unsigned int max_segment;
-
-		max_segment = swiotlb_max_segment();
-		if (max_segment) {
-			max_segment = max_t(unsigned int, max_segment,
-					    PAGE_SIZE) >> PAGE_SHIFT;
-			max_order = min(max_order, ilog2(max_segment));
-		}
-	}
-#endif
+	max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
+	max_order = min(max_order, get_order(max_segment));

 	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
 	if (IS_I965GM(i915) || IS_I965G(i915)) {
......
@@ -194,7 +194,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	struct intel_memory_region *mem = obj->mm.region;
 	struct address_space *mapping = obj->base.filp->f_mapping;
 	const unsigned long page_count = obj->base.size / PAGE_SIZE;
-	unsigned int max_segment = i915_sg_segment_size();
+	unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
 	struct sg_table *st;
 	struct sgt_iter sgt_iter;
 	struct page *page;
......
@@ -189,7 +189,7 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev,
 	struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev);
 	struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
 	struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
-	const unsigned int max_segment = i915_sg_segment_size();
+	const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
 	const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT;
 	struct file *filp = i915_tt->filp;
 	struct sgt_iter sgt_iter;
@@ -538,7 +538,7 @@ static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm)
 	ret = sg_alloc_table_from_pages_segment(st,
 			ttm->pages, ttm->num_pages,
 			0, (unsigned long)ttm->num_pages << PAGE_SHIFT,
-			i915_sg_segment_size(), GFP_KERNEL);
+			i915_sg_segment_size(i915_tt->dev), GFP_KERNEL);
 	if (ret) {
 		st->sgl = NULL;
 		return ERR_PTR(ret);
......
@@ -129,7 +129,7 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
-	unsigned int max_segment = i915_sg_segment_size();
+	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
 	struct sg_table *st;
 	unsigned int sg_page_sizes;
 	struct page **pvec;
......
@@ -353,6 +353,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 	mutex_init(&dev_priv->display.wm.wm_mutex);
 	mutex_init(&dev_priv->display.pps.mutex);
 	mutex_init(&dev_priv->display.hdcp.comp_mutex);
+	spin_lock_init(&dev_priv->display.dkl.phy_lock);

 	i915_memcpy_init_early(dev_priv);
 	intel_runtime_pm_init_early(&dev_priv->runtime_pm);
......
@@ -7420,6 +7420,9 @@ enum skl_power_gate {
 #define _DKL_PHY5_BASE			0x16C000
 #define _DKL_PHY6_BASE			0x16D000

+#define DKL_REG_TC_PORT(__reg) \
+	(TC_PORT_1 + ((__reg).reg - _DKL_PHY1_BASE) / (_DKL_PHY2_BASE - _DKL_PHY1_BASE))
+
 /* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */
 #define _DKL_PCS_DW5			0x14
 #define DKL_PCS_DW5(tc_port)		_MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \
......
@@ -9,7 +9,8 @@
 #include <linux/pfn.h>
 #include <linux/scatterlist.h>
-#include <linux/swiotlb.h>
+#include <linux/dma-mapping.h>
+#include <xen/xen.h>

 #include "i915_gem.h"
@@ -127,19 +128,26 @@ static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
 	return page_sizes;
 }

-static inline unsigned int i915_sg_segment_size(void)
+static inline unsigned int i915_sg_segment_size(struct device *dev)
 {
-	unsigned int size = swiotlb_max_segment();
-
-	if (size == 0)
-		size = UINT_MAX;
-
-	size = rounddown(size, PAGE_SIZE);
-	/* swiotlb_max_segment_size can return 1 byte when it means one page. */
-	if (size < PAGE_SIZE)
-		size = PAGE_SIZE;
-
-	return size;
+	size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));
+
+	/*
+	 * For Xen PV guests pages aren't contiguous in DMA (machine) address
+	 * space. The DMA API takes care of that both in dma_alloc_* (by
+	 * calling into the hypervisor to make the pages contiguous) and in
+	 * dma_map_* (by bounce buffering). But i915 abuses ignores the
+	 * coherency aspects of the DMA API and thus can't cope with bounce
+	 * buffering actually happening, so add a hack here to force small
+	 * allocations and mappings when running in PV mode on Xen.
+	 *
+	 * Note this will still break if bounce buffering is required for other
+	 * reasons, like confidential computing hypervisors or PCIe root ports
+	 * with addressing limitations.
+	 */
+	if (xen_pv_domain())
+		max = PAGE_SIZE;
+
+	return round_down(max, PAGE_SIZE);
 }

 bool i915_sg_trim(struct sg_table *orig_st);
......