Commit 0f0f9aee authored by Stanislav Lisovskiy, committed by Ville Syrjälä

drm/i915: Manipulate DBuf slices properly

Start manipulating DBuf slices as a mask rather than as a total
count, as the current approach doesn't give us full control over
all combinations of slices, which we might need (e.g. enabling S2
alone can't be expressed by setting enabled_slices=1).
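
As a quick illustration (not code from this patch, just the idea):
with a slice enum, a mask can request any combination, including S2
alone, which a plain count cannot express:

    enum dbuf_slice { DBUF_S1, DBUF_S2 };

    /* old scheme: a count of 1 implicitly means "S1 only" */
    u8 enabled_dbuf_slices_num = 1;

    /* new scheme: a mask names exactly the slices we want */
    u8 enabled_dbuf_slices_mask = BIT(DBUF_S2); /* S2 only */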

Removed wrong code from intel_get_ddb_size as it doesn't match the
BSpec. For now still use a single DBuf slice until the proper
algorithm is implemented.

Other minor code refactoring to prepare for the major DBuf
assignment changes to land:
- As the enabled slices now form a mask, we still need a value that
  reflects how many DBuf slices the platform supports; device info
  therefore now contains num_supported_dbuf_slices (see the sketch
  after this list).
- Removed an unneeded assertion as we now manipulate slices in a
  more proper way.
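
A minimal sketch of how the per-platform slice count and the mask
work together during hw readout, simplified from the
intel_enabled_dbuf_slices_mask() hunk below:

    int i;
    int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
    u8 enabled_slices_mask = 0;

    /* collect the power state of every slice the platform has */
    for (i = 0; i < max_slices; i++) {
            if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
                    enabled_slices_mask |= BIT(i);
    }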

v2: Start using enabled_slices in dev_priv

v3: "enabled_slices" is now "enabled_dbuf_slices_mask",
    as this now sits in dev_priv independently.

v4: - Fixed debug print formatting to hex (Matt Roper)
    - Optimized DBuf slice updates to happen only if the slice union
      differs from the current config (Matt Roper)
    - Made some functions static (Matt Roper)
    - Created a parameterized version of DBUF_CTL to simplify the
      DBuf programming cycle (Matt Roper)
    - Removed an unneeded field from GEN10_FEATURES (Matt Roper)

v5: - Removed a redundant DBuf slice programming helper (Ville Syrjälä)
    - Started using a parameterized loop for the hw readout of the
      slices (Ville Syrjälä)
    - Added back the assertion checking the number of DBuf slices
      enabled after the DC5/6 transition; also added a new assertion,
      since starting from ICL the DMC seems to restore the last DBuf
      power state set rather than powering up all DBuf slices as the
      assertion previously expected (Ville Syrjälä)

v6: - Now using an enum for DBuf slices in this patch (Ville Syrjälä)
    - Removed gen11_assert_dbuf_enabled and put gen9_assert_dbuf_enabled
      back, as we really need a single unified assert here. However,
      always enabling slice 1 is currently enforced by the BSpec, so we
      will have to OR the enabled slices mask with 1 to stay consistent
      with the BSpec; that way the assertion can be unified and checked
      against the actual driver state rather than some hardcoded
      value. (concluded with Ville)
    - Removed the parameterized DBUF_CTL version; it is extracted to
      another patch. (Ville Syrjälä)

v7:
    - Removed the unneeded hardcoded return value for older gens from
      intel_enabled_dbuf_slices_mask - this is now handled in a unified
      manner since device info reports the max DBuf slices as 1 for
      older platforms anyway (Matthew Roper)
    - Now using INTEL_INFO(dev_priv)->num_supported_dbuf_slices instead
      of the intel_dbuf_max_slices function as it is trivial
      (Matthew Roper)

v8: - Fixed icl_dbuf_disable to still disable all DBuf slices (Ville Syrjälä)

v9: - Renamed _DBUF_CTL_S to DBUF_CTL_S (Ville Syrjälä)
    - Now using the power_domains mutex to protect against a race
      condition: intel_dbuf_slices_update might run in parallel to
      gen9_dc_off_power_well_enable (called from intel_dp_detect, for
      instance), and gen9_assert_dbuf_enabled can then preempt it after
      the registers have already been updated but before dev_priv has,
      triggering the assertion.
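
A condensed sketch of the fix, mirroring the icl_dbuf_slices_update()
hunk below: both the register writes and the software mask update
happen under the power domains lock, so the assert can never observe
them out of sync:

    mutex_lock(&power_domains->lock);

    /* program every slice according to the requested mask */
    for (i = 0; i < max_slices; i++)
            intel_dbuf_slice_set(dev_priv, DBUF_CTL_S(i),
                                 (req_slices & BIT(i)) != 0);

    /* update the software state before dropping the lock */
    dev_priv->enabled_dbuf_slices_mask = req_slices;

    mutex_unlock(&power_domains->lock);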
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200202230630.8975-6-stanislav.lisovskiy@intel.com
parent 2570b7e3
@@ -14084,13 +14084,13 @@ static void verify_wm_state(struct intel_crtc *crtc,
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
hw_enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
hw_enabled_slices != dev_priv->enabled_dbuf_slices_num)
hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
drm_err(&dev_priv->drm,
"mismatch in DBUF Slices (expected %u, got %u)\n",
dev_priv->enabled_dbuf_slices_num,
"mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
dev_priv->enabled_dbuf_slices_mask,
hw_enabled_slices);
/* planes */
@@ -15448,22 +15448,23 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
u8 required_slices = state->enabled_dbuf_slices_num;
u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
u8 required_slices = state->enabled_dbuf_slices_mask;
u8 slices_union = hw_enabled_slices | required_slices;
/* If 2nd DBuf slice required, enable it here */
if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
icl_dbuf_slices_update(dev_priv, required_slices);
if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
icl_dbuf_slices_update(dev_priv, slices_union);
}
static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
u8 required_slices = state->enabled_dbuf_slices_num;
u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
u8 required_slices = state->enabled_dbuf_slices_mask;
/* If 2nd DBuf slice is no more required disable it */
if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
icl_dbuf_slices_update(dev_priv, required_slices);
}
@@ -15,6 +15,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -1041,11 +1042,13 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
u32 tmp = intel_de_read(dev_priv, DBUF_CTL_S(0));
u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
(DBUF_POWER_STATE | DBUF_POWER_REQUEST),
"Unexpected DBuf power power state (0x%08x)\n", tmp);
WARN(hw_enabled_dbuf_slices != enabled_dbuf_slices,
"Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
hw_enabled_dbuf_slices,
enabled_dbuf_slices);
}
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
@@ -4425,87 +4428,58 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
intel_dbuf_slice_set(dev_priv, DBUF_CTL_S(0), true);
icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
intel_dbuf_slice_set(dev_priv, DBUF_CTL_S(0), false);
}
static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
{
if (INTEL_GEN(dev_priv) < 11)
return 1;
return 2;
icl_dbuf_slices_update(dev_priv, 0);
}
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
const u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_num;
bool ret;
int i;
int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
struct i915_power_domains *power_domains = &dev_priv->power_domains;
if (req_slices > intel_dbuf_max_slices(dev_priv)) {
drm_err(&dev_priv->drm,
WARN(hweight8(req_slices) > max_slices,
"Invalid number of dbuf slices requested\n");
return;
}
if (req_slices == hw_enabled_slices || req_slices == 0)
return;
DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
if (req_slices > hw_enabled_slices)
ret = intel_dbuf_slice_set(dev_priv,
DBUF_CTL_S(DBUF_S2), true);
else
ret = intel_dbuf_slice_set(dev_priv,
DBUF_CTL_S(DBUF_S2), false);
/*
* Might be running this in parallel to gen9_dc_off_power_well_enable
* being called from intel_dp_detect for instance,
* which causes assertion triggered by race condition,
* as gen9_assert_dbuf_enabled might preempt this when registers
* were already updated, while dev_priv was not.
*/
mutex_lock(&power_domains->lock);
if (ret)
dev_priv->enabled_dbuf_slices_num = req_slices;
for (i = 0; i < max_slices; i++) {
intel_dbuf_slice_set(dev_priv,
DBUF_CTL_S(i),
(req_slices & BIT(i)) != 0);
}
dev_priv->enabled_dbuf_slices_mask = req_slices;
mutex_unlock(&power_domains->lock);
}
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
intel_de_write(dev_priv, DBUF_CTL_S(0),
intel_de_read(dev_priv, DBUF_CTL_S(0)) | DBUF_POWER_REQUEST);
intel_de_write(dev_priv, DBUF_CTL_S(1),
intel_de_read(dev_priv, DBUF_CTL_S(1)) | DBUF_POWER_REQUEST);
intel_de_posting_read(dev_priv, DBUF_CTL_S(1));
udelay(10);
if (!(intel_de_read(dev_priv, DBUF_CTL_S(0)) & DBUF_POWER_STATE) ||
!(intel_de_read(dev_priv, DBUF_CTL_S(1)) & DBUF_POWER_STATE))
drm_err(&dev_priv->drm, "DBuf power enable timeout\n");
else
/*
* FIXME: for now pretend that we only have 1 slice, see
* intel_enabled_dbuf_slices_num().
* Just power up 1 slice, we will
* figure out later which slices we have and what we need.
*/
dev_priv->enabled_dbuf_slices_num = 1;
icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
intel_de_write(dev_priv, DBUF_CTL_S(0),
intel_de_read(dev_priv, DBUF_CTL_S(0)) & ~DBUF_POWER_REQUEST);
intel_de_write(dev_priv, DBUF_CTL_S(1),
intel_de_read(dev_priv, DBUF_CTL_S(1)) & ~DBUF_POWER_REQUEST);
intel_de_posting_read(dev_priv, DBUF_CTL_S(1));
udelay(10);
if ((intel_de_read(dev_priv, DBUF_CTL_S(0)) & DBUF_POWER_STATE) ||
(intel_de_read(dev_priv, DBUF_CTL_S(1)) & DBUF_POWER_STATE))
drm_err(&dev_priv->drm, "DBuf power disable timeout!\n");
else
/*
* FIXME: for now pretend that the first slice is always
* enabled, see intel_enabled_dbuf_slices_num().
*/
dev_priv->enabled_dbuf_slices_num = 1;
icl_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -496,7 +496,7 @@ struct intel_atomic_state {
bool global_state_changed;
/* Number of enabled DBuf slices */
u8 enabled_dbuf_slices_num;
u8 enabled_dbuf_slices_mask;
struct i915_sw_fence commit_ready;
@@ -1186,7 +1186,7 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
u8 enabled_dbuf_slices_num; /* GEN11 has configurable 2 slices */
u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
struct dram_info {
bool valid;
@@ -615,7 +615,8 @@ static const struct intel_device_info chv_info = {
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
.ddb_size = 896
.ddb_size = 896, \
.num_supported_dbuf_slices = 1
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -650,6 +651,7 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
.num_supported_dbuf_slices = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -774,6 +776,7 @@ static const struct intel_device_info cnl_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
.num_supported_dbuf_slices = 2, \
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
@@ -180,6 +180,7 @@ struct intel_device_info {
} display;
u16 ddb_size; /* in blocks */
u8 num_supported_dbuf_slices; /* number of DBuf slices */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -3597,26 +3597,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
u8 enabled_dbuf_slices_num;
/* Slice 1 will always be enabled */
enabled_dbuf_slices_num = 1;
/* Gen prior to GEN11 have only one DBuf slice */
if (INTEL_GEN(dev_priv) < 11)
return enabled_dbuf_slices_num;
int i;
int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
u8 enabled_slices_mask = 0;
/*
* FIXME: for now we'll only ever use 1 slice; pretend that we have
* only that 1 slice enabled until we have a proper way for on-demand
* toggling of the second slice.
*/
if (0 && I915_READ(DBUF_CTL_S(DBUF_S2)) & DBUF_POWER_STATE)
enabled_dbuf_slices_num++;
for (i = 0; i < max_slices; i++) {
if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
enabled_slices_mask |= BIT(i);
}
return enabled_dbuf_slices_num;
return enabled_slices_mask;
}
/*
@@ -3824,8 +3816,6 @@ static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
const struct drm_display_mode *adjusted_mode;
u64 total_data_bw;
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
@@ -3833,23 +3823,8 @@
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
adjusted_mode = &crtc_state->hw.adjusted_mode;
total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
/*
* 12GB/s is maximum BW supported by single DBuf slice.
*
* FIXME dbuf slice code is broken:
* - must wait for planes to stop using the slice before powering it off
* - plane straddling both slices is illegal in multi-pipe scenarios
* - should validate we stay within the hw bandwidth limits
*/
if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
intel_state->enabled_dbuf_slices_num = 2;
} else {
intel_state->enabled_dbuf_slices_num = 1;
intel_state->enabled_dbuf_slices_mask = BIT(DBUF_S1);
ddb_size /= 2;
}
return ddb_size;
}
@@ -4046,8 +4021,8 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
dev_priv->enabled_dbuf_slices_num =
intel_enabled_dbuf_slices_num(dev_priv);
dev_priv->enabled_dbuf_slices_mask =
intel_enabled_dbuf_slices_mask(dev_priv);
}
/*
@@ -5155,7 +5130,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int ret, i;
state->enabled_dbuf_slices_num = dev_priv->enabled_dbuf_slices_num;
state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -32,7 +32,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv);
u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);