Commit dbef0f15 authored by Paulo Zanoni, committed by Daniel Vetter

drm/i915: add frontbuffer tracking to FBC

Kill the blt/render tracking we currently have and use the frontbuffer
tracking infrastructure.

Don't enable things by default yet.

v2: (Rodrigo) Fix small rebase conflict and a typo in the subject.
v3: (Paulo) Rebase on RENDER_CS change.
v4: (Paulo) Rebase.
v5: (Paulo) Simplify: flushes don't have origin (Daniel).
            Also rebase due to patch order changes.
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 3954e733
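
Before the diff, a minimal standalone C model of the bookkeeping this patch adds to intel_fbc.c may help. The names below (struct fbc_state, hw_fbc_enable/disable) are hypothetical stand-ins, not the driver's types; the real intel_fbc_invalidate() additionally narrows the mask to the currently compressed plane and honours the write's origin, and the real flush path re-runs intel_fbc_update() rather than enabling FBC directly.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the fields this patch adds to struct i915_fbc. */
struct fbc_state {
        uint32_t possible_framebuffer_bits;  /* planes FBC could compress */
        uint32_t busy_bits;                  /* tracked planes with writes in flight */
        bool enabled;
};

static void hw_fbc_disable(struct fbc_state *fbc) { fbc->enabled = false; }
static void hw_fbc_enable(struct fbc_state *fbc)  { fbc->enabled = true;  }

/* Simplified analogue of intel_fbc_invalidate(): remember which tracked
 * planes are being written and turn FBC off while any of them are busy. */
static void fbc_invalidate(struct fbc_state *fbc, uint32_t frontbuffer_bits)
{
        fbc->busy_bits |= fbc->possible_framebuffer_bits & frontbuffer_bits;
        if (fbc->busy_bits)
                hw_fbc_disable(fbc);
}

/* Simplified analogue of intel_fbc_flush(): drop the flushed planes and
 * bring FBC back once nothing it tracks is busy any more. */
static void fbc_flush(struct fbc_state *fbc, uint32_t frontbuffer_bits)
{
        if (!fbc->busy_bits)
                return;
        fbc->busy_bits &= ~frontbuffer_bits;
        if (!fbc->busy_bits)
                hw_fbc_enable(fbc);
}

int main(void)
{
        struct fbc_state fbc = { .possible_framebuffer_bits = 0x1, .enabled = true };

        fbc_invalidate(&fbc, 0x1);            /* rendering starts on a tracked plane */
        printf("enabled=%d\n", fbc.enabled);  /* 0: FBC stays off while busy */
        fbc_flush(&fbc, 0x1);                 /* the writes have been flushed */
        printf("enabled=%d\n", fbc.enabled);  /* 1: FBC comes back */
        return 0;
}
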
@@ -805,6 +805,8 @@ struct i915_fbc {
unsigned long uncompressed_size;
unsigned threshold;
unsigned int fb_id;
unsigned int possible_framebuffer_bits;
unsigned int busy_bits;
struct intel_crtc *crtc;
int y;
@@ -817,14 +819,6 @@ struct i915_fbc {
* possible. */
bool enabled;
/* On gen8 some rings cannont perform fbc clean operation so for now
* we are doing this on SW with mmio.
* This variable works in the opposite information direction
* of ring->fbc_dirty telling software on frontbuffer tracking
* to perform the cache clean on sw side.
*/
bool need_sw_cache_clean;
struct intel_fbc_work {
struct delayed_work work;
struct drm_crtc *crtc;
......
@@ -1115,7 +1115,11 @@ bool intel_fbc_enabled(struct drm_device *dev);
void intel_fbc_update(struct drm_device *dev);
void intel_fbc_init(struct drm_i915_private *dev_priv);
void intel_fbc_disable(struct drm_device *dev);
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin);
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits);
/* intel_hdmi.c */
void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port);
......
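
The invalidate hook takes an origin while the flush hook does not (see the v5 note above). The origin lets the driver skip invalidations for writes the FBC hardware is assumed to track by itself, which is what the ORIGIN_GTT early return in intel_fbc_invalidate() further down implements. A hedged illustration with a toy enum that only approximates the driver's enum fb_op_origin:

#include <stdbool.h>
#include <stdio.h>

/* Toy approximation of enum fb_op_origin; the real enum is defined in
 * the driver and may differ. */
enum toy_fb_op_origin {
        TOY_ORIGIN_GTT,         /* write through a fenced GTT mapping */
        TOY_ORIGIN_CPU,         /* direct CPU write */
        TOY_ORIGIN_CS,          /* write emitted by the command streamer */
        TOY_ORIGIN_FLIP,        /* page flip */
};

/* Assumption: GTT writes are caught by the FBC hardware itself, so only
 * the other origins need a software invalidate. */
static bool fbc_needs_sw_invalidate(enum toy_fb_op_origin origin)
{
        return origin != TOY_ORIGIN_GTT;
}

int main(void)
{
        printf("GTT write needs sw invalidate: %d\n",
               fbc_needs_sw_invalidate(TOY_ORIGIN_GTT));
        printf("CPU write needs sw invalidate: %d\n",
               fbc_needs_sw_invalidate(TOY_ORIGIN_CPU));
        return 0;
}
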
@@ -174,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void snb_fbc_blit_update(struct drm_device *dev)
static void intel_fbc_nuke(struct drm_i915_private *dev_priv)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blt_ecoskpd;
/* Make sure blitter notifies FBC of writes */
/* Blitter is part of Media powerwell on VLV. No impact of
* his param in other platforms for now */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
GEN6_BLITTER_LOCK_SHIFT);
I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
POSTING_READ(GEN6_BLITTER_ECOSKPD);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
POSTING_READ(MSG_FBC_REND_STATE);
}
static void ilk_fbc_enable(struct drm_crtc *crtc)
@@ -239,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc)
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
snb_fbc_blit_update(dev);
}
intel_fbc_nuke(dev_priv);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -320,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc)
SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
snb_fbc_blit_update(dev);
intel_fbc_nuke(dev_priv);
DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
@@ -340,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev)
return dev_priv->fbc.enabled;
}
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_GEN8(dev))
return;
if (!intel_fbc_enabled(dev))
return;
I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
struct intel_fbc_work *work =
@@ -685,6 +654,44 @@ void intel_fbc_update(struct drm_device *dev)
i915_gem_stolen_cleanup_compression(dev);
}
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
struct drm_device *dev = dev_priv->dev;
unsigned int fbc_bits;
if (origin == ORIGIN_GTT)
return;
if (dev_priv->fbc.enabled)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
else if (dev_priv->fbc.fbc_work)
fbc_bits = INTEL_FRONTBUFFER_PRIMARY(
to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe);
else
fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
if (dev_priv->fbc.busy_bits)
intel_fbc_disable(dev);
}
void intel_fbc_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits)
{
struct drm_device *dev = dev_priv->dev;
if (!dev_priv->fbc.busy_bits)
return;
dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
if (!dev_priv->fbc.busy_bits)
intel_fbc_update(dev);
}
/**
* intel_fbc_init - Initialize FBC
* @dev_priv: the i915 device
@@ -693,12 +700,22 @@ void intel_fbc_update(struct drm_device *dev)
*/
void intel_fbc_init(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
if (!HAS_FBC(dev_priv)) {
dev_priv->fbc.enabled = false;
dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED;
return;
}
for_each_pipe(dev_priv, pipe) {
dev_priv->fbc.possible_framebuffer_bits |=
INTEL_FRONTBUFFER_PRIMARY(pipe);
if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
break;
}
if (INTEL_INFO(dev_priv)->gen >= 7) {
dev_priv->display.fbc_enabled = ilk_fbc_enabled;
dev_priv->display.enable_fbc = gen7_fbc_enable;
......
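
The intel_fbc_init() hunk above seeds possible_framebuffer_bits with one primary-plane bit per pipe, breaking after the first pipe on Haswell and gen8+ where FBC is tied to plane A. A small standalone sketch of the resulting masks, assuming the usual i915 layout of a group of frontbuffer bits per pipe with the primary plane in the lowest one (treat the exact bit layout as an assumption; the driver's INTEL_FRONTBUFFER_PRIMARY() macro is authoritative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: a group of 4 frontbuffer bits per pipe, primary plane
 * in the lowest bit of each group. */
#define FB_BITS_PER_PIPE        4
#define FB_PRIMARY(pipe)        (1u << (FB_BITS_PER_PIPE * (pipe)))

static uint32_t possible_fb_bits(int num_pipes, bool fbc_only_on_plane_a)
{
        uint32_t bits = 0;

        for (int pipe = 0; pipe < num_pipes; pipe++) {
                bits |= FB_PRIMARY(pipe);
                if (fbc_only_on_plane_a)        /* mirrors the HSW / gen8+ break */
                        break;
        }
        return bits;
}

int main(void)
{
        /* pre-HSW: every pipe's primary plane is a candidate -> 0x111 */
        printf("pre-HSW, 3 pipes: 0x%x\n", (unsigned)possible_fb_bits(3, false));
        /* HSW and gen8+: only plane A -> 0x1 */
        printf("HSW+,    3 pipes: 0x%x\n", (unsigned)possible_fb_bits(3, true));
        return 0;
}
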
@@ -118,8 +118,6 @@ static void intel_mark_fb_busy(struct drm_device *dev,
continue;
intel_increase_pllclock(dev, pipe);
if (ring && intel_fbc_enabled(dev))
ring->fbc_dirty = true;
}
}
@@ -160,6 +158,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
intel_psr_invalidate(dev, obj->frontbuffer_bits);
intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
}
/**
@@ -187,16 +186,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
intel_edp_drrs_flush(dev, frontbuffer_bits);
intel_psr_flush(dev, frontbuffer_bits);
/*
* FIXME: Unconditional fbc flushing here is a rather gross hack and
* needs to be reworked into a proper frontbuffer tracking scheme like
* psr employs.
*/
if (dev_priv->fbc.need_sw_cache_clean) {
dev_priv->fbc.need_sw_cache_clean = false;
bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
}
intel_fbc_flush(dev_priv, frontbuffer_bits);
}
/**
......
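
With the hunks above, FBC rides the same invalidate/flush pairing that PSR and DRRS already use: the tracking core invalidates every consumer before frontbuffer writes begin and flushes them once the writes have landed, at which point intel_fbc_flush() lets intel_fbc_update() re-enable compression. A hedged sketch of that ordering from a caller's point of view; the stub functions below are illustrations only, not the driver's entry points:

#include <stdint.h>
#include <stdio.h>

/* Printing stubs standing in for the consumers wired up above. */
static void psr_invalidate(uint32_t bits) { printf("psr invalidate 0x%x\n", (unsigned)bits); }
static void fbc_invalidate(uint32_t bits) { printf("fbc invalidate 0x%x\n", (unsigned)bits); }
static void psr_flush(uint32_t bits)      { printf("psr flush      0x%x\n", (unsigned)bits); }
static void fbc_flush(uint32_t bits)      { printf("fbc flush      0x%x\n", (unsigned)bits); }

int main(void)
{
        uint32_t bits = 0x1;    /* one tracked plane, for illustration */

        /* before CPU or GPU rendering touches the frontbuffer */
        psr_invalidate(bits);
        fbc_invalidate(bits);

        /* ... frontbuffer writes happen here ... */

        /* once the writes are complete and flushed */
        psr_flush(bits);
        fbc_flush(bits);
        return 0;
}
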
@@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)
return 0;
}
static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)
{
int ret;
if (!ring->fbc_dirty)
return 0;
ret = intel_ring_begin(ring, 6);
if (ret)
return ret;
/* WaFbcNukeOn3DBlt:ivb/hsw */
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, value);
intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT);
intel_ring_emit(ring, MSG_FBC_REND_STATE);
intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
intel_ring_advance(ring);
ring->fbc_dirty = false;
return 0;
}
static int
gen7_render_ring_flush(struct intel_engine_cs *ring,
u32 invalidate_domains, u32 flush_domains)
@@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
}
@@ -462,9 +436,6 @@ gen8_render_ring_flush(struct intel_engine_cs *ring,
if (ret)
return ret;
if (!invalidate_domains && flush_domains)
return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);
return 0;
}
@@ -2477,7 +2448,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
u32 invalidate, u32 flush)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t cmd;
int ret;
@@ -2486,7 +2456,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
return ret;
cmd = MI_FLUSH_DW;
if (INTEL_INFO(ring->dev)->gen >= 8)
if (INTEL_INFO(dev)->gen >= 8)
cmd += 1;
/* We always require a command barrier so that subsequent
@@ -2506,7 +2476,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
cmd |= MI_INVALIDATE_TLB;
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
if (INTEL_INFO(ring->dev)->gen >= 8) {
if (INTEL_INFO(dev)->gen >= 8) {
intel_ring_emit(ring, 0); /* upper addr */
intel_ring_emit(ring, 0); /* value */
} else {
@@ -2515,13 +2485,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
}
intel_ring_advance(ring);
if (!invalidate && flush) {
if (IS_GEN7(dev))
return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
else if (IS_BROADWELL(dev))
dev_priv->fbc.need_sw_cache_clean = true;
}
return 0;
}
......
@@ -267,7 +267,6 @@ struct intel_engine_cs {
*/
struct drm_i915_gem_request *outstanding_lazy_request;
bool gpu_caches_dirty;
bool fbc_dirty;
wait_queue_head_t irq_queue;
......