Commit 05a2fb15 authored by Mika Kuoppala's avatar Mika Kuoppala Committed by Daniel Vetter

drm/i915: Consolidate forcewake code

As we now have forcewake domains, take advantage of it
by putting the differences in gen fw handling in data rather
than in code.

In past we have opencoded this quite extensively as the fw handling
is in the fast path. There has also been a lot of cargo-culted
copy'n'pasting from older gens to newer ones.

Now that releasing the forcewake is done by a deferred timer,
we have a chance to consolidate more, since the frequency of
actual hw access is significantly lower.

Take advantage of this and generalize the fw handling code
as much as possible. But we still aim to keep the forcewake sequence
particularities for each gen intact. So the access pattern
to fw engines should remain the same.

v2: - s/old_ack/clear_ack (Chris)
    - s/post_read/posting_read (Chris)
    - less polite commit msg (Chris)

v3: - rebase
    - check and clear wake_count in init

v4: - fix posting reads for gen8 (PRTS)
Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com> (v2)
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent b2cff0db
...@@ -1294,17 +1294,12 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data) ...@@ -1294,17 +1294,12 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *fw_domain; struct intel_uncore_forcewake_domain *fw_domain;
const char *domain_names[] = {
"render",
"blitter",
"media",
};
int i; int i;
spin_lock_irq(&dev_priv->uncore.lock); spin_lock_irq(&dev_priv->uncore.lock);
for_each_fw_domain(fw_domain, dev_priv, i) { for_each_fw_domain(fw_domain, dev_priv, i) {
seq_printf(m, "%s.wake_count = %u\n", seq_printf(m, "%s.wake_count = %u\n",
domain_names[i], intel_uncore_forcewake_domain_to_str(i),
fw_domain->wake_count); fw_domain->wake_count);
} }
spin_unlock_irq(&dev_priv->uncore.lock); spin_unlock_irq(&dev_priv->uncore.lock);
......
...@@ -617,6 +617,12 @@ struct intel_uncore { ...@@ -617,6 +617,12 @@ struct intel_uncore {
int id; int id;
unsigned wake_count; unsigned wake_count;
struct timer_list timer; struct timer_list timer;
u32 reg_set;
u32 val_set;
u32 val_clear;
u32 reg_ack;
u32 reg_post;
u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT]; } fw_domain[FW_DOMAIN_ID_COUNT];
#define FORCEWAKE_RENDER (1 << FW_DOMAIN_ID_RENDER) #define FORCEWAKE_RENDER (1 << FW_DOMAIN_ID_RENDER)
#define FORCEWAKE_BLITTER (1 << FW_DOMAIN_ID_BLITTER) #define FORCEWAKE_BLITTER (1 << FW_DOMAIN_ID_BLITTER)
...@@ -2557,6 +2563,7 @@ extern void intel_uncore_init(struct drm_device *dev); ...@@ -2557,6 +2563,7 @@ extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev); extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev); extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const int domain_id);
void void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
......
...@@ -42,6 +42,26 @@ ...@@ -42,6 +42,26 @@
#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__) #define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
static const char * const forcewake_domain_names[] = {
"render",
"blitter",
"media",
};
const char *
intel_uncore_forcewake_domain_to_str(const int id)
{
BUILD_BUG_ON((sizeof(forcewake_domain_names)/sizeof(const char *)) !=
FW_DOMAIN_ID_COUNT);
if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
return forcewake_domain_names[id];
WARN_ON(id);
return "unknown";
}
static void static void
assert_device_not_suspended(struct drm_i915_private *dev_priv) assert_device_not_suspended(struct drm_i915_private *dev_priv)
{ {
...@@ -49,73 +69,125 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv) ...@@ -49,73 +69,125 @@ assert_device_not_suspended(struct drm_i915_private *dev_priv)
"Device suspended\n"); "Device suspended\n");
} }
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{ {
/* w/a for a sporadic read returning 0 by waiting for the GT __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
} }
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv) static inline void
fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
{ {
__raw_i915_write32(dev_priv, FORCEWAKE, 0); mod_timer_pinned(&d->timer, jiffies + 1);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
} }
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, static inline void
int fw_engine) fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{ {
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0, if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
__raw_i915_write32(dev_priv, FORCEWAKE, 1); static inline void
/* something from same cacheline, but !FORCEWAKE */ fw_domain_get(const struct intel_uncore_forcewake_domain *d)
__raw_posting_read(dev_priv, ECOBUS); {
__raw_i915_write32(d->i915, d->reg_set, d->val_set);
}
if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1), static inline void
fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
{
if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS)) FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
/* WaRsForcewakeWaitTC0:snb */ static inline void
__gen6_gt_wait_for_thread_c0(dev_priv); fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
__raw_i915_write32(d->i915, d->reg_set, d->val_clear);
} }
static void __gen7_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv) static inline void
fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
{ {
__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but not from the set register */
/* something from same cacheline, but !FORCEWAKE_MT */ if (d->reg_post)
__raw_posting_read(dev_priv, ECOBUS); __raw_posting_read(d->i915, d->reg_post);
} }
static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv, static void
int fw_engine) fw_domains_get(struct drm_i915_private *dev_priv, int fw_domains)
{ {
u32 forcewake_ack; struct intel_uncore_forcewake_domain *d;
int id;
if (IS_HASWELL(dev_priv->dev) || IS_BROADWELL(dev_priv->dev)) for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
forcewake_ack = FORCEWAKE_ACK_HSW; fw_domain_wait_ack_clear(d);
else fw_domain_get(d);
forcewake_ack = FORCEWAKE_MT_ACK; fw_domain_posting_read(d);
fw_domain_wait_ack(d);
}
}
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0, static void
FORCEWAKE_ACK_TIMEOUT_MS)) fw_domains_put(struct drm_i915_private *dev_priv, int fw_domains)
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); {
struct intel_uncore_forcewake_domain *d;
int id;
__raw_i915_write32(dev_priv, FORCEWAKE_MT, for_each_fw_domain_mask(d, fw_domains, dev_priv, id) {
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL)); fw_domain_put(d);
/* something from same cacheline, but !FORCEWAKE_MT */ fw_domain_posting_read(d);
__raw_posting_read(dev_priv, ECOBUS); }
}
if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL), static void
FORCEWAKE_ACK_TIMEOUT_MS)) fw_domains_posting_read(struct drm_i915_private *dev_priv)
DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); {
struct intel_uncore_forcewake_domain *d;
int id;
/* No need to do for all, just do for first found */
for_each_fw_domain(d, dev_priv, id) {
fw_domain_posting_read(d);
break;
}
}
static void
fw_domains_reset(struct drm_i915_private *dev_priv, const unsigned fw_domains)
{
struct intel_uncore_forcewake_domain *d;
int id;
for_each_fw_domain_mask(d, fw_domains, dev_priv, id)
fw_domain_reset(d);
fw_domains_posting_read(dev_priv);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
/* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
DRM_ERROR("GT thread status wait timed out\n");
}
static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
int fw_domains)
{
fw_domains_get(dev_priv, fw_domains);
/* WaRsForcewakeWaitTC0:ivb,hsw */ /* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(dev_priv); __gen6_gt_wait_for_thread_c0(dev_priv);
} }
...@@ -128,27 +200,13 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) ...@@ -128,27 +200,13 @@ static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
__raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg); __raw_i915_write32(dev_priv, GTFIFODBG, gtfifodbg);
} }
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
int fw_engine) int fw_domains)
{ {
__raw_i915_write32(dev_priv, FORCEWAKE, 0); fw_domains_put(dev_priv, fw_domains);
/* something from same cacheline, but !FORCEWAKE */
__raw_posting_read(dev_priv, ECOBUS);
gen6_gt_check_fifodbg(dev_priv); gen6_gt_check_fifodbg(dev_priv);
} }
static void __gen7_gt_force_wake_mt_put(struct drm_i915_private *dev_priv,
int fw_engine)
{
__raw_i915_write32(dev_priv, FORCEWAKE_MT,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
__raw_posting_read(dev_priv, ECOBUS);
if (IS_GEN7(dev_priv->dev))
gen6_gt_check_fifodbg(dev_priv);
}
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{ {
int ret = 0; int ret = 0;
...@@ -176,165 +234,16 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) ...@@ -176,165 +234,16 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
return ret; return ret;
} }
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(0xffff));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}
static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_VLV) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Render to ack.\n");
}
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_VLV) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for media to ack.\n");
}
}
static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
int fw_engine) int fw_engine)
{ {
/* Check for Render Engine */ fw_domains_put(dev_priv, fw_engine);
if (FORCEWAKE_RENDER & fw_engine) fw_domains_posting_read(dev_priv);
__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_VLV */
__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
if (!IS_CHERRYVIEW(dev_priv->dev)) if (!IS_CHERRYVIEW(dev_priv->dev))
gen6_gt_check_fifodbg(dev_priv); gen6_gt_check_fifodbg(dev_priv);
} }
static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_DISABLE(0xffff));
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_DISABLE(0xffff));
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_DISABLE(0xffff));
}
static void
__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_RENDER_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Render forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_RENDER_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Render to ack.\n");
}
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Media forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_MEDIA_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Media to ack.\n");
}
/* Check for Blitter Engine */
if (FORCEWAKE_BLITTER & fw_engine) {
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_BLITTER_GEN9) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: Blitter forcewake old ack to clear.\n");
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
if (wait_for_atomic((__raw_i915_read32(dev_priv,
FORCEWAKE_ACK_BLITTER_GEN9) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
}
}
static void
__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
{
/* Check for Render Engine */
if (FORCEWAKE_RENDER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* Check for Media Engine */
if (FORCEWAKE_MEDIA & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* Check for Blitter Engine */
if (FORCEWAKE_BLITTER & fw_engine)
__raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
_MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
}
static void gen6_force_wake_timer(unsigned long arg) static void gen6_force_wake_timer(unsigned long arg)
{ {
struct intel_uncore_forcewake_domain *domain = (void *)arg; struct intel_uncore_forcewake_domain *domain = (void *)arg;
...@@ -402,16 +311,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) ...@@ -402,16 +311,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
if (fw) if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
if (IS_VALLEYVIEW(dev)) fw_domains_reset(dev_priv, FORCEWAKE_ALL);
vlv_force_wake_reset(dev_priv);
else if (IS_GEN6(dev) || IS_GEN7(dev))
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
__gen7_gt_force_wake_mt_reset(dev_priv);
if (IS_GEN9(dev))
__gen9_gt_force_wake_mt_reset(dev_priv);
if (restore) { /* If reset with a user forcewake, try to restore */ if (restore) { /* If reset with a user forcewake, try to restore */
if (fw) if (fw)
...@@ -526,7 +426,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, ...@@ -526,7 +426,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
continue; continue;
domain->wake_count++; domain->wake_count++;
mod_timer_pinned(&domain->timer, jiffies + 1); fw_domain_arm_timer(domain);
} }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
...@@ -535,12 +435,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, ...@@ -535,12 +435,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv,
void assert_force_wake_inactive(struct drm_i915_private *dev_priv) void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
{ {
struct intel_uncore_forcewake_domain *domain; struct intel_uncore_forcewake_domain *domain;
int i; int id;
if (!dev_priv->uncore.funcs.force_wake_get) if (!dev_priv->uncore.funcs.force_wake_get)
return; return;
for_each_fw_domain(domain, dev_priv, i) for_each_fw_domain(domain, dev_priv, id)
WARN_ON(domain->wake_count); WARN_ON(domain->wake_count);
} }
...@@ -708,20 +608,20 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv, ...@@ -708,20 +608,20 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
unsigned fw_domains) unsigned fw_domains)
{ {
struct intel_uncore_forcewake_domain *domain; struct intel_uncore_forcewake_domain *domain;
int i; int id;
if (WARN_ON(!fw_domains)) if (WARN_ON(!fw_domains))
return; return;
/* Ideally GCC would be constant-fold and eliminate this loop */ /* Ideally GCC would be constant-fold and eliminate this loop */
for_each_fw_domain_mask(domain, fw_domains, dev_priv, i) { for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) {
if (domain->wake_count) { if (domain->wake_count) {
fw_domains &= ~(1 << i); fw_domains &= ~(1 << id);
continue; continue;
} }
domain->wake_count++; domain->wake_count++;
mod_timer_pinned(&domain->timer, jiffies + 1); fw_domain_arm_timer(domain);
} }
if (fw_domains) if (fw_domains)
...@@ -1037,27 +937,78 @@ do { \ ...@@ -1037,27 +937,78 @@ do { \
dev_priv->uncore.funcs.mmio_readq = x##_read64; \ dev_priv->uncore.funcs.mmio_readq = x##_read64; \
} while (0) } while (0)
static void fw_domain_init(struct drm_i915_private *dev_priv,
u32 domain_id, u32 reg_set, u32 reg_ack)
{
struct intel_uncore_forcewake_domain *d;
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
d = &dev_priv->uncore.fw_domain[domain_id];
WARN_ON(d->wake_count);
d->wake_count = 0;
d->reg_set = reg_set;
d->reg_ack = reg_ack;
if (IS_GEN6(dev_priv)) {
d->val_reset = 0;
d->val_set = FORCEWAKE_KERNEL;
d->val_clear = 0;
} else {
d->val_reset = _MASKED_BIT_DISABLE(0xffff);
d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
}
if (IS_VALLEYVIEW(dev_priv))
d->reg_post = FORCEWAKE_ACK_VLV;
else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
d->reg_post = ECOBUS;
else
d->reg_post = 0;
d->i915 = dev_priv;
d->id = domain_id;
setup_timer(&d->timer, gen6_force_wake_timer, (unsigned long)d);
dev_priv->uncore.fw_domains |= (1 << domain_id);
}
void intel_uncore_init(struct drm_device *dev) void intel_uncore_init(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *domain;
int i;
__intel_uncore_early_sanitize(dev, false); __intel_uncore_early_sanitize(dev, false);
if (IS_GEN9(dev)) { if (IS_GEN9(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get; dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put; dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
dev_priv->uncore.fw_domains = FORCEWAKE_RENDER | fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_BLITTER | FORCEWAKE_MEDIA; FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(dev)) { } else if (IS_VALLEYVIEW(dev)) {
dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
dev_priv->uncore.fw_domains = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
dev_priv->uncore.funcs.force_wake_get = __gen7_gt_force_wake_mt_get; dev_priv->uncore.funcs.force_wake_get =
dev_priv->uncore.funcs.force_wake_put = __gen7_gt_force_wake_mt_put; fw_domains_get_with_thread_status;
dev_priv->uncore.fw_domains = FORCEWAKE_RENDER; dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(dev)) { } else if (IS_IVYBRIDGE(dev)) {
u32 ecobus; u32 ecobus;
...@@ -1070,40 +1021,32 @@ void intel_uncore_init(struct drm_device *dev) ...@@ -1070,40 +1021,32 @@ void intel_uncore_init(struct drm_device *dev)
* (correctly) interpreted by the test below as MT * (correctly) interpreted by the test below as MT
* forcewake being disabled. * forcewake being disabled.
*/ */
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put =
fw_domains_put_with_fifo;
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
__gen7_gt_force_wake_mt_get(dev_priv, FORCEWAKE_ALL); fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
ecobus = __raw_i915_read32(dev_priv, ECOBUS); ecobus = __raw_i915_read32(dev_priv, ECOBUS);
__gen7_gt_force_wake_mt_put(dev_priv, FORCEWAKE_ALL); fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) { if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
dev_priv->uncore.funcs.force_wake_get =
__gen7_gt_force_wake_mt_get;
dev_priv->uncore.funcs.force_wake_put =
__gen7_gt_force_wake_mt_put;
} else {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n"); DRM_INFO("when using vblank-synced partial screen updates.\n");
dev_priv->uncore.funcs.force_wake_get = fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
__gen6_gt_force_wake_get; FORCEWAKE, FORCEWAKE_ACK);
dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put;
} }
dev_priv->uncore.fw_domains = FORCEWAKE_RENDER;
} else if (IS_GEN6(dev)) { } else if (IS_GEN6(dev)) {
dev_priv->uncore.funcs.force_wake_get = dev_priv->uncore.funcs.force_wake_get =
__gen6_gt_force_wake_get; fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put = dev_priv->uncore.funcs.force_wake_put =
__gen6_gt_force_wake_put; fw_domains_put_with_fifo;
dev_priv->uncore.fw_domains = FORCEWAKE_RENDER; fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
} FORCEWAKE, FORCEWAKE_ACK);
for_each_fw_domain(domain, dev_priv, i) {
domain->i915 = dev_priv;
domain->id = i;
setup_timer(&domain->timer, gen6_force_wake_timer,
(unsigned long)domain);
} }
switch (INTEL_INFO(dev)->gen) { switch (INTEL_INFO(dev)->gen) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment