Commit 2e81bc61 authored by Daniele Ceraolo Spurio, committed by Tvrtko Ursulin

drm/i915: skip forcewake actions on forcewake-less uncore

We always call some of the forcewake setup/cleanup functions, even if
the feature is not actually available. Skipping them when forcewake is
absent saves a few operations on older gens and prepares us for having
a forcewake-less display uncore.

v2: do not make suspend/resume functions forcewake-specific (Chris,
Tvrtko), use GEM_BUG_ON in internal forcewake-only functions (Tvrtko)
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190620010021.20637-5-daniele.ceraolospurio@intel.com
parent 01385758
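
For readers skimming the diff below, the shape of the change is a capability-gated split: the forcewake flag is probed once at init, the uncore is then initialised through either a raw or a forcewake-aware path, and the suspend/resume-style hooks simply early-return when forcewake is absent. The following is a minimal, self-contained sketch of that pattern in plain C — it is not i915 code, and every name in it (fake_uncore, fake_raw_init, fake_forcewake_init, fake_suspend) is hypothetical and only illustrates the structure the patch moves to.

/*
 * Illustrative sketch only: capability flag checked once, init dispatched
 * to a capability-specific path, hooks early-return without the capability.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_uncore {
	bool has_forcewake;              /* stands in for UNCORE_HAS_FORCEWAKE */
	void (*write)(const char *what); /* stands in for the mmio vfuncs */
};

static void raw_write(const char *what)
{
	printf("raw mmio write: %s\n", what);
}

static void fw_write(const char *what)
{
	printf("forcewake-protected write: %s\n", what);
}

/* analogous in spirit to uncore_raw_init(): no forcewake bookkeeping at all */
static void fake_raw_init(struct fake_uncore *u)
{
	u->write = raw_write;
}

/* analogous in spirit to uncore_forcewake_init(): domains, notifiers, ... */
static void fake_forcewake_init(struct fake_uncore *u)
{
	u->write = fw_write;
}

static void fake_init(struct fake_uncore *u, bool has_forcewake)
{
	u->has_forcewake = has_forcewake;

	if (!u->has_forcewake)
		fake_raw_init(u);
	else
		fake_forcewake_init(u);
}

/* analogous in spirit to intel_uncore_suspend(): nothing to save without fw */
static void fake_suspend(struct fake_uncore *u)
{
	if (!u->has_forcewake)
		return;

	printf("saving forcewake state\n");
}

int main(void)
{
	struct fake_uncore old_gen, new_gen;

	fake_init(&old_gen, false);
	fake_init(&new_gen, true);

	old_gen.write("reg A");
	new_gen.write("reg A");

	fake_suspend(&old_gen); /* early-returns, no forcewake work */
	fake_suspend(&new_gen);

	return 0;
}
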
@@ -485,12 +485,10 @@ check_for_unclaimed_mmio(struct intel_uncore *uncore)
 	return ret;
 }
 
-static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
-					  unsigned int restore_forcewake)
+static void forcewake_early_sanitize(struct intel_uncore *uncore,
+				     unsigned int restore_forcewake)
 {
-	/* clear out unclaimed reg detection bit */
-	if (check_for_unclaimed_mmio(uncore))
-		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
 
 	/* WaDisableShadowRegForCpd:chv */
 	if (IS_CHERRYVIEW(uncore->i915)) {
@@ -515,6 +513,9 @@ static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
 
 void intel_uncore_suspend(struct intel_uncore *uncore)
 {
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
 	iosf_mbi_punit_acquire();
 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
 		&uncore->pmic_bus_access_nb);
@@ -526,14 +527,23 @@ void intel_uncore_resume_early(struct intel_uncore *uncore)
 {
 	unsigned int restore_forcewake;
 
+	if (intel_uncore_unclaimed_mmio(uncore))
+		DRM_DEBUG("unclaimed mmio detected on resume, clearing\n");
+
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
 	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
-	__intel_uncore_early_sanitize(uncore, restore_forcewake);
+	forcewake_early_sanitize(uncore, restore_forcewake);
 
 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
 {
+	if (!intel_uncore_has_forcewake(uncore))
+		return;
+
 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
 }
 
@@ -1348,8 +1358,7 @@ static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
 {
 	struct drm_i915_private *i915 = uncore->i915;
 
-	if (!intel_uncore_has_forcewake(uncore))
-		return;
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
 
 	if (INTEL_GEN(i915) >= 11) {
 		int i;
@@ -1542,36 +1551,29 @@ void intel_uncore_init_early(struct intel_uncore *uncore,
 	uncore->rpm = &i915->runtime_pm;
 }
 
-int intel_uncore_init_mmio(struct intel_uncore *uncore)
+static void uncore_raw_init(struct intel_uncore *uncore)
 {
-	struct drm_i915_private *i915 = uncore->i915;
-	int ret;
+	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
 
-	ret = uncore_mmio_setup(uncore);
-	if (ret)
-		return ret;
+	if (IS_GEN(uncore->i915, 5)) {
+		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
+		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
+	} else {
+		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
+		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
+	}
+}
 
-	i915_check_vgpu(i915);
+static void uncore_forcewake_init(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore->i915;
 
-	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
-		uncore->flags |= UNCORE_HAS_FORCEWAKE;
+	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
 
 	intel_uncore_fw_domains_init(uncore);
-	__intel_uncore_early_sanitize(uncore, 0);
-
-	uncore->unclaimed_mmio_check = 1;
-	uncore->pmic_bus_access_nb.notifier_call =
-		i915_pmic_bus_access_notifier;
+	forcewake_early_sanitize(uncore, 0);
 
-	if (!intel_uncore_has_forcewake(uncore)) {
-		if (IS_GEN(i915, 5)) {
-			ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
-			ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
-		} else {
-			ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
-			ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
-		}
-	} else if (IS_GEN_RANGE(i915, 6, 7)) {
+	if (IS_GEN_RANGE(i915, 6, 7)) {
 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
 
 		if (IS_VALLEYVIEW(i915)) {
@@ -1585,7 +1587,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 			ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
 			ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
 			ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-
 		} else {
 			ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
 			ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
@@ -1600,6 +1601,31 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 		ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
 	}
 
+	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
+	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+	struct drm_i915_private *i915 = uncore->i915;
+	int ret;
+
+	ret = uncore_mmio_setup(uncore);
+	if (ret)
+		return ret;
+
+	i915_check_vgpu(i915);
+
+	if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+		uncore->flags |= UNCORE_HAS_FORCEWAKE;
+
+	uncore->unclaimed_mmio_check = 1;
+
+	if (!intel_uncore_has_forcewake(uncore))
+		uncore_raw_init(uncore);
+	else
+		uncore_forcewake_init(uncore);
+
 	/* make sure fw funcs are set if and only if we have fw*/
 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
@@ -1615,7 +1641,9 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 	if (IS_GEN_RANGE(i915, 6, 7))
 		uncore->flags |= UNCORE_HAS_FIFO;
 
-	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+	/* clear out unclaimed reg detection bit */
+	if (check_for_unclaimed_mmio(uncore))
+		DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
 	return 0;
 }
@@ -1628,41 +1656,44 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
 void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
 {
 	struct drm_i915_private *i915 = uncore->i915;
+	enum forcewake_domains fw_domains = uncore->fw_domains;
+	enum forcewake_domain_id domain_id;
+	int i;
 
-	if (INTEL_GEN(i915) >= 11) {
-		enum forcewake_domains fw_domains = uncore->fw_domains;
-		enum forcewake_domain_id domain_id;
-		int i;
+	if (!intel_uncore_has_forcewake(uncore) || INTEL_GEN(i915) < 11)
+		return;
 
-		for (i = 0; i < I915_MAX_VCS; i++) {
-			domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
+	for (i = 0; i < I915_MAX_VCS; i++) {
+		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
 
-			if (HAS_ENGINE(i915, _VCS(i)))
-				continue;
+		if (HAS_ENGINE(i915, _VCS(i)))
+			continue;
 
-			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(uncore, domain_id);
-		}
+		if (fw_domains & BIT(domain_id))
+			fw_domain_fini(uncore, domain_id);
+	}
 
-		for (i = 0; i < I915_MAX_VECS; i++) {
-			domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
+	for (i = 0; i < I915_MAX_VECS; i++) {
+		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
 
-			if (HAS_ENGINE(i915, _VECS(i)))
-				continue;
+		if (HAS_ENGINE(i915, _VECS(i)))
+			continue;
 
-			if (fw_domains & BIT(domain_id))
-				fw_domain_fini(uncore, domain_id);
-		}
+		if (fw_domains & BIT(domain_id))
+			fw_domain_fini(uncore, domain_id);
 	}
 }
 
 void intel_uncore_fini_mmio(struct intel_uncore *uncore)
 {
-	iosf_mbi_punit_acquire();
-	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
-		&uncore->pmic_bus_access_nb);
-	intel_uncore_forcewake_reset(uncore);
-	iosf_mbi_punit_release();
+	if (intel_uncore_has_forcewake(uncore)) {
+		iosf_mbi_punit_acquire();
+		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+			&uncore->pmic_bus_access_nb);
+		intel_uncore_forcewake_reset(uncore);
+		iosf_mbi_punit_release();
+	}
 
 	uncore_mmio_cleanup(uncore);
 }