Commit 408464b4 authored by Chris Wilson

drm/i915/gt: Pull checking rps->pm_events under the irq_lock

Avoid angering KCSAN by serialising the read of pm_events with the
write in rps_disable_interrupts().

[ 6268.713419] BUG: KCSAN: data-race in intel_rps_park [i915] / rps_work [i915]
[ 6268.713437]
[ 6268.713449] write to 0xffff8881eda8efac of 4 bytes by task 1127 on cpu 3:
[ 6268.713680]  intel_rps_park+0x136/0x260 [i915]
[ 6268.713905]  __gt_park+0x61/0xa0 [i915]
[ 6268.714128]  ____intel_wakeref_put_last+0x42/0x90 [i915]
[ 6268.714352]  __intel_wakeref_put_work+0xd3/0xf0 [i915]
[ 6268.714369]  process_one_work+0x3b1/0x690
[ 6268.714384]  worker_thread+0x80/0x670
[ 6268.714398]  kthread+0x19a/0x1e0
[ 6268.714412]  ret_from_fork+0x1f/0x30
[ 6268.714423]
[ 6268.714435] read to 0xffff8881eda8efac of 4 bytes by task 950 on cpu 2:
[ 6268.714664]  rps_work+0xc2/0x680 [i915]
[ 6268.714679]  process_one_work+0x3b1/0x690
[ 6268.714693]  worker_thread+0x80/0x670
[ 6268.714707]  kthread+0x19a/0x1e0
[ 6268.714720]  ret_from_fork+0x1f/0x30

v2: Mark all reads and writes of rps->pm_events.

The flow of enabling/disabling rps is strongly ordered, so the writes and
interrupt generation are also strongly ordered -- but this may not be
visible to the compiler, so provide annotations.
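
As an aside, not part of the patch: READ_ONCE()/WRITE_ONCE() add no
ordering or atomicity beyond forcing a single untorn access; they document
that the lockless access is intentional, which KCSAN's default
configuration then stops reporting. A minimal userspace sketch of the same
annotation pattern, with the kernel macros approximated by volatile casts
and a bare pm_events standing in for rps->pm_events:

	#include <pthread.h>
	#include <stdio.h>

	/* Simplified stand-ins for the kernel macros: force exactly one
	 * untorn load/store that the compiler may not duplicate or elide. */
	#define READ_ONCE(x)		(*(volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

	static unsigned int pm_events;	/* stands in for rps->pm_events */

	/* Mirrors rps_disable_interrupts(): clear the event mask. A plain
	 * store here would race with the plain read below under KCSAN. */
	static void *park_thread(void *arg)
	{
		WRITE_ONCE(pm_events, 0);
		return NULL;
	}

	/* Mirrors rps_work(): snapshot the event mask once, locklessly. */
	static void *work_thread(void *arg)
	{
		unsigned int events = READ_ONCE(pm_events);

		printf("observed events: %#x\n", events);
		return NULL;
	}

	int main(void)
	{
		pthread_t park, work;

		pm_events = 0x6;	/* arbitrary non-zero bits for the demo */
		pthread_create(&park, NULL, park_thread, NULL);
		pthread_create(&work, NULL, work_thread, NULL);
		pthread_join(park, NULL);
		pthread_join(work, NULL);
		return 0;
	}

Build with "gcc -pthread"; the reader may observe either 0x6 or 0. Both
outcomes are acceptable here, and the marked accesses make that explicit.
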
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200311092624.10012-1-chris@chris-wilson.co.uk
parent 29e6ecf3
@@ -57,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
 	if (val < rps->max_freq_softlimit)
 		mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
 
-	mask &= rps->pm_events;
+	mask &= READ_ONCE(rps->pm_events);
 
 	return rps_pm_sanitize_mask(rps, ~mask);
 }
@@ -70,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
 static void rps_enable_interrupts(struct intel_rps *rps)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
+	u32 events;
 
 	rps_reset_ei(rps);
 
 	if (IS_VALLEYVIEW(gt->i915))
 		/* WaGsvRC0ResidencyMethod:vlv */
-		rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+		events = GEN6_PM_RP_UP_EI_EXPIRED;
 	else
-		rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
-				  GEN6_PM_RP_DOWN_THRESHOLD |
-				  GEN6_PM_RP_DOWN_TIMEOUT);
+		events = (GEN6_PM_RP_UP_THRESHOLD |
+			  GEN6_PM_RP_DOWN_THRESHOLD |
+			  GEN6_PM_RP_DOWN_TIMEOUT);
+	WRITE_ONCE(rps->pm_events, events);
 
 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_enable_irq(gt, rps->pm_events);
 	spin_unlock_irq(&gt->irq_lock);
@@ -117,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
 
-	rps->pm_events = 0;
-
+	WRITE_ONCE(rps->pm_events, 0);
 	set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
 
 	spin_lock_irq(&gt->irq_lock);
@@ -1459,12 +1460,12 @@ static void rps_work(struct work_struct *work)
 	u32 pm_iir = 0;
 
 	spin_lock_irq(&gt->irq_lock);
-	pm_iir = fetch_and_zero(&rps->pm_iir);
+	pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
 	client_boost = atomic_read(&rps->num_waiters);
 	spin_unlock_irq(&gt->irq_lock);
 
 	/* Make sure we didn't queue anything we're not going to process. */
-	if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+	if (!pm_iir && !client_boost)
 		goto out;
 
 	mutex_lock(&rps->lock);
@@ -1560,11 +1561,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
+	u32 events;
 
-	if (pm_iir & rps->pm_events) {
+	events = pm_iir & READ_ONCE(rps->pm_events);
+	if (events) {
 		spin_lock(&gt->irq_lock);
-		gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
-		rps->pm_iir |= pm_iir & rps->pm_events;
+
+		gen6_gt_pm_mask_irq(gt, events);
+		rps->pm_iir |= events;
+
 		schedule_work(&rps->work);
 		spin_unlock(&gt->irq_lock);
 	}
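
For reference, not part of this change: fetch_and_zero() is i915's
read-and-clear helper from its utility headers, approximately:

	#define fetch_and_zero(ptr) ({			\
		typeof(*ptr) __T = *(ptr);		\
		*(ptr) = (typeof(*ptr))0;		\
		__T;					\
	})

It is not atomic by itself, which is why rps_work() performs it under
gt->irq_lock, the same lock gen6_rps_irq_handler() holds while setting
bits in rps->pm_iir.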