Commit 99264a61 authored by Daniel Vetter

drm/vblank: Fixup and document timestamp update/read barriers

This was a bit too much cargo-culted, so let's make it solid:
- vblank->count doesn't need to be an atomic: writes are always done
  under the protection of dev->vblank_time_lock. Switch to an unsigned
  long instead and update the comments. Note that atomic_read is just a
  normal read of a volatile variable, so there is no need to audit all
  the read-side accesses specifically.

- The barriers for the vblank counter seqlock weren't complete: the
  read side was missing the first barrier between the counter read and
  the timestamp read; it only had a barrier between the timestamp and
  the counter read. We need both (see the sketch below this list).

- Barriers weren't properly documented. Since barriers only work if
  you have them on both sides of the transaction, it's prudent to
  reference where the other side is. To avoid duplicating the
  write-side comment three times, extract a little store_vblank()
  helper. In that helper also assert that we do indeed hold
  dev->vblank_time_lock, since in some cases the lock is acquired a
  few functions up in the call chain.
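
For reference (not part of the patch): the pairing described above is the
classic seqlock pattern. Here is a minimal, self-contained userspace sketch
of it, with C11 fences standing in for the kernel's smp_wmb()/smp_rmb();
all names in it are invented for illustration:

	#include <stdatomic.h>
	#include <sys/time.h>

	#define RB_SIZE 16

	struct vbl {
		atomic_ulong count;           /* bumped only under the writer's lock */
		struct timeval time[RB_SIZE]; /* timestamp slot, indexed by count */
	};

	/* Write side (writer lock held): fill the slot the new count will
	 * point at, then latch it by bumping the counter. The first fence
	 * orders the timestamp store before the counter store; the second
	 * orders the counter store before the next timestamp store,
	 * mirroring the smp_wmb() pair in store_vblank() below. */
	static void store_sample(struct vbl *v, unsigned long inc,
				 const struct timeval *t)
	{
		unsigned long cur = atomic_load_explicit(&v->count,
							 memory_order_relaxed);

		v->time[(cur + inc) % RB_SIZE] = *t;
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&v->count, cur + inc, memory_order_relaxed);
		atomic_thread_fence(memory_order_release);
	}

	/* Read side (lockless): a barrier between the counter read and the
	 * timestamp read, and another before the recheck, as in
	 * drm_vblank_count_and_time(). A torn timestamp read is benign
	 * because the recheck catches it and we retry. */
	static unsigned long read_sample(struct vbl *v, struct timeval *t)
	{
		unsigned long cur;

		do {
			cur = atomic_load_explicit(&v->count, memory_order_relaxed);
			atomic_thread_fence(memory_order_acquire);
			*t = v->time[cur % RB_SIZE];
			atomic_thread_fence(memory_order_acquire);
		} while (cur != atomic_load_explicit(&v->count, memory_order_relaxed));

		return cur;
	}

The writer never publishes a count before its timestamp is in place, and
the reader retries whenever the counter moved underneath it.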

Spotted while reviewing a patch from Chris Wilson to add a fastpath to
the vblank_wait ioctl.

v2: Add comment to better explain how store_vblank works, suggested by
Chris.

v3: Peter noticed that as-is the 2nd smp_wmb() is redundant with the
implicit barrier in the spin_unlock(). But that can only be proven by
auditing all callers, and my point in extracting this little helper was
to localize all the locking in just one place. Hence I think that
additional optimization is too risky.
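
For context, the write-side call pattern Peter's observation relies on
looks like this (a condensed illustration of the callers patched below,
not new code):

	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
	/* ... */
	store_vblank(dev, crtc, 1, &tvblank);	/* ends with the 2nd smp_wmb() */
	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);

The unlock has release semantics, so it would order the counter update
anyway, but relying on that means proving that every caller really does
drop the lock immediately afterwards.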

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mario Kleiner <mario.kleiner.de@gmail.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Michel Dänzer <michel@daenzer.net>
Cc: Peter Hurley <peter@hurleysoftware.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-and-tested-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parent a3c6d686
@@ -74,6 +74,36 @@ module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
 module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 
+static void store_vblank(struct drm_device *dev, int crtc,
+			 unsigned vblank_count_inc,
+			 struct timeval *t_vblank)
+{
+	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+	u32 tslot;
+
+	assert_spin_locked(&dev->vblank_time_lock);
+
+	if (t_vblank) {
+		/* All writers hold the spinlock, but readers are serialized by
+		 * the latching of vblank->count below.
+		 */
+		tslot = vblank->count + vblank_count_inc;
+		vblanktimestamp(dev, crtc, tslot) = *t_vblank;
+	}
+
+	/*
+	 * vblank timestamp updates are protected on the write side with
+	 * vblank_time_lock, but on the read side done locklessly using a
+	 * sequence-lock on the vblank counter. Ensure correct ordering using
+	 * memory barriers. We need the barrier both before and also after the
+	 * counter update to synchronize with the next timestamp write.
+	 * The read-side barriers for this are in drm_vblank_count_and_time.
+	 */
+	smp_wmb();
+	vblank->count += vblank_count_inc;
+	smp_wmb();
+}
+
 /**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
@@ -93,7 +123,7 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 {
 	struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
-	u32 cur_vblank, diff, tslot;
+	u32 cur_vblank, diff;
 	bool rc;
 	struct timeval t_vblank;
@@ -129,18 +159,12 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 	if (diff == 0)
 		return;
 
-	/* Reinitialize corresponding vblank timestamp if high-precision query
-	 * available. Skip this step if query unsupported or failed. Will
-	 * reinitialize delayed at next vblank interrupt in that case.
+	/*
+	 * Only reinitialize corresponding vblank timestamp if high-precision query
+	 * available and didn't fail. Will reinitialize delayed at next vblank
+	 * interrupt in that case.
 	 */
-	if (rc) {
-		tslot = atomic_read(&vblank->count) + diff;
-		vblanktimestamp(dev, crtc, tslot) = t_vblank;
-	}
-
-	smp_mb__before_atomic();
-	atomic_add(diff, &vblank->count);
-	smp_mb__after_atomic();
+	store_vblank(dev, crtc, diff, rc ? &t_vblank : NULL);
 }
/*
@@ -218,7 +242,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	/* Compute time difference to stored timestamp of last vblank
 	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
 	 */
-	vblcount = atomic_read(&vblank->count);
+	vblcount = vblank->count;
 	diff_ns = timeval_to_ns(&tvblank) -
 		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
@@ -234,17 +258,8 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	 * available. In that case we can't account for this and just
 	 * hope for the best.
 	 */
-	if (vblrc && (abs64(diff_ns) > 1000000)) {
-		/* Store new timestamp in ringbuffer. */
-		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-
-		/* Increment cooked vblank count. This also atomically commits
-		 * the timestamp computed above.
-		 */
-		smp_mb__before_atomic();
-		atomic_inc(&vblank->count);
-		smp_mb__after_atomic();
-	}
+	if (vblrc && (abs64(diff_ns) > 1000000))
+		store_vblank(dev, crtc, 1, &tvblank);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
@@ -852,7 +867,7 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
 	if (WARN_ON(crtc >= dev->num_crtcs))
 		return 0;
 
-	return atomic_read(&vblank->count);
+	return vblank->count;
 }
 EXPORT_SYMBOL(drm_vblank_count);
@@ -897,16 +912,17 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 	if (WARN_ON(crtc >= dev->num_crtcs))
 		return 0;
 
-	/* Read timestamp from slot of _vblank_time ringbuffer
-	 * that corresponds to current vblank count. Retry if
-	 * count has incremented during readout. This works like
-	 * a seqlock.
+	/*
+	 * Vblank timestamps are read locklessly. To ensure consistency the vblank
+	 * counter is rechecked and ordering is ensured using memory barriers.
+	 * This works like a seqlock. The write-side barriers are in store_vblank.
 	 */
 	do {
-		cur_vblank = atomic_read(&vblank->count);
+		cur_vblank = vblank->count;
+		smp_rmb();
 		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
 		smp_rmb();
-	} while (cur_vblank != atomic_read(&vblank->count));
+	} while (cur_vblank != vblank->count);
 
 	return cur_vblank;
 }
@@ -1715,7 +1731,7 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 */
 
 	/* Get current timestamp and count. */
-	vblcount = atomic_read(&vblank->count);
+	vblcount = vblank->count;
 	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
 
 	/* Compute time difference to timestamp of last vblank */
@@ -1731,20 +1747,11 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
 	 * e.g., due to spurious vblank interrupts. We need to
 	 * ignore those for accounting.
 	 */
-	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
-		/* Store new timestamp in ringbuffer. */
-		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
-
-		/* Increment cooked vblank count. This also atomically commits
-		 * the timestamp computed above.
-		 */
-		smp_mb__before_atomic();
-		atomic_inc(&vblank->count);
-		smp_mb__after_atomic();
-	} else {
+	if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS)
+		store_vblank(dev, crtc, 1, &tvblank);
+	else
 		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
 			  crtc, (int) diff_ns);
-	}
 
 	spin_unlock(&dev->vblank_time_lock);
@@ -686,9 +686,13 @@ struct drm_pending_vblank_event {
 struct drm_vblank_crtc {
 	struct drm_device *dev;		/* pointer to the drm_device */
 	wait_queue_head_t queue;	/**< VBLANK wait queue */
-	struct timeval time[DRM_VBLANKTIME_RBSIZE];	/**< timestamp of current count */
 	struct timer_list disable_timer;	/* delayed disable timer */
-	atomic_t count;			/**< number of VBLANK interrupts */
+
+	/* vblank counter, protected by dev->vblank_time_lock for writes */
+	unsigned long count;
+	/* vblank timestamps, protected by dev->vblank_time_lock for writes */
+	struct timeval time[DRM_VBLANKTIME_RBSIZE];
+
 	atomic_t refcount;		/* number of users of vblank interrupts per crtc */
 	u32 last;			/* protected by dev->vbl_lock, used */
 					/* for wraparound handling */