Commit 67d93ffc authored by Íñigo Huguet, committed by Jakub Kicinski

ptp: vclock: use mutex to fix "sleep on atomic" bug

vclocks were using a spinlock to protect access to their timecounter and
cyclecounter. Access to the timecounter/cyclecounter is backed by the
same driver callbacks that are used for non-virtual PHCs, but the use of
the spinlock imposes a new limitation that didn't exist previously: the
callbacks are now called in atomic context, so they must not sleep.
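
For illustration only (not part of the patch), a minimal sketch of the
problematic pattern, assuming the simplified call chain of the pre-patch
vclock code:

  /* Illustrative sketch, not a verbatim excerpt: timecounter_read() ends
   * up in the vclock cyclecounter's ->read() callback, which is backed
   * by the parent PHC driver's gettime callback; in drivers such as sfc
   * or ice that callback may sleep.
   */
  spin_lock_irqsave(&vclock->lock, flags);   /* enters atomic context */
  ns = timecounter_read(&vclock->tc);        /* may reach a sleeping driver callback */
  spin_unlock_irqrestore(&vclock->lock, flags);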

Some drivers, such as sfc or ice, may sleep in these callbacks, causing
errors like "BUG: scheduling while atomic: ptp5/25223/0x00000002".

Fix it by replacing the vclock's spinlock with a mutex. This fixes the
mentioned bug and doesn't introduce longer delays.
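
As a sketch of the replacement pattern used throughout the hunks below
(the diff shows the exact code for each callback):

  /* A mutex keeps the locked section sleepable; an interrupted lock
   * attempt bails out with -EINTR instead of blocking.
   */
  if (mutex_lock_interruptible(&vclock->lock))
          return -EINTR;
  ns = timecounter_read(&vclock->tc);
  mutex_unlock(&vclock->lock);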

I've tested synchronizing several combinations of clocks:
- vclock->sysclock
- sysclock->vclock
- vclock->vclock
- hardware PHC in a different NIC -> vclock
- created 4 vclocks and launched 4 parallel phc2sys processes with
  lockdep enabled

In all cases, the delays reported by phc2sys are in the same range of
values as before applying the patch.

Link: https://lore.kernel.org/netdev/69d0ff33-bd32-6aa5-d36c-fbdc3c01337c@redhat.com/
Fixes: 5d43f951 ("ptp: add ptp virtual clock driver framework")
Reported-by: Yalin Li <yalli@redhat.com>
Suggested-by: Richard Cochran <richardcochran@gmail.com>
Tested-by: Miroslav Lichvar <mlichvar@redhat.com>
Signed-off-by: Íñigo Huguet <ihuguet@redhat.com>
Acked-by: Richard Cochran <richardcochran@gmail.com>
Link: https://lore.kernel.org/r/20230221130616.21837-1-ihuguet@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 5b7c4cab
@@ -66,7 +66,7 @@ struct ptp_vclock {
 	struct hlist_node vclock_hash_node;
 	struct cyclecounter cc;
 	struct timecounter tc;
-	spinlock_t lock;	/* protects tc/cc */
+	struct mutex lock;	/* protects tc/cc */
 };
 
 /*
@@ -43,16 +43,16 @@ static void ptp_vclock_hash_del(struct ptp_vclock *vclock)
 static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 {
 	struct ptp_vclock *vclock = info_to_vclock(ptp);
-	unsigned long flags;
 	s64 adj;
 
 	adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT;
 	adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR);
 
-	spin_lock_irqsave(&vclock->lock, flags);
+	if (mutex_lock_interruptible(&vclock->lock))
+		return -EINTR;
 	timecounter_read(&vclock->tc);
 	vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj;
-	spin_unlock_irqrestore(&vclock->lock, flags);
+	mutex_unlock(&vclock->lock);
 
 	return 0;
 }
@@ -60,11 +60,11 @@ static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
 static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta)
 {
 	struct ptp_vclock *vclock = info_to_vclock(ptp);
-	unsigned long flags;
 
-	spin_lock_irqsave(&vclock->lock, flags);
+	if (mutex_lock_interruptible(&vclock->lock))
+		return -EINTR;
 	timecounter_adjtime(&vclock->tc, delta);
-	spin_unlock_irqrestore(&vclock->lock, flags);
+	mutex_unlock(&vclock->lock);
 
 	return 0;
 }
@@ -73,12 +73,12 @@ static int ptp_vclock_gettime(struct ptp_clock_info *ptp,
 			      struct timespec64 *ts)
 {
 	struct ptp_vclock *vclock = info_to_vclock(ptp);
-	unsigned long flags;
 	u64 ns;
 
-	spin_lock_irqsave(&vclock->lock, flags);
+	if (mutex_lock_interruptible(&vclock->lock))
+		return -EINTR;
 	ns = timecounter_read(&vclock->tc);
-	spin_unlock_irqrestore(&vclock->lock, flags);
+	mutex_unlock(&vclock->lock);
 	*ts = ns_to_timespec64(ns);
 
 	return 0;
@@ -91,7 +91,6 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
 	struct ptp_vclock *vclock = info_to_vclock(ptp);
 	struct ptp_clock *pptp = vclock->pclock;
 	struct timespec64 pts;
-	unsigned long flags;
 	int err;
 	u64 ns;
@@ -99,9 +98,10 @@ static int ptp_vclock_gettimex(struct ptp_clock_info *ptp,
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&vclock->lock, flags);
+	if (mutex_lock_interruptible(&vclock->lock))
+		return -EINTR;
 	ns = timecounter_cyc2time(&vclock->tc, timespec64_to_ns(&pts));
-	spin_unlock_irqrestore(&vclock->lock, flags);
+	mutex_unlock(&vclock->lock);
 	*ts = ns_to_timespec64(ns);
@@ -113,11 +113,11 @@ static int ptp_vclock_settime(struct ptp_clock_info *ptp,
 {
 	struct ptp_vclock *vclock = info_to_vclock(ptp);
 	u64 ns = timespec64_to_ns(ts);
-	unsigned long flags;
 
-	spin_lock_irqsave(&vclock->lock, flags);
+	if (mutex_lock_interruptible(&vclock->lock))
+		return -EINTR;
 	timecounter_init(&vclock->tc, &vclock->cc, ns);
-	spin_unlock_irqrestore(&vclock->lock, flags);
+	mutex_unlock(&vclock->lock);
 
 	return 0;
 }
@@ -127,7 +127,6 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
 {
 	struct ptp_vclock *vclock = info_to_vclock(ptp);
 	struct ptp_clock *pptp = vclock->pclock;
-	unsigned long flags;
 	int err;
 	u64 ns;
@@ -135,9 +134,10 @@ static int ptp_vclock_getcrosststamp(struct ptp_clock_info *ptp,
 	if (err)
 		return err;
 
-	spin_lock_irqsave(&vclock->lock, flags);
+	if (mutex_lock_interruptible(&vclock->lock))
+		return -EINTR;
 	ns = timecounter_cyc2time(&vclock->tc, ktime_to_ns(xtstamp->device));
-	spin_unlock_irqrestore(&vclock->lock, flags);
+	mutex_unlock(&vclock->lock);
 	xtstamp->device = ns_to_ktime(ns);
@@ -205,7 +205,7 @@ struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock)
 	INIT_HLIST_NODE(&vclock->vclock_hash_node);
 
-	spin_lock_init(&vclock->lock);
+	mutex_init(&vclock->lock);
 
 	vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev);
 	if (IS_ERR_OR_NULL(vclock->clock)) {
@@ -269,7 +269,6 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
 {
 	unsigned int hash = vclock_index % HASH_SIZE(vclock_hash);
 	struct ptp_vclock *vclock;
-	unsigned long flags;
 	u64 ns;
 	u64 vclock_ns = 0;
@@ -281,9 +280,10 @@ ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, int vclock_index)
 		if (vclock->clock->index != vclock_index)
 			continue;
 
-		spin_lock_irqsave(&vclock->lock, flags);
+		if (mutex_lock_interruptible(&vclock->lock))
+			break;
 		vclock_ns = timecounter_cyc2time(&vclock->tc, ns);
-		spin_unlock_irqrestore(&vclock->lock, flags);
+		mutex_unlock(&vclock->lock);
 		break;
 	}