Commit 44c32039 authored by David S. Miller

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:
====================
pull-request: mlx5-next 2021-02-16

The patches in this PR have already been submitted and reviewed through the
netdev and rdma mailing lists.

The series includes the mlx5 HW bits and definitions for real-time clock
translation, together with the handling in the mlx5 driver clock module
needed to enable and support this mode [1].

[1] https://patchwork.kernel.org/project/netdevbpf/patch/20210212223042.449816-7-saeed@kernel.org/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 18af77c5 432119de
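
For illustration only (not part of the commit): a minimal userspace C sketch of the two CQE timestamp translations this series wires up, assuming the layout the patches describe. In real-time mode the 64-bit CQE timestamp packs UTC seconds in the high 32 bits and nanoseconds in the low 32 bits; in free-running mode it is a raw cycle count that still has to go through the timecounter. The free-running conversion below skips the driver's mult/shift scaling and simply divides by a device frequency in kHz; all names and values here are illustrative, not the driver's API.

/*
 * Illustrative sketch, not kernel code: names and the kHz conversion
 * are assumptions for demonstration only.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Real-time mode: seconds in the high 32 bits, nanoseconds in the low 32. */
static uint64_t real_time_cqe_to_ns(uint64_t cqe_ts)
{
	uint64_t sec  = cqe_ts >> 32;
	uint64_t nsec = cqe_ts & 0xFFFFFFFFULL;

	return sec * NSEC_PER_SEC + nsec;
}

/* Free-running mode: raw cycles of a hypothetical freq_khz device clock. */
static uint64_t free_running_cqe_to_ns(uint64_t cqe_cycles, uint64_t freq_khz)
{
	return cqe_cycles * 1000000ULL / freq_khz;
}

int main(void)
{
	uint64_t rt_ts = ((uint64_t)5 << 32) | 250;	/* 5 s + 250 ns */
	uint64_t fr_ts = 156250;			/* cycles @ 156.25 MHz */

	printf("real time   : %llu ns\n",
	       (unsigned long long)real_time_cqe_to_ns(rt_ts));
	printf("free running: %llu ns\n",
	       (unsigned long long)free_running_cqe_to_ns(fr_ts, 156250));
	return 0;
}

In the driver itself the choice between the two translations is made once per SQ/RQ and dispatched through INDIRECT_CALL_2(), as the hunks below show.
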
@@ -57,6 +57,7 @@
#include "en/fs.h"
#include "en/qos.h"
#include "lib/hv_vhca.h"
#include "lib/clock.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -393,6 +394,7 @@ struct mlx5e_txqsq {
u32 rate_limit;
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -655,6 +657,7 @@ struct mlx5e_rq {
/* XDP read-mostly */
struct xdp_rxq_info xdp_rxq;
cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
@@ -3,7 +3,6 @@
#include "en/ptp.h"
#include "en/txrx.h"
#include "lib/clock.h"
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
@@ -70,6 +69,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
int budget)
{
struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
struct mlx5e_txqsq *sq = &ptpsq->txqsq;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -77,7 +77,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
goto out;
}
hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe));
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
ptpsq->cq_stats->cqe++;
@@ -183,6 +183,9 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
mlx5_real_time_cyc2time :
mlx5_timecounter_cyc2time;
node = dev_to_node(mlx5_core_dma_dev(mdev));
@@ -26,6 +26,13 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
clock, cqe_ts);
}
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
@@ -449,6 +449,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = xsk_pool;
rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ?
mlx5_real_time_cyc2time :
mlx5_timecounter_cyc2time;
if (rq->xsk_pool)
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
@@ -680,7 +683,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = rq->mdev;
u8 ts_format;
void *in;
void *rqc;
void *wq;
@@ -693,6 +696,9 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
if (!in)
return -ENOMEM;
ts_format = mlx5_is_real_time_rq(mdev) ?
MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -700,6 +706,7 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, ts_format, ts_format);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -1179,6 +1186,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
sq->stop_room = param->stop_room;
sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
mlx5_real_time_cyc2time :
mlx5_timecounter_cyc2time;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1212,6 +1222,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
struct mlx5e_create_sq_param *csp,
u32 *sqn)
{
u8 ts_format;
void *in;
void *sqc;
void *wq;
@@ -1224,6 +1235,9 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
if (!in)
return -ENOMEM;
ts_format = mlx5_is_real_time_sq(mdev) ?
MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME :
MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -1232,6 +1246,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
MLX5_SET(sqc, sqc, cqn, csp->cqn);
MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
MLX5_SET(sqc, sqc, ts_format, ts_format);
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
@@ -47,7 +47,6 @@
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
@@ -1062,9 +1061,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
}
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1667,9 +1665,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
}
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
skb_hwtstamps(skb)->hwtstamp =
mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -39,7 +39,6 @@
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
#include "lib/clock.h"
#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -802,7 +801,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct skb_shared_hwtstamps hwts = {};
u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
if (sq->ptpsq)
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
hwts.hwtstamp, sq->ptpsq->cq_stats);
@@ -67,39 +67,68 @@ enum {
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts)
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
}
static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
if (!MLX5_CAP_MCAM_REG(dev, mtutc))
return -EOPNOTSUPP;
return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
MLX5_REG_MTUTC, 0, 1);
}
static u64 mlx5_read_time(struct mlx5_core_dev *dev,
struct ptp_system_timestamp *sts,
bool real_time)
{
u32 timer_h, timer_h1, timer_l;
timer_h = ioread32be(&dev->iseg->internal_timer_h);
timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
&dev->iseg->internal_timer_h);
ptp_read_system_prets(sts);
timer_l = ioread32be(&dev->iseg->internal_timer_l);
timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
&dev->iseg->internal_timer_l);
ptp_read_system_postts(sts);
timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
&dev->iseg->internal_timer_h);
if (timer_h != timer_h1) {
/* wrap around */
ptp_read_system_prets(sts);
timer_l = ioread32be(&dev->iseg->internal_timer_l);
timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
&dev->iseg->internal_timer_l);
ptp_read_system_postts(sts);
}
return (u64)timer_l | (u64)timer_h1 << 32;
return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
(u64)timer_l | (u64)timer_h1 << 32;
}
static u64 read_internal_timer(const struct cyclecounter *cc)
{
struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_timer *timer;
u32 sign;
if (!clock_info)
@@ -109,10 +138,11 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
smp_store_mb(clock_info->sign,
sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
clock_info->cycles = clock->tc.cycle_last;
clock_info->mult = clock->cycles.mult;
clock_info->nsec = clock->tc.nsec;
clock_info->frac = clock->tc.frac;
timer = &clock->timer;
clock_info->cycles = timer->tc.cycle_last;
clock_info->mult = timer->cycles.mult;
clock_info->nsec = timer->tc.nsec;
clock_info->frac = timer->tc.frac;
smp_store_release(&clock_info->sign,
sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
@@ -151,92 +181,184 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
clock = container_of(dwork, struct mlx5_clock, overflow_work);
timer = container_of(dwork, struct mlx5_timer, overflow_work);
clock = container_of(timer, struct mlx5_clock, timer);
mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
timecounter_read(&timer->tc);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}
static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
const struct timespec64 *ts)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
if (!mlx5_modify_mtutc_allowed(mdev))
return 0;
if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
return -EINVAL;
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
u64 ns = timespec64_to_ns(ts);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
err = mlx5_ptp_settime_real_time(mdev, ts);
if (err)
return err;
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
static
struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
struct ptp_system_timestamp *sts)
{
struct timespec64 ts;
u64 time;
time = mlx5_read_time(mdev, sts, true);
ts = ns_to_timespec64(time);
return ts;
}
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
if (mlx5_real_time_mode(mdev)) {
*ts = mlx5_ptp_gettimex_real_time(mdev, sts);
goto out;
}
write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_internal_timer(mdev, sts);
ns = timecounter_cyc2time(&clock->tc, cycles);
cycles = mlx5_read_time(mdev, sts, false);
ns = timecounter_cyc2time(&timer->tc, cycles);
write_sequnlock_irqrestore(&clock->lock, flags);
*ts = ns_to_timespec64(ns);
out:
return 0;
}
static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
if (!mlx5_modify_mtutc_allowed(mdev))
return 0;
/* HW time adjustment range is s16. If out of range, settime instead */
if (delta < S16_MIN || delta > S16_MAX) {
struct timespec64 ts;
s64 ns;
ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
ns = timespec64_to_ns(&ts) + delta;
ts = ns_to_timespec64(ns);
return mlx5_ptp_settime_real_time(mdev, &ts);
}
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
MLX5_SET(mtutc_reg, in, time_adjustment, delta);
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
err = mlx5_ptp_adjtime_real_time(mdev, delta);
if (err)
return err;
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
timecounter_adjtime(&timer->tc, delta);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
if (!mlx5_modify_mtutc_allowed(mdev))
return 0;
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
u32 diff;
u64 adj;
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
err = mlx5_ptp_adjfreq_real_time(mdev, delta);
if (err)
return err;
if (delta < 0) {
neg_adj = 1;
delta = -delta;
}
adj = clock->nominal_c_mult;
adj = timer->nominal_c_mult;
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
timecounter_read(&timer->tc);
timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
timer->nominal_c_mult + diff;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
@@ -305,6 +427,45 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
MLX5_EVENT_MODE_REPETETIVE & on);
}
static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
{
struct mlx5_clock *clock = &mdev->clock;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta;
struct mlx5_timer *timer;
unsigned long flags;
timer = &clock->timer;
cycles_now = mlx5_read_time(mdev, NULL, false);
write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
nsec_delta = target_ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
timer->cycles.mult);
write_sequnlock_irqrestore(&clock->lock, flags);
return cycles_now + cycles_delta;
}
static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
s64 sec, u32 nsec)
{
struct timespec64 ts;
s64 target_ns;
ts.tv_sec = sec;
ts.tv_nsec = nsec;
target_ns = timespec64_to_ns(&ts);
return find_target_cycles(mdev, target_ns);
}
static u64 perout_conf_real_time(s64 sec, u32 nsec)
{
return (u64)nsec | (u64)sec << 32;
}
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
@@ -314,11 +475,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
u64 nsec_now, nsec_delta, time_stamp = 0;
u64 cycles_now, cycles_delta;
struct timespec64 ts;
unsigned long flags;
u32 field_select = 0;
u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
@@ -335,12 +494,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
rq->perout.index);
field_select = MLX5_MTPPS_FS_ENABLE;
if (on) {
bool rt_mode = mlx5_real_time_mode(mdev);
u32 nsec;
s64 sec;
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
if (pin < 0)
return -EBUSY;
if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -350,23 +513,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if ((ns >> 1) != 500000000LL)
return -EINVAL;
ts.tv_sec = rq->perout.start.sec;
ts.tv_nsec = rq->perout.start.nsec;
ns = timespec64_to_ns(&ts);
cycles_now = mlx5_read_internal_timer(mdev, NULL);
write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
write_sequnlock_irqrestore(&clock->lock, flags);
time_stamp = cycles_now + cycles_delta;
field_select = MLX5_MTPPS_FS_PIN_MODE |
nsec = rq->perout.start.nsec;
sec = rq->perout.start.sec;
if (rt_mode && sec > U32_MAX)
return -EINVAL;
time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
perout_conf_internal_timer(mdev, sec, nsec);
field_select |= MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
field_select = MLX5_MTPPS_FS_ENABLE;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -537,25 +695,50 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
static void ts_next_sec(struct timespec64 *ts)
{
ts->tv_sec += 1;
ts->tv_nsec = 0;
}
static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
struct mlx5_clock *clock)
{
bool rt_mode = mlx5_real_time_mode(mdev);
struct timespec64 ts;
s64 target_ns;
if (rt_mode)
ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
else
mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
ts_next_sec(&ts);
target_ns = timespec64_to_ns(&ts);
return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
find_target_cycles(mdev, target_ns);
}
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
struct ptp_clock_event ptp_event;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
struct mlx5_core_dev *mdev;
struct timespec64 ts;
unsigned long flags;
u64 ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
ptp_event.timestamp =
ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
mlx5_real_time_cyc2time(clock,
be64_to_cpu(eqe->data.pps.time_stamp)) :
mlx5_timecounter_cyc2time(clock,
be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
@@ -569,17 +752,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
cycles_now = mlx5_read_internal_timer(mdev, NULL);
ts.tv_sec += 1;
ts.tv_nsec = 0;
ns = timespec64_to_ns(&ts);
ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
nsec_delta = ns - nsec_now;
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_work(&clock->pps_info.out_work);
break;
@@ -591,29 +766,32 @@ static int mlx5_pps_event(struct notifier_block *nb,
return NOTIFY_OK;
}
void mlx5_init_clock(struct mlx5_core_dev *mdev)
static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
u64 overflow_cycles;
u64 ns;
u64 frac = 0;
struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
if (!dev_freq) {
mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
return;
}
seqlock_init(&clock->lock);
clock->cycles.read = read_internal_timer;
clock->cycles.shift = MLX5_CYCLES_SHIFT;
clock->cycles.mult = clocksource_khz2mult(dev_freq,
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
timecounter_init(&clock->tc, &clock->cycles,
timer->cycles.read = read_internal_timer;
timer->cycles.shift = MLX5_CYCLES_SHIFT;
timer->cycles.mult = clocksource_khz2mult(dev_freq,
timer->cycles.shift);
timer->nominal_c_mult = timer->cycles.mult;
timer->cycles.mask = CLOCKSOURCE_MASK(41);
timecounter_init(&timer->tc, &timer->cycles,
ktime_to_ns(ktime_get_real()));
}
static void mlx5_init_overflow_period(struct mlx5_clock *clock)
{
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
struct mlx5_timer *timer = &clock->timer;
u64 overflow_cycles;
u64 frac = 0;
u64 ns;
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -622,32 +800,77 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));
ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
frac, &frac);
do_div(ns, NSEC_PER_SEC / HZ);
clock->overflow_period = ns;
timer->overflow_period = ns;
mdev->clock_info =
(struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
if (mdev->clock_info) {
mdev->clock_info->nsec = clock->tc.nsec;
mdev->clock_info->cycles = clock->tc.cycle_last;
mdev->clock_info->mask = clock->cycles.mask;
mdev->clock_info->mult = clock->nominal_c_mult;
mdev->clock_info->shift = clock->cycles.shift;
mdev->clock_info->frac = clock->tc.frac;
mdev->clock_info->overflow_period = clock->overflow_period;
INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
if (timer->overflow_period)
schedule_delayed_work(&timer->overflow_work, 0);
else
mlx5_core_warn(mdev,
"invalid overflow period, overflow_work is not scheduled\n");
if (clock_info)
clock_info->overflow_period = timer->overflow_period;
}
static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
struct mlx5_ib_clock_info *info;
struct mlx5_timer *timer;
mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
if (!mdev->clock_info) {
mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
return;
}
info = mdev->clock_info;
timer = &clock->timer;
info->nsec = timer->tc.nsec;
info->cycles = timer->tc.cycle_last;
info->mask = timer->cycles.mask;
info->mult = timer->nominal_c_mult;
info->shift = timer->cycles.shift;
info->frac = timer->tc.frac;
}
static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
mlx5_timecounter_init(mdev);
mlx5_init_clock_info(mdev);
mlx5_init_overflow_period(clock);
clock->ptp_info = mlx5_ptp_clock_info;
if (mlx5_real_time_mode(mdev)) {
struct timespec64 ts;
ktime_get_real_ts64(&ts);
mlx5_ptp_settime(&clock->ptp_info, &ts);
}
}
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
return;
}
seqlock_init(&clock->lock);
mlx5_init_timer_clock(mdev);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
if (clock->overflow_period)
schedule_delayed_work(&clock->overflow_work, 0);
else
mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -684,7 +907,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
}
cancel_work_sync(&clock->pps_info.out_work);
cancel_delayed_work_sync(&clock->overflow_work);
cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
@@ -33,6 +33,24 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
{
u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
return (rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME);
}
static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
{
u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format);
return (sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME);
}
typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
@@ -45,17 +63,27 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
struct mlx5_timer *timer = &clock->timer;
unsigned int seq;
u64 nsec;
do {
seq = read_seqbegin(&clock->lock);
nsec = timecounter_cyc2time(&clock->tc, timestamp);
nsec = timecounter_cyc2time(&timer->tc, timestamp);
} while (read_seqretry(&clock->lock, seq));
return ns_to_ktime(nsec);
}
#define REAL_TIME_TO_NS(hi, low) (((u64)hi) * NSEC_PER_SEC + ((u64)low))
static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
u64 time = REAL_TIME_TO_NS(timestamp >> 32, timestamp & 0xFFFFFFFF);
return ns_to_ktime(time);
}
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
@@ -69,6 +97,12 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
{
return 0;
}
static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
return 0;
}
#endif
#endif
@@ -582,7 +582,10 @@ struct mlx5_init_seg {
__be32 internal_timer_l;
__be32 rsvd3[2];
__be32 health_counter;
__be32 rsvd4[1019];
__be32 rsvd4[11];
__be32 real_time_h;
__be32 real_time_l;
__be32 rsvd5[1006];
__be64 ieee1588_clk;
__be32 ieee1588_clk_type;
__be32 clr_intx;
@@ -143,6 +143,7 @@ enum {
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
MLX5_REG_MTUTC = 0x9055,
MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQS = 0x9060,
MLX5_REG_MCQI = 0x9061,
@@ -675,18 +676,22 @@ struct mlx5_pps {
u8 enabled;
};
struct mlx5_clock {
struct mlx5_nb pps_nb;
seqlock_t lock;
struct mlx5_timer {
struct cyclecounter cycles;
struct timecounter tc;
struct hwtstamp_config hwtstamp_config;
u32 nominal_c_mult;
unsigned long overflow_period;
struct delayed_work overflow_work;
};
struct mlx5_clock {
struct mlx5_nb pps_nb;
seqlock_t lock;
struct hwtstamp_config hwtstamp_config;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
struct mlx5_pps pps_info;
struct mlx5_timer timer;
};
struct mlx5_dm;
......
@@ -937,11 +937,18 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 reserved_at_200[0x600];
};
enum {
MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
MLX5_QP_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
};
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
u8 reserved_at_1[0x3];
u8 sw_r_roce_src_udp_port[0x1];
u8 reserved_at_5[0x1b];
u8 reserved_at_5[0x19];
u8 qp_ts_format[0x2];
u8 reserved_at_20[0x60];
@@ -1258,6 +1265,18 @@ enum {
MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
};
enum {
MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
};
enum {
MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING = 0x0,
MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME = 0x1,
MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME = 0x2,
};
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x1f];
u8 vhca_resource_manager[0x1];
@@ -1571,7 +1590,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 general_obj_types[0x40];
u8 reserved_at_440[0x4];
u8 sq_ts_format[0x2];
u8 rq_ts_format[0x2];
u8 steering_format_version[0x4];
u8 create_qp_start_hint[0x18];
@@ -2875,6 +2895,12 @@ enum {
MLX5_QPC_CS_RES_UP_TO_64B = 0x2,
};
enum {
MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
MLX5_QPC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
};
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
u8 lag_tx_port_affinity[0x4];
@@ -2903,7 +2929,9 @@ struct mlx5_ifc_qpc_bits {
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
u8 reserved_at_55[0x6];
u8 reserved_at_55[0x3];
u8 ts_format[0x2];
u8 reserved_at_5a[0x1];
u8 rlky[0x1];
u8 ulp_stateless_offload_mode[0x4];
@@ -3319,6 +3347,12 @@ enum {
MLX5_SQC_STATE_ERR = 0x3,
};
enum {
MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
};
struct mlx5_ifc_sqc_bits {
u8 rlky[0x1];
u8 cd_master[0x1];
@@ -3330,7 +3364,9 @@ struct mlx5_ifc_sqc_bits {
u8 reg_umr[0x1];
u8 allow_swp[0x1];
u8 hairpin[0x1];
u8 reserved_at_f[0x11];
u8 reserved_at_f[0xb];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -3422,6 +3458,12 @@ enum {
MLX5_RQC_STATE_ERR = 0x3,
};
enum {
MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING = 0x0,
MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT = 0x1,
MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME = 0x2,
};
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
u8 delay_drop_en[0x1];
@@ -3432,7 +3474,9 @@ struct mlx5_ifc_rqc_bits {
u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
u8 hairpin[0x1];
u8 reserved_at_f[0x11];
u8 reserved_at_f[0xb];
u8 ts_format[0x2];
u8 reserved_at_1c[0x4];
u8 reserved_at_20[0x8];
u8 user_index[0x18];
@@ -5913,6 +5957,18 @@ struct mlx5_ifc_dealloc_modify_header_context_in_bits {
u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_modify_header_context_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 modify_header_id[0x20];
u8 reserved_at_60[0xa0];
};
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -9103,6 +9159,28 @@ struct mlx5_ifc_mpegc_reg_bits {
u8 reserved_at_60[0x100];
};
enum {
MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE = 0x1,
MLX5_MTUTC_OPERATION_ADJUST_TIME = 0x2,
MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC = 0x3,
};
struct mlx5_ifc_mtutc_reg_bits {
u8 reserved_at_0[0x1c];
u8 operation[0x4];
u8 freq_adjustment[0x20];
u8 reserved_at_40[0x40];
u8 utc_sec[0x20];
u8 reserved_at_a0[0x2];
u8 utc_nsec[0x1e];
u8 time_adjustment[0x20];
};
struct mlx5_ifc_pcam_enhanced_features_bits {
u8 reserved_at_0[0x68];
u8 fec_50G_per_lane_in_pplm[0x1];
@@ -9161,7 +9239,9 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
u8 reserved_at_0[0x6e];
u8 reserved_at_0[0x6b];
u8 ptpcyc2realtime_modify[0x1];
u8 reserved_at_6c[0x2];
u8 pci_status_and_power[0x1];
u8 reserved_at_6f[0x5];
u8 mark_tx_action_cnp[0x1];
@@ -9184,7 +9264,8 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_95_to_87[0x9];
u8 mpegc[0x1];
u8 regs_85_to_68[0x12];
u8 mtutc[0x1];
u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
u8 regs_63_to_32[0x20];
@@ -9917,6 +9998,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcda_reg_bits mcda_reg;
struct mlx5_ifc_mirc_reg_bits mirc_reg;
struct mlx5_ifc_mfrl_reg_bits mfrl_reg;
struct mlx5_ifc_mtutc_reg_bits mtutc_reg;
u8 reserved_at_0[0x60e0];
};
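
As a closing illustration (again not part of the commit): a hedged plain-C sketch of the adjtime fallback the clock code above uses in real-time mode. The MTUTC time_adjustment field only holds a signed 16-bit nanosecond delta, so larger offsets are applied by reading the current time, adding the delta, and setting the clock absolutely. rt_get_ns(), rt_set_ns() and rt_adjust_ns() are hypothetical stand-ins for the driver's register accessors, backed here by a fake in-memory clock.

/* Hedged sketch, not kernel code: accessor names are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

static int64_t fake_clock_ns = 42LL * NSEC_PER_SEC;	/* pretend device clock */

static int64_t rt_get_ns(void)		{ return fake_clock_ns; }
static void rt_set_ns(int64_t ns)	{ fake_clock_ns = ns; }
static void rt_adjust_ns(int16_t d)	{ fake_clock_ns += d; }

static void rt_adjtime(int64_t delta_ns)
{
	if (delta_ns < INT16_MIN || delta_ns > INT16_MAX) {
		/* Out of the s16 range: fall back to an absolute settime. */
		rt_set_ns(rt_get_ns() + delta_ns);
		return;
	}
	rt_adjust_ns((int16_t)delta_ns);
}

int main(void)
{
	rt_adjtime(100);		/* small delta: direct adjustment */
	rt_adjtime(5 * NSEC_PER_SEC);	/* large delta: settime fallback */
	printf("clock now at %lld ns\n", (long long)fake_clock_ns);
	return 0;
}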