Commit 44a8535f authored by David S. Miller

Merge branch 'octeontx2-cn10k-ptp'

From: Naveen Mamindlapalli <naveenm@marvell.com>
To: <kuba@kernel.org>, <davem@davemloft.net>, <edumazet@google.com>,
	<pabeni@redhat.com>, <richardcochran@gmail.com>,
	<netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<sgoutham@marvell.com>, <hkelam@marvell.com>
Cc: Naveen Mamindlapalli <naveenm@marvell.com>
Subject: [net-next PATCH 0/4] Add PTP support for CN10K silicon
Date: Sat, 10 Sep 2022 13:24:12 +0530
Message-ID: <20220910075416.22887-1-naveenm@marvell.com>

This patchset adds PTP support for CN10K silicon, specifically
to work around a few hardware issues and to add 1-step timestamping mode.

Patchset overview:

Patch #1 returns the correct PTP timestamp, in nanoseconds, captured
         when an external timestamp event occurs.

Patch #2 adds 1-step mode support.

Patch #3 implements a software workaround to generate the PPS output properly.

Patch #4 provides a software workaround for the rollover register default
         value, which causes PTP to return the wrong timestamp.
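
As a usage note: in 1-step mode the MAC inserts the originTimestamp into
outgoing SYNC packets itself, so no follow-up message is needed. Below is a
minimal userspace sketch of requesting this mode via the standard
SIOCSHWTSTAMP ioctl; it is illustrative only (interface name "eth0" is a
placeholder, error handling is trimmed) and not part of this series:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ONESTEP_SYNC;	/* one-step TX timestamping */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder netdev */
	ifr.ifr_data = (char *)&cfg;

	/* Drivers without one-step support reject this; otx2 returns
	 * -ERANGE when the CN10K_PTP_ONESTEP capability is not set.
	 */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}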
====================
Acked-by: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5947b7f7 85a5f963
@@ -1471,6 +1471,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1478,6 +1479,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
int extts_on;
};
struct ptp_rsp {
......
@@ -9,6 +9,8 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include "ptp.h"
#include "mbox.h"
@@ -50,12 +52,23 @@
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
#define PTP_CLOCK_SEC 0xFD0ULL
#define PTP_SEC_ROLLOVER 0xFD8ULL
#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
{
return ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP;
}
static bool is_ptp_dev_cn10k(struct ptp *ptp)
{
return ptp->pdev->device == PCI_DEVID_CN10K_PTP;
}
static bool cn10k_ptp_errata(struct ptp *ptp)
{
if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
@@ -72,6 +85,43 @@ static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
return false;
}
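/* hrtimer callback, part of the PPS workaround: shortly after each one
* second boundary it re-arms PTP_PPS_THRESH_HI, because on affected
* silicon PTP_CLOCK_HI rolls over one clock cycle early (see
* cn10k_ptp_errata() above and the comment in ptp_start() below).
*/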
static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
{
struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
ktime_t curr_ts = ktime_get();
ktime_t delta_ns, period_ns;
u64 ptp_clock_hi;
/* calculate the elapsed time since last restart */
delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
/* If the PTP clock value has crossed 0.5 seconds,
* it's too late to update the PPS threshold value, so
* update the threshold after 1 second.
*/
ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
if (ptp_clock_hi > 500000000) {
period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
} else {
writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
}
hrtimer_forward_now(hrtimer, period_ns);
ptp->last_ts = curr_ts;
return HRTIMER_RESTART;
}
static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
{
ktime_t period_ns;
period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
ptp->last_ts = ktime_get();
}
static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
u64 sec, sec1, nsec;
@@ -246,6 +296,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* sclk is in MHz */
ptp->clock_rate = sclk * 1000000;
/* Program the seconds rollover value to 1 second */
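/* 0x3b9aca00 == 1000000000 (NSEC_PER_SEC), i.e. roll over at exactly 1s */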
if (is_ptp_dev_cnf10kb(ptp))
writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -270,6 +324,18 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
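/* 0x1dcd6500 == 500000000 ns (half a second) in the upper 32 bits */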
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
if (cn10k_ptp_errata(ptp)) {
/* The ptp_clock_hi register rolls over to zero one clock cycle
* before it reaches the one second boundary. So, program
* pps_lo_incr such that the PPS threshold comparison at the one
* second boundary succeeds and the PPS edge toggles. After each
* one second boundary, the hrtimer handler is invoked and
* reprograms the PPS threshold value.
*/
ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
ptp->reg_base + PTP_PPS_LO_INCR);
}
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
@@ -282,18 +348,43 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
u64 timestamp;
if (is_ptp_dev_cn10k(ptp)) {
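/* CN10K reports seconds in the upper 32 bits and nanoseconds in the
* lower 32 bits; fold the two into plain nanoseconds.
*/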
timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
*clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
} else {
*clk = readq(ptp->reg_base + PTP_TIMESTAMP);
}
return 0;
}
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
if (!cn10k_ptp_errata(ptp))
writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
return 0;
}
static int ptp_extts_on(struct ptp *ptp, int on)
{
u64 ptp_clock_hi;
if (cn10k_ptp_errata(ptp)) {
if (on) {
ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
} else {
if (hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
}
}
return 0;
}
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -329,6 +420,11 @@ static int ptp_probe(struct pci_dev *pdev,
else
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
if (cn10k_ptp_errata(ptp)) {
hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ptp->hrtimer.function = ptp_reset_thresh;
}
return 0;
error_free:
@@ -353,6 +449,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
if (IS_ERR_OR_NULL(ptp))
return;
if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
hrtimer_cancel(&ptp->hrtimer);
@@ -420,6 +519,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
case PTP_OP_EXTTS_ON:
err = ptp_extts_on(rvu->ptp, req->extts_on);
break;
default:
err = -EINVAL;
break;
......
@@ -17,7 +17,10 @@ struct ptp {
void __iomem *reg_base;
u64 (*read_ptp_tstmp)(struct ptp *ptp);
spinlock_t ptp_lock; /* lock */
struct hrtimer hrtimer; /* re-arms the PPS threshold (errata workaround) */
ktime_t last_ts; /* time of the last hrtimer restart */
u32 clock_rate; /* in Hz */
u32 clock_period; /* in ns */
};
struct ptp *ptp_get(void);
......
@@ -415,11 +415,26 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
return;
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
if (enable) {
cfg |= RPMX_RX_TS_PREPEND;
cfg |= RPMX_TX_PTP_1S_SUPPORT;
} else {
cfg &= ~RPMX_RX_TS_PREPEND;
cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
}
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
if (enable) {
cfg |= RPMX_ONESTEP_ENABLE;
cfg &= ~RPMX_TS_BINARY_MODE;
} else {
cfg &= ~RPMX_ONESTEP_ENABLE;
}
rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
}
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
......
@@ -16,6 +16,7 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -72,6 +73,10 @@
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
#define RPM_DEFAULT_PAUSE_TIME 0x7FF
#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
#define RPMX_TS_BINARY_MODE BIT_ULL(11)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
......
@@ -4296,8 +4296,14 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
/* For better performance use NDC TX instead of NDC RX for SQ's SQEs */
cfg |= 1ULL;
if (!is_rvu_otx2(rvu))
cfg |= NIX_PTP_1STEP_EN;
rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
......
@@ -266,6 +266,7 @@
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
#define NIX_AF_SEB_CFG (0x05F0)
#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
......
@@ -243,6 +243,7 @@ struct otx2_hw {
#define CN10K_MBOX 1
#define CN10K_LMTST 2
#define CN10K_RPM 3
#define CN10K_PTP_ONESTEP 4
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -276,6 +277,13 @@ struct refill_work {
struct otx2_nic *pf;
};
/* PTPv2 originTimestamp structure */
struct ptpv2_tstamp {
__be16 seconds_msb; /* 16 bits + */
__be32 seconds_lsb; /* 32 bits = 48 bits */
__be32 nanoseconds;
} __packed;
struct otx2_ptp {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -291,6 +299,9 @@ struct otx2_ptp {
struct ptp_pin_desc extts_config;
u64 (*convert_rx_ptp_tstmp)(u64 timestamp);
u64 (*convert_tx_ptp_tstmp)(u64 timestamp);
struct delayed_work synctstamp_work; /* caches PHC time for 1-step TX */
u64 tstamp; /* cached PHC time in ns */
u32 base_ns; /* sub-second part of the cached time */
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -363,6 +374,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
u64 flags;
u64 *cq_op_addr;
@@ -494,6 +506,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
__set_bit(CN10K_RPM, &hw->cap_flag);
__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
}
}
......
@@ -963,10 +963,12 @@ static int otx2_get_ts_info(struct net_device *netdev,
info->phc_index = otx2_ptp_clock_index(pfvf);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
if (test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
info->tx_types |= BIT(HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
BIT(HWTSTAMP_FILTER_ALL);
return 0;
}
......
@@ -2038,8 +2038,19 @@ int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
if (pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)
pfvf->flags &= ~OTX2_FLAG_PTP_ONESTEP_SYNC;
cancel_delayed_work(&pfvf->ptp->synctstamp_work);
otx2_config_hw_tx_tstamp(pfvf, false);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
if (!test_bit(CN10K_PTP_ONESTEP, &pfvf->hw.cap_flag))
return -ERANGE;
pfvf->flags |= OTX2_FLAG_PTP_ONESTEP_SYNC;
schedule_delayed_work(&pfvf->ptp->synctstamp_work,
msecs_to_jiffies(500));
fallthrough;
case HWTSTAMP_TX_ON:
otx2_config_hw_tx_tstamp(pfvf, true);
break;
......
@@ -10,6 +10,33 @@
#include "otx2_common.h"
#include "otx2_ptp.h"
static u64 otx2_ptp_get_clock(struct otx2_ptp *ptp)
{
struct ptp_req *req;
struct ptp_rsp *rsp;
int err;
if (!ptp->nic)
return 0;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
return 0;
req->op = PTP_OP_GET_CLOCK;
err = otx2_sync_mbox_msg(&ptp->nic->mbox);
if (err)
return 0;
rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
&req->hdr);
if (IS_ERR(rsp))
return 0;
return rsp->clk;
}
static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -46,32 +73,28 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
static int ptp_extts_on(struct otx2_ptp *ptp, int on)
{
struct ptp_req *req;
if (!ptp->nic)
return -ENODEV;
req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
if (!req)
return -ENOMEM;
req->op = PTP_OP_EXTTS_ON;
req->extts_on = on;
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
return otx2_ptp_get_clock(ptp);
}
static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
@@ -101,6 +124,15 @@ static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
return rsp->clk;
}
static void otx2_get_ptpclock(struct otx2_ptp *ptp, u64 *tstamp)
{
struct otx2_nic *pfvf = ptp->nic;
mutex_lock(&pfvf->mbox.lock);
*tstamp = timecounter_read(&ptp->time_counter);
mutex_unlock(&pfvf->mbox.lock);
}
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -119,14 +151,10 @@ static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
u64 tstamp;
otx2_get_ptpclock(ptp, &tstamp);
*ts = ns_to_timespec64(tstamp);
return 0;
}
@@ -178,8 +206,6 @@ static void otx2_ptp_extts_check(struct work_struct *work)
event.index = 0;
event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
ptp_clock_event(ptp->ptp_clock, &event);
new_thresh = tstmp % 500000000;
if (ptp->thresh != new_thresh) {
mutex_lock(&ptp->nic->mbox.lock);
@@ -187,10 +213,28 @@ static void otx2_ptp_extts_check(struct work_struct *work)
mutex_unlock(&ptp->nic->mbox.lock);
ptp->thresh = new_thresh;
}
ptp->last_extts = tstmp;
}
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
}
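/* Periodic worker: caches the PHC time (ptp->tstamp) and its sub-second
* part (ptp->base_ns) so the TX hot path can stamp one-step SYNC packets
* without a sleeping mailbox call.
*/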
static void otx2_sync_tstamp(struct work_struct *work)
{
struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
synctstamp_work.work);
struct otx2_nic *pfvf = ptp->nic;
u64 tstamp;
mutex_lock(&pfvf->mbox.lock);
tstamp = otx2_ptp_get_clock(ptp);
mutex_unlock(&pfvf->mbox.lock);
ptp->tstamp = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
ptp->base_ns = tstamp % NSEC_PER_SEC;
schedule_delayed_work(&ptp->synctstamp_work, msecs_to_jiffies(250));
}
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
@@ -207,10 +251,13 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
if (on) {
ptp_extts_on(ptp, on);
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
} else {
ptp_extts_on(ptp, on);
cancel_delayed_work_sync(&ptp->extts_work);
}
return 0;
default:
break;
@@ -302,6 +349,8 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
ptp_ptr->convert_tx_ptp_tstmp = &cn10k_ptp_convert_timestamp;
}
INIT_DELAYED_WORK(&ptp_ptr->synctstamp_work, otx2_sync_tstamp);
pfvf->ptp = ptp_ptr;
error:
@@ -316,6 +365,8 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
if (!ptp)
return;
cancel_delayed_work(&pfvf->ptp->synctstamp_work);
ptp_clock_unregister(ptp->ptp_clock);
kfree(ptp);
pfvf->ptp = NULL;
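
The extts plumbing above is driven from userspace through the standard PTP
character device. A minimal sketch follows; the device path and edge flags
are illustrative, not mandated by this series:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* placeholder PHC device */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	/* Lands in otx2_ptp_enable(), which calls ptp_extts_on() and
	 * schedules extts_work.
	 */
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req) < 0)
		perror("PTP_EXTTS_REQUEST");

	/* Each captured edge is delivered as a ptp_extts_event */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts %lld.%09u\n", (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}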
......
@@ -236,8 +236,15 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
u64 start_offset : 8; /* W0 */
u64 rsvd_11_8 : 4;
u64 rsvd_12 : 1;
u64 udp_csum_crt : 1; /* set for PTP-over-UDP SYNC packets */
u64 update64 : 1;
u64 rsvd_15_16 : 1;
u64 base_ns : 32; /* sub-second part of the cached PHC time */
u64 step_type : 1; /* 1-step timestamp insertion */
u64 rsvd_51_49 : 3;
u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
......
@@ -19,6 +19,12 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
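/* 0x13f == 319, the UDP port used by PTP event messages such as SYNC */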
#define PTP_PORT 0x13F
/* The PTPv2 header originTimestamp starts at byte offset 34 and
* contains a 6-byte seconds field and a 4-byte nanoseconds field.
*/
#define PTP_SYNC_SEC_OFFSET 34
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
@@ -686,7 +692,8 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
}
static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
int alg, u64 iova, int ptp_offset,
u64 base_ns, int udp_csum)
{
struct nix_sqe_mem_s *mem;
@@ -696,6 +703,13 @@ static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
mem->wmem = 1; /* wait for the memory operation */
mem->addr = iova;
if (ptp_offset) {
mem->start_offset = ptp_offset;
mem->udp_csum_crt = udp_csum;
mem->base_ns = base_ns;
mem->step_type = 1;
}
*offset += sizeof(*mem);
}
@@ -952,16 +966,102 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
static bool otx2_validate_network_transport(struct sk_buff *skb)
{
if ((ip_hdr(skb)->protocol == IPPROTO_UDP) ||
(ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) {
struct udphdr *udph = udp_hdr(skb);
if (udph->source == htons(PTP_PORT) &&
udph->dest == htons(PTP_PORT))
return true;
}
return false;
}
static bool otx2_ptp_is_sync(struct sk_buff *skb, int *offset, int *udp_csum)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
u16 nix_offload_hlen = 0, inner_vhlen = 0;
u8 *data = skb->data, *msgtype;
__be16 proto = eth->h_proto;
int network_depth = 0;
/* NIX is programmed to offload the outer VLAN header. In case of a
* single VLAN, the protocol field holds the network header type
* (ETH_IP/V6); in case of stacked VLANs, it holds the inner VLAN
* type (8100).
*/
if (skb->dev->features & NETIF_F_HW_VLAN_CTAG_TX &&
skb->dev->features & NETIF_F_HW_VLAN_STAG_TX) {
if (skb->vlan_proto == htons(ETH_P_8021AD)) {
/* Get vlan protocol */
proto = __vlan_get_protocol(skb, eth->h_proto, NULL);
/* SKB APIs like skb_transport_offset() do not include the
* offloaded VLAN header length. Need to explicitly add
* the length.
*/
nix_offload_hlen = VLAN_HLEN;
inner_vhlen = VLAN_HLEN;
} else if (skb->vlan_proto == htons(ETH_P_8021Q)) {
nix_offload_hlen = VLAN_HLEN;
}
} else if (eth_type_vlan(eth->h_proto)) {
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
}
switch (ntohs(proto)) {
case ETH_P_1588:
if (network_depth)
*offset = network_depth;
else
*offset = ETH_HLEN + nix_offload_hlen +
inner_vhlen;
break;
case ETH_P_IP:
case ETH_P_IPV6:
if (!otx2_validate_network_transport(skb))
return false;
*udp_csum = 1;
*offset = nix_offload_hlen + skb_transport_offset(skb) +
sizeof(struct udphdr);
}
msgtype = data + *offset;
/* Check whether the PTP messageType is SYNC (0x0) */
return (*msgtype & 0xf) == 0;
}
static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
struct otx2_snd_queue *sq, int *offset)
{
struct ptpv2_tstamp *origin_tstamp;
int ptp_offset = 0, udp_csum = 0;
struct timespec64 ts;
u64 iova;
if (unlikely(!skb_shinfo(skb)->gso_size &&
(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) {
if (unlikely(pfvf->flags & OTX2_FLAG_PTP_ONESTEP_SYNC)) {
if (otx2_ptp_is_sync(skb, &ptp_offset, &udp_csum)) {
origin_tstamp = (struct ptpv2_tstamp *)
((u8 *)skb->data + ptp_offset +
PTP_SYNC_SEC_OFFSET);
ts = ns_to_timespec64(pfvf->ptp->tstamp);
origin_tstamp->seconds_msb = htons((ts.tv_sec >> 32) & 0xffff);
origin_tstamp->seconds_lsb = htonl(ts.tv_sec & 0xffffffff);
origin_tstamp->nanoseconds = htonl(ts.tv_nsec);
/* Point to correction field in PTP packet */
ptp_offset += 8;
}
} else {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
iova = sq->timestamps->iova + (sq->head * sizeof(u64));
otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova,
ptp_offset, pfvf->ptp->base_ns, udp_csum);
} else {
skb_tx_timestamp(skb);
}
......