Commit 63c7234f authored by Jakub Kicinski

Revert "octeon_ep_vf: add octeon_ep_vf driver"

This reverts commit c902ba32.
This reverts commit 50648968.
This reverts commit 77cef1e0.
This reverts commit 8f8d322b.
This reverts commit 6ca7b548.
This reverts commit db468f92.
This reverts commit 5f8c64c2.
This reverts commit ebdc193b.

The driver needs more work.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 5e5401d6
@@ -42,7 +42,6 @@ Contents:
   intel/ice
   marvell/octeontx2
   marvell/octeon_ep
-  marvell/octeon_ep_vf
   mellanox/mlx5/index
   microsoft/netvsc
   neterion/s2io
.. SPDX-License-Identifier: GPL-2.0+
=======================================================================
Linux kernel networking driver for Marvell's Octeon PCI Endpoint NIC VF
=======================================================================
Network driver for Marvell's Octeon PCI EndPoint NIC VF.
Copyright (c) 2020 Marvell International Ltd.
Overview
========
This driver implements networking functionality of Marvell's Octeon PCI
EndPoint NIC VF.
Supported Devices
=================
Currently, this driver supports the following devices:
* Network controller: Cavium, Inc. Device b203
* Network controller: Cavium, Inc. Device b403
* Network controller: Cavium, Inc. Device b103
* Network controller: Cavium, Inc. Device b903
* Network controller: Cavium, Inc. Device ba03
* Network controller: Cavium, Inc. Device bc03
* Network controller: Cavium, Inc. Device bd03
@@ -12861,15 +12861,6 @@ L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/marvell/octeon_ep
-MARVELL OCTEON ENDPOINT VF DRIVER
-M: Veerasenareddy Burru <vburru@marvell.com>
-M: Sathesh Edara <sedara@marvell.com>
-M: Shinas Rasheed <srasheed@marvell.com>
-M: Satananda Burla <sburla@marvell.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/net/ethernet/marvell/octeon_ep_vf
-
 MARVELL OCTEONTX2 PHYSICAL FUNCTION DRIVER
 M: Sunil Goutham <sgoutham@marvell.com>
 M: Geetha sowjanya <gakula@marvell.com>
@@ -180,7 +180,6 @@ config SKY2_DEBUG
 source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
 source "drivers/net/ethernet/marvell/octeon_ep/Kconfig"
-source "drivers/net/ethernet/marvell/octeon_ep_vf/Kconfig"
 source "drivers/net/ethernet/marvell/prestera/Kconfig"
 endif # NET_VENDOR_MARVELL
@@ -12,6 +12,5 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
 obj-y += octeon_ep/
-obj-y += octeon_ep_vf/
 obj-y += octeontx2/
 obj-y += prestera/
# SPDX-License-Identifier: GPL-2.0-only
#
# Marvell's Octeon PCI Endpoint NIC VF Driver Configuration
#
config OCTEON_EP_VF
tristate "Marvell Octeon PCI Endpoint NIC VF Driver"
depends on 64BIT
depends on PCI
help
This driver supports networking functionality of Marvell's
Octeon PCI Endpoint NIC VF.
For the list of devices supported by this driver, refer to the
documentation in
<file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep_vf.rst>.
To compile this driver as a module, choose M here. The module
will be called octeon_ep_vf.
# SPDX-License-Identifier: GPL-2.0
#
# Network driver for Marvell's Octeon PCI Endpoint NIC VF
#
obj-$(CONFIG_OCTEON_EP_VF) += octeon_ep_vf.o
octeon_ep_vf-y := octep_vf_main.o octep_vf_cn9k.o octep_vf_cnxk.o \
octep_vf_tx.o octep_vf_rx.o octep_vf_mbox.o \
octep_vf_ethtool.o
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
#include "octep_vf_regs_cn9k.h"
/* Dump useful hardware IQ/OQ CSRs for debugging */
static void cn93_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
{
struct device *dev = &oct->pdev->dev;
dev_info(dev, "IQ-%d register dump\n", qno);
dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_INSTR_DBELL(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(qno)));
dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_CONTROL(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(qno)));
dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_ENABLE(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(qno)));
dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_INSTR_BADDR(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(qno)));
dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(qno)));
dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_CNTS(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(qno)));
dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_INT_LEVELS(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(qno)));
dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_PKT_CNT(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(qno)));
dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_IN_BYTE_CNT(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(qno)));
dev_info(dev, "OQ-%d register dump\n", qno);
dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(qno)));
dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_CONTROL(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(qno)));
dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_ENABLE(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(qno)));
dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(qno)));
dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_CNTS(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CNTS(qno)));
dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_INT_LEVELS(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(qno)));
dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_PKT_CNT(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(qno)));
dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
qno, CN93_VF_SDP_R_OUT_BYTE_CNT(qno),
octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_BYTE_CNT(qno)));
}
/* Reset Hardware Tx queue */
static int cn93_vf_reset_iq(struct octep_vf_device *oct, int q_no)
{
u64 val = 0ULL;
dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
/* Disable the Tx/Instruction Ring */
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(q_no), val);
/* clear the Instruction Ring packet/byte counts and doorbell CSRs */
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q_no), val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_PKT_CNT(q_no), val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_BYTE_CNT(q_no), val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
val = 0xFFFFFFFF;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no));
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CNTS(q_no), val & 0xFFFFFFFF);
return 0;
}
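/* Note: this reset sequence relies on the count CSRs being
* clear-on-write: writing all-ones to IN_INSTR_DBELL clears the pending
* doorbell count, and writing the value just read from IN_CNTS back to
* the register subtracts it, zeroing the pending instruction count.
*/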
/* Reset Hardware Rx queue */
static void cn93_vf_reset_oq(struct octep_vf_device *oct, int q_no)
{
u64 val = 0ULL;
/* Disable Output (Rx) Ring */
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(q_no), val);
/* Clear count CSRs */
val = octep_vf_read_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no));
octep_vf_write_csr(oct, CN93_VF_SDP_R_OUT_CNTS(q_no), val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
}
/* Reset all hardware Tx/Rx queues */
static void octep_vf_reset_io_queues_cn93(struct octep_vf_device *oct)
{
struct pci_dev *pdev = oct->pdev;
int q;
dev_dbg(&pdev->dev, "Reset OCTEP_CN93 VF IO Queues\n");
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
cn93_vf_reset_iq(oct, q);
cn93_vf_reset_oq(oct, q);
}
}
/* Initialize configuration limits and initial active config */
static void octep_vf_init_config_cn93_vf(struct octep_vf_device *oct)
{
struct octep_vf_config *conf = oct->conf;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(0));
conf->ring_cfg.max_io_rings = (reg_val >> CN93_VF_R_IN_CTL_RPVF_POS) &
CN93_VF_R_IN_CTL_RPVF_MASK;
conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
conf->iq.db_min = OCTEP_VF_DB_MIN;
conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
}
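/* Worked example (illustrative; the RPVF position and mask come from the
* regs header and are assumed here to be 48 and 0xF): if
* SDP_R_IN_CONTROL(0) reads 0x0004000000000000, then
*
*   max_io_rings = (0x0004000000000000ULL >> 48) & 0xF = 4
*
* i.e. the PF has provisioned four ring pairs for this VF.
*/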
/* Setup registers for a hardware Tx Queue */
static void octep_vf_setup_iq_regs_cn93(struct octep_vf_device *oct, int iq_no)
{
struct octep_vf_iq *iq = oct->iq[iq_no];
u32 reset_instr_cnt;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
/* wait for the IDLE bit to be set */
if (!(reg_val & CN93_VF_R_IN_CTL_IDLE)) {
do {
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no));
} while (!(reg_val & CN93_VF_R_IN_CTL_IDLE));
}
reg_val |= CN93_VF_R_IN_CTL_RDSIZE;
reg_val |= CN93_VF_R_IN_CTL_IS_64B;
reg_val |= CN93_VF_R_IN_CTL_ESR;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
/* Write the start of the input queue's ring and its size */
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
/* Remember the doorbell & instruction count register addr for this queue */
iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);
iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);
iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);
/* Store the current instruction counter (used in flush_iq calculation) */
reset_instr_cnt = readl(iq->inst_cnt_reg);
writel(reset_instr_cnt, iq->inst_cnt_reg);
/* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
}
/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cn93(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
u32 time_threshold = 0;
u64 oq_ctl = 0ULL;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
/* wait for the IDLE bit to be set */
if (!(reg_val & CN93_VF_R_OUT_CTL_IDLE)) {
do {
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CN93_VF_R_OUT_CTL_IDLE));
}
reg_val &= ~(CN93_VF_R_OUT_CTL_IMODE);
reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_P);
reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_P);
reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_I);
reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_I);
reg_val &= ~(CN93_VF_R_OUT_CTL_ES_I);
reg_val &= ~(CN93_VF_R_OUT_CTL_ROR_D);
reg_val &= ~(CN93_VF_R_OUT_CTL_NSR_D);
reg_val &= ~(CN93_VF_R_OUT_CTL_ES_D);
reg_val |= (CN93_VF_R_OUT_CTL_ES_P);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
oq_ctl = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no));
/* Clear the ISIZE and BSIZE (22-0) */
oq_ctl &= ~0x7fffffULL;
/* Populate the BSIZE (15-0) */
oq_ctl |= (oq->buffer_size & 0xffff);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
oq->pkts_sent_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_CNTS(oq_no);
oq->pkts_credit_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
}
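/* Illustration of the OUT_INT_LEVELS encoding programmed above: the time
* threshold occupies bits [63:32] and the packet-count threshold bits
* [31:0]. With the driver defaults (time = 10, pkt = 1) the register
* value is ((u64)10 << 32) | 1 = 0x0000000A00000001.
*/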
/* Setup registers for a VF mailbox */
static void octep_vf_setup_mbox_regs_cn93(struct octep_vf_device *oct, int q_no)
{
struct octep_vf_mbox *mbox = oct->mbox;
/* PF to VF DATA reg. VF reads from this reg */
mbox->mbox_read_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
/* VF mbox interrupt reg */
mbox->mbox_int_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_PF_VF_INT(q_no);
/* VF to PF DATA reg. VF writes into this reg */
mbox->mbox_write_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
}
/* Mailbox Interrupt handler */
static void cn93_handle_vf_mbox_intr(struct octep_vf_device *oct)
{
if (oct->mbox)
schedule_work(&oct->mbox->wk.work);
else
dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
}
/* Tx/Rx queue interrupt handler */
static irqreturn_t octep_vf_ioq_intr_handler_cn93(void *data)
{
struct octep_vf_ioq_vector *vector = (struct octep_vf_ioq_vector *)data;
struct octep_vf_oq *oq = vector->oq;
struct octep_vf_device *oct = vector->octep_vf_dev;
u64 reg_val = 0ULL;
/* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
if (oq->q_no == 0) {
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0));
if (reg_val & CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
cn93_handle_vf_mbox_intr(oct);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
}
}
napi_schedule_irqoff(oq->napi);
return IRQ_HANDLED;
}
/* Re-initialize Octeon hardware registers */
static void octep_vf_reinit_regs_cn93(struct octep_vf_device *oct)
{
u32 i;
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
oct->hw_ops.setup_iq_regs(oct, i);
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
oct->hw_ops.setup_oq_regs(oct, i);
oct->hw_ops.enable_interrupts(oct);
oct->hw_ops.enable_io_queues(oct);
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}
/* Enable all interrupts */
static void octep_vf_enable_interrupts_cn93(struct octep_vf_device *oct)
{
int num_rings, q;
u64 reg_val;
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (q = 0; q < num_rings; q++) {
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
}
/* Enable the PF-to-VF mailbox interrupt by setting the enable (2nd) bit */
octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0),
CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
}
/* Disable all interrupts */
static void octep_vf_disable_interrupts_cn93(struct octep_vf_device *oct)
{
int num_rings, q;
u64 reg_val;
/* Disable the PF-to-VF mailbox interrupt by clearing the interrupt register */
if (oct->mbox)
octep_vf_write_csr64(oct, CN93_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (q = 0; q < num_rings; q++) {
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q));
reg_val &= ~(0x1ULL << 62);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q));
reg_val &= ~(0x1ULL << 62);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
}
}
/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
static u32 octep_vf_update_iq_read_index_cn93(struct octep_vf_iq *iq)
{
u32 pkt_in_done = readl(iq->inst_cnt_reg);
u32 last_done, new_idx;
last_done = pkt_in_done - iq->pkt_in_done;
iq->pkt_in_done = pkt_in_done;
new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
return new_idx;
}
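/* Worked example: the u32 subtraction above stays correct across
* hardware counter wraparound. If the previous snapshot was
* iq->pkt_in_done == 0xFFFFFFFE and the register now reads 0x00000001,
* then last_done = 0x00000001 - 0xFFFFFFFE = 3 (mod 2^32), and the read
* index advances by 3 modulo the ring size.
*/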
/* Enable a hardware Tx Queue */
static void octep_vf_enable_iq_cn93(struct octep_vf_device *oct, int iq_no)
{
u64 loop = HZ;
u64 reg_val;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
while (octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
loop--) {
schedule_timeout_interruptible(1);
}
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
reg_val |= 0x1ULL;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}
/* Enable a hardware Rx Queue */
static void octep_vf_enable_oq_cn93(struct octep_vf_device *oct, int oq_no)
{
u64 reg_val = 0ULL;
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
reg_val |= 0x1ULL;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}
/* Enable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_enable_io_queues_cn93(struct octep_vf_device *oct)
{
u8 q;
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
octep_vf_enable_iq_cn93(oct, q);
octep_vf_enable_oq_cn93(oct, q);
}
}
/* Disable a hardware Tx Queue assigned to VF */
static void octep_vf_disable_iq_cn93(struct octep_vf_device *oct, int iq_no)
{
u64 reg_val = 0ULL;
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no));
reg_val &= ~0x1ULL;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}
/* Disable a hardware Rx Queue assigned to VF */
static void octep_vf_disable_oq_cn93(struct octep_vf_device *oct, int oq_no)
{
u64 reg_val = 0ULL;
reg_val = octep_vf_read_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no));
reg_val &= ~0x1ULL;
octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}
/* Disable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_disable_io_queues_cn93(struct octep_vf_device *oct)
{
int q = 0;
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
octep_vf_disable_iq_cn93(oct, q);
octep_vf_disable_oq_cn93(oct, q);
}
}
/* Dump hardware registers (including Tx/Rx queues) for debugging. */
static void octep_vf_dump_registers_cn93(struct octep_vf_device *oct)
{
u8 num_rings, q;
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (q = 0; q < num_rings; q++)
cn93_vf_dump_q_regs(oct, q);
}
/**
* octep_vf_device_setup_cn93() - Setup Octeon device.
*
* @oct: Octeon device private data structure.
*
* - initialize hardware operations.
* - get target side pcie port number for the device.
* - set initial configuration and max limits.
*/
void octep_vf_device_setup_cn93(struct octep_vf_device *oct)
{
oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cn93;
oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cn93;
oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cn93;
oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cn93;
oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cn93;
oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cn93;
oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cn93;
oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cn93;
oct->hw_ops.enable_iq = octep_vf_enable_iq_cn93;
oct->hw_ops.enable_oq = octep_vf_enable_oq_cn93;
oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cn93;
oct->hw_ops.disable_iq = octep_vf_disable_iq_cn93;
oct->hw_ops.disable_oq = octep_vf_disable_oq_cn93;
oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cn93;
oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cn93;
oct->hw_ops.dump_registers = octep_vf_dump_registers_cn93;
octep_vf_init_config_cn93_vf(oct);
}
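/* Sketch of how this ops table is consumed by chip-independent code
* (illustrative; the real call sites live in octep_vf_main.c):
*
*   octep_vf_device_setup_cn93(oct);
*   oct->hw_ops.reset_io_queues(oct);    /\* chip-specific queue reset *\/
*   oct->hw_ops.enable_interrupts(oct);  /\* chip-specific interrupt unmask *\/
*
* The same calls work unchanged once octep_vf_device_setup_cnxk() has
* filled the table for CN10K parts instead.
*/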
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
#include "octep_vf_regs_cnxk.h"
/* Dump useful hardware IQ/OQ CSRs for debugging */
static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
{
struct device *dev = &oct->pdev->dev;
dev_info(dev, "IQ-%d register dump\n", qno);
dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_CNTS(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));
dev_info(dev, "OQ-%d register dump\n", qno);
dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
}
/* Reset Hardware Tx queue */
static int cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
{
u64 val = 0ULL;
dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);
/* Disable the Tx/Instruction Ring */
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);
/* clear the Instruction Ring packet/byte counts and doorbell CSRs */
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);
val = 0xFFFFFFFF;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);
val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no), val & 0xFFFFFFFF);
return 0;
}
/* Reset Hardware Rx queue */
static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
{
u64 val = 0ULL;
/* Disable Output (Rx) Ring */
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);
/* Clear count CSRs */
val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
}
/* Reset all hardware Tx/Rx queues */
static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
{
struct pci_dev *pdev = oct->pdev;
int q;
dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
cnxk_vf_reset_iq(oct, q);
cnxk_vf_reset_oq(oct, q);
}
}
/* Initialize configuration limits and initial active config */
static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
{
struct octep_vf_config *conf = oct->conf;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
CNXK_VF_R_IN_CTL_RPVF_MASK;
conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;
conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
conf->iq.db_min = OCTEP_VF_DB_MIN;
conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;
conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
}
/* Setup registers for a hardware Tx Queue */
static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
{
struct octep_vf_iq *iq = oct->iq[iq_no];
u32 reset_instr_cnt;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
/* wait for the IDLE bit to be set */
if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
do {
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
} while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
}
reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
reg_val |= CNXK_VF_R_IN_CTL_ESR;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);
/* Write the start of the input queue's ring and its size */
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);
/* Remember the doorbell & instruction count register addr for this queue */
iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);
/* Store the current instruction counter (used in flush_iq calculation) */
reset_instr_cnt = readl(iq->inst_cnt_reg);
writel(reset_instr_cnt, iq->inst_cnt_reg);
/* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
}
/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
{
struct octep_vf_oq *oq = oct->oq[oq_no];
u32 time_threshold = 0;
u64 oq_ctl = 0ULL;
u64 reg_val;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
/* wait for the IDLE bit to be set */
if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
do {
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
} while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
}
reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
/* Clear the ISIZE and BSIZE (22-0) */
oq_ctl &= ~0x7fffffULL;
/* Populate the BSIZE (15-0) */
oq_ctl |= (oq->buffer_size & 0xffff);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);
/* Get the mapped address of the pkt_sent and pkts_credit regs */
oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);
time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
/* set watermark for backpressure */
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
reg_val &= ~0xFFFFFFFFULL;
reg_val |= CFG_GET_OQ_WMARK(oct->conf);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
}
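/* Illustration: with the default OCTEP_VF_OQ_WMARK_MIN (256) in the low
* 32 bits of OUT_WMARK, the ring signals backpressure toward the source
* once fewer than 256 free Rx buffers remain (see the wmark field
* description in octep_vf_config.h).
*/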
/* Setup registers for a VF mailbox */
static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
{
struct octep_vf_mbox *mbox = oct->mbox;
/* PF to VF DATA reg. VF reads from this reg */
mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);
/* VF mbox interrupt reg */
mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);
/* VF to PF DATA reg. VF writes into this reg */
mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
}
/* Mailbox Interrupt handler */
static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
{
if (oct->mbox)
schedule_work(&oct->mbox->wk.work);
else
dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
}
/* Tx/Rx queue interrupt handler */
static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
{
struct octep_vf_ioq_vector *vector = (struct octep_vf_ioq_vector *)data;
struct octep_vf_oq *oq = vector->oq;
struct octep_vf_device *oct = vector->octep_vf_dev;
u64 reg_val = 0ULL;
/* Mailbox interrupt arrives along with interrupt of tx/rx ring pair 0 */
if (oq->q_no == 0) {
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
cnxk_handle_vf_mbox_intr(oct);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
}
}
napi_schedule_irqoff(oq->napi);
return IRQ_HANDLED;
}
/* Re-initialize Octeon hardware registers */
static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
{
u32 i;
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
oct->hw_ops.setup_iq_regs(oct, i);
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
oct->hw_ops.setup_oq_regs(oct, i);
oct->hw_ops.enable_interrupts(oct);
oct->hw_ops.enable_io_queues(oct);
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}
/* Enable all interrupts */
static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
{
int num_rings, q;
u64 reg_val;
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (q = 0; q < num_rings; q++) {
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
}
/* Enable the PF-to-VF mailbox interrupt by setting the enable (2nd) bit */
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
}
/* Disable all interrupts */
static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
{
int num_rings, q;
u64 reg_val;
/* Disable the PF-to-VF mailbox interrupt by clearing the interrupt register */
if (oct->mbox)
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (q = 0; q < num_rings; q++) {
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
reg_val &= ~(0x1ULL << 62);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
reg_val &= ~(0x1ULL << 62);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
}
}
/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
{
u32 pkt_in_done = readl(iq->inst_cnt_reg);
u32 last_done, new_idx;
last_done = pkt_in_done - iq->pkt_in_done;
iq->pkt_in_done = pkt_in_done;
new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;
return new_idx;
}
/* Enable a hardware Tx Queue */
static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
{
u64 loop = HZ;
u64 reg_val;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
loop--) {
schedule_timeout_interruptible(1);
}
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
reg_val |= 0x1ULL;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}
/* Enable a hardware Rx Queue */
static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
{
u64 reg_val = 0ULL;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
reg_val |= (0x1ULL << 62);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
reg_val |= 0x1ULL;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}
/* Enable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
{
u8 q;
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
octep_vf_enable_iq_cnxk(oct, q);
octep_vf_enable_oq_cnxk(oct, q);
}
}
/* Disable a hardware Tx Queue assigned to VF */
static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
{
u64 reg_val = 0ULL;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
reg_val &= ~0x1ULL;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}
/* Disable a hardware Rx Queue assigned to VF */
static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
{
u64 reg_val = 0ULL;
reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
reg_val &= ~0x1ULL;
octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}
/* Disable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
{
int q = 0;
for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
octep_vf_disable_iq_cnxk(oct, q);
octep_vf_disable_oq_cnxk(oct, q);
}
}
/* Dump hardware registers (including Tx/Rx queues) for debugging. */
static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
{
u8 num_rings, q;
num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
for (q = 0; q < num_rings; q++)
cnxk_vf_dump_q_regs(oct, q);
}
/**
* octep_vf_device_setup_cnxk() - Setup Octeon device.
*
* @oct: Octeon device private data structure.
*
* - initialize hardware operations.
* - get target side pcie port number for the device.
* - set initial configuration and max limits.
*/
void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
{
oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;
oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;
oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;
oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;
oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;
oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;
oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
octep_vf_init_config_cnxk_vf(oct);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_CONFIG_H_
#define _OCTEP_VF_CONFIG_H_
/* Tx instruction types by length */
#define OCTEP_VF_32BYTE_INSTR 32
#define OCTEP_VF_64BYTE_INSTR 64
/* Tx Queue: maximum descriptors per ring */
#define OCTEP_VF_IQ_MAX_DESCRIPTORS 1024
/* Minimum input (Tx) requests to be enqueued to ring doorbell */
#define OCTEP_VF_DB_MIN 8
/* Packet threshold for Tx queue interrupt */
#define OCTEP_VF_IQ_INTR_THRESHOLD 0x0
/* Minimum watermark for backpressure */
#define OCTEP_VF_OQ_WMARK_MIN 256
/* Rx Queue: maximum descriptors per ring */
#define OCTEP_VF_OQ_MAX_DESCRIPTORS 1024
/* Rx buffer size: Use page size buffers.
* Build skb from allocated page buffer once the packet is received.
* When a gathered packet is received, make head page as skb head and
* page buffers in consecutive Rx descriptors as fragments.
*/
#define OCTEP_VF_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE))
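/* For reference: SKB_WITH_OVERHEAD(PAGE_SIZE) evaluates to PAGE_SIZE
* minus the aligned size of struct skb_shared_info, so each page-sized
* Rx buffer leaves room for the shared info placed at the end of the
* buffer when the skb is built around it.
*/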
#define OCTEP_VF_OQ_PKTS_PER_INTR 128
#define OCTEP_VF_OQ_REFILL_THRESHOLD (OCTEP_VF_OQ_MAX_DESCRIPTORS / 4)
#define OCTEP_VF_OQ_INTR_PKT_THRESHOLD 1
#define OCTEP_VF_OQ_INTR_TIME_THRESHOLD 10
#define OCTEP_VF_MSIX_NAME_SIZE (IFNAMSIZ + 32)
/* Tx Queue wake threshold
* Wake up a stopped Tx queue if at least 2 descriptors are available.
* Even an skb with fragments consumes only one Tx queue descriptor entry.
*/
#define OCTEP_VF_WAKE_QUEUE_THRESHOLD 2
/* Minimum MTU supported by Octeon network interface */
#define OCTEP_VF_MIN_MTU ETH_MIN_MTU
/* Maximum MTU supported by Octeon interface */
#define OCTEP_VF_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN))
/* Default MTU */
#define OCTEP_VF_DEFAULT_MTU 1500
/* Macros to get octeon config params */
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
#define CFG_GET_IQ_INSTR_SIZE(cfg) (64)
#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs)
#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size)
#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
#define CFG_GET_OQ_WMARK(cfg) ((cfg)->oq.wmark)
#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->ring_cfg.active_io_rings)
#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->ring_cfg.max_io_rings)
#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us)
#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us)
#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix)
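/* Example use of the accessors above (illustrative): call sites stay
* independent of the config structure layout, e.g.
*
*   u16 rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
*   u32 usecs = CFG_GET_OQ_INTR_TIME(oct->conf);
*/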
/* Hardware Tx Queue configuration. */
struct octep_vf_iq_config {
/* Size of the Input queue (number of commands) */
u16 num_descs;
/* Command size - 32 or 64 bytes */
u16 instr_type;
/* Minimum number of commands pending to be posted to Octeon before the
* driver rings the Input queue doorbell.
*/
u16 db_min;
/* Trigger the IQ interrupt when processed cmd count reaches
* this level.
*/
u32 intr_threshold;
};
/* Hardware Rx Queue configuration. */
struct octep_vf_oq_config {
/* Size of Output queue (number of descriptors) */
u16 num_descs;
/* Size of buffer in this Output queue. */
u16 buf_size;
/* The number of buffers that were consumed during packet processing
* by the driver on this Output queue before the driver attempts to
* replenish the descriptor ring with new buffers.
*/
u16 refill_threshold;
/* Interrupt Coalescing (Packet Count). Octeon will interrupt the host
* only if it sent as many packets as specified by this field.
* The driver usually does not use packet count interrupt coalescing.
*/
u32 oq_intr_pkt;
/* Interrupt Coalescing (Time Interval). Octeon will interrupt the host
* if at least one packet was sent in the time interval specified by
* this field. The driver uses time interval interrupt coalescing by
* default. The time is specified in microseconds.
*/
u32 oq_intr_time;
/* Water mark for backpressure.
* Output queue sends backpressure signal to source when
* free buffer count falls below wmark.
*/
u32 wmark;
};
/* Tx/Rx configuration */
struct octep_vf_ring_config {
/* Max number of IOQs */
u16 max_io_rings;
/* Number of active IOQs */
u16 active_io_rings;
};
/* Octeon MSI-x config. */
struct octep_vf_msix_config {
/* Number of IOQ interrupts */
u16 ioq_msix;
};
/* Data Structure to hold configuration limits and active config */
struct octep_vf_config {
/* Input Queue attributes. */
struct octep_vf_iq_config iq;
/* Output Queue attributes. */
struct octep_vf_oq_config oq;
/* MSI-X interrupt config */
struct octep_vf_msix_config msix_cfg;
/* NIC VF ring Configuration */
struct octep_vf_ring_config ring_cfg;
};
#endif /* _OCTEP_VF_CONFIG_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
static const char octep_vf_gstrings_global_stats[][ETH_GSTRING_LEN] = {
"rx_packets",
"tx_packets",
"rx_bytes",
"tx_bytes",
"rx_alloc_errors",
"tx_busy_errors",
"rx_dropped",
"tx_dropped",
"tx_hw_pkts",
"tx_hw_octs",
"tx_hw_bcast",
"tx_hw_mcast",
"rx_hw_pkts",
"rx_hw_bytes",
"rx_hw_bcast",
"rx_hw_mcast",
"rx_dropped_pkts_fifo_full",
"rx_dropped_bytes_fifo_full",
"rx_err_pkts",
};
#define OCTEP_VF_GLOBAL_STATS_CNT (sizeof(octep_vf_gstrings_global_stats) / ETH_GSTRING_LEN)
static const char octep_vf_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = {
"tx_packets_posted[Q-%u]",
"tx_packets_completed[Q-%u]",
"tx_bytes[Q-%u]",
"tx_busy[Q-%u]",
};
#define OCTEP_VF_TX_Q_STATS_CNT (sizeof(octep_vf_gstrings_tx_q_stats) / ETH_GSTRING_LEN)
static const char octep_vf_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = {
"rx_packets[Q-%u]",
"rx_bytes[Q-%u]",
"rx_alloc_errors[Q-%u]",
};
#define OCTEP_VF_RX_Q_STATS_CNT (sizeof(octep_vf_gstrings_rx_q_stats) / ETH_GSTRING_LEN)
static void octep_vf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct octep_vf_device *oct = netdev_priv(netdev);
strscpy(info->driver, OCTEP_VF_DRV_NAME, sizeof(info->driver));
strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info));
}
static void octep_vf_get_strings(struct net_device *netdev,
u32 stringset, u8 *data)
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
char *strings = (char *)data;
int i, j;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < OCTEP_VF_GLOBAL_STATS_CNT; i++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_vf_gstrings_global_stats[i]);
strings += ETH_GSTRING_LEN;
}
for (i = 0; i < num_queues; i++) {
for (j = 0; j < OCTEP_VF_TX_Q_STATS_CNT; j++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_vf_gstrings_tx_q_stats[j], i);
strings += ETH_GSTRING_LEN;
}
}
for (i = 0; i < num_queues; i++) {
for (j = 0; j < OCTEP_VF_RX_Q_STATS_CNT; j++) {
snprintf(strings, ETH_GSTRING_LEN,
octep_vf_gstrings_rx_q_stats[j], i);
strings += ETH_GSTRING_LEN;
}
}
break;
default:
break;
}
}
static int octep_vf_get_sset_count(struct net_device *netdev, int sset)
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
switch (sset) {
case ETH_SS_STATS:
return OCTEP_VF_GLOBAL_STATS_CNT + (num_queues *
(OCTEP_VF_TX_Q_STATS_CNT + OCTEP_VF_RX_Q_STATS_CNT));
default:
return -EOPNOTSUPP;
}
}
static void octep_vf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct octep_vf_device *oct = netdev_priv(netdev);
struct octep_vf_iface_tx_stats *iface_tx_stats;
struct octep_vf_iface_rx_stats *iface_rx_stats;
u64 rx_alloc_errors, tx_busy_errors;
u64 rx_packets, rx_bytes;
u64 tx_packets, tx_bytes;
int q, i;
rx_packets = 0;
rx_bytes = 0;
tx_packets = 0;
tx_bytes = 0;
rx_alloc_errors = 0;
tx_busy_errors = 0;
octep_vf_get_if_stats(oct);
iface_tx_stats = &oct->iface_tx_stats;
iface_rx_stats = &oct->iface_rx_stats;
for (q = 0; q < oct->num_oqs; q++) {
struct octep_vf_iq *iq = oct->iq[q];
struct octep_vf_oq *oq = oct->oq[q];
tx_packets += iq->stats.instr_completed;
tx_bytes += iq->stats.bytes_sent;
tx_busy_errors += iq->stats.tx_busy;
rx_packets += oq->stats.packets;
rx_bytes += oq->stats.bytes;
rx_alloc_errors += oq->stats.alloc_failures;
}
i = 0;
data[i++] = rx_packets;
data[i++] = tx_packets;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
data[i++] = rx_alloc_errors;
data[i++] = tx_busy_errors;
data[i++] = iface_rx_stats->dropped_pkts_fifo_full +
iface_rx_stats->err_pkts;
data[i++] = iface_tx_stats->dropped;
data[i++] = iface_tx_stats->pkts;
data[i++] = iface_tx_stats->octs;
data[i++] = iface_tx_stats->bcst;
data[i++] = iface_tx_stats->mcst;
data[i++] = iface_rx_stats->pkts;
data[i++] = iface_rx_stats->octets;
data[i++] = iface_rx_stats->bcast_pkts;
data[i++] = iface_rx_stats->mcast_pkts;
data[i++] = iface_rx_stats->dropped_pkts_fifo_full;
data[i++] = iface_rx_stats->dropped_octets_fifo_full;
data[i++] = iface_rx_stats->err_pkts;
/* Per Tx Queue stats */
for (q = 0; q < oct->num_iqs; q++) {
struct octep_vf_iq *iq = oct->iq[q];
data[i++] = iq->stats.instr_posted;
data[i++] = iq->stats.instr_completed;
data[i++] = iq->stats.bytes_sent;
data[i++] = iq->stats.tx_busy;
}
/* Per Rx Queue stats */
for (q = 0; q < oct->num_oqs; q++) {
struct octep_vf_oq *oq = oct->oq[q];
data[i++] = oq->stats.packets;
data[i++] = oq->stats.bytes;
data[i++] = oq->stats.alloc_failures;
}
}
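/* Data layout sketch (illustrative): with two active queue pairs the
* data[] array filled above is
*
*   data[0..18]   19 global stats, in octep_vf_gstrings_global_stats order
*   data[19..26]  Tx queue stats, Q0 then Q1 (4 counters per queue)
*   data[27..32]  Rx queue stats, Q0 then Q1 (3 counters per queue)
*
* The ordering must match octep_vf_get_strings() exactly, which is why
* both walk the same string tables.
*/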
#define OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(octep_vf_speeds, ksettings, name) \
{ \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_T)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_R)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_CR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_KR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_LR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_10GBASE_SR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_CR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_KR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_25GBASE_SR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_CR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_KR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_LR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_40GBASE_SR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR2)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR2)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR2)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_CR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_KR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_LR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_50GBASE_SR)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_CR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_KR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_LR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \
if ((octep_vf_speeds) & BIT(OCTEP_VF_LINK_MODE_100GBASE_SR4)) \
ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \
}
static int octep_vf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct octep_vf_device *oct = netdev_priv(netdev);
struct octep_vf_iface_link_info *link_info;
u32 advertised_modes, supported_modes;
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
octep_vf_get_link_info(oct);
advertised_modes = oct->link_info.advertised_modes;
supported_modes = oct->link_info.supported_modes;
link_info = &oct->link_info;
OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported);
OCTEP_VF_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising);
if (link_info->autoneg) {
if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED)
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
if (link_info->autoneg & OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED) {
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
cmd->base.autoneg = AUTONEG_ENABLE;
} else {
cmd->base.autoneg = AUTONEG_DISABLE;
}
} else {
cmd->base.autoneg = AUTONEG_DISABLE;
}
cmd->base.port = PORT_FIBRE;
ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
if (netif_carrier_ok(netdev)) {
cmd->base.speed = link_info->speed;
cmd->base.duplex = DUPLEX_FULL;
} else {
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static const struct ethtool_ops octep_vf_ethtool_ops = {
.get_drvinfo = octep_vf_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = octep_vf_get_strings,
.get_sset_count = octep_vf_get_sset_count,
.get_ethtool_stats = octep_vf_get_ethtool_stats,
.get_link_ksettings = octep_vf_get_link_ksettings,
};
void octep_vf_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &octep_vf_ethtool_ops;
}
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
struct workqueue_struct *octep_vf_wq;
/* Supported Devices */
static const struct pci_device_id octep_vf_pci_id_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN98_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KA_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KA_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF10KB_VF)},
{PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN10KB_VF)},
{0, },
};
MODULE_DEVICE_TABLE(pci, octep_vf_pci_id_tbl);
MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>");
MODULE_DESCRIPTION(OCTEP_VF_DRV_STRING);
MODULE_LICENSE("GPL");
/**
* octep_vf_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info.
*
* @oct: Octeon device private data structure.
*
* Allocate resources to hold per Tx/Rx queue interrupt info.
* This is the information passed to the interrupt handler, from which NAPI
* poll is scheduled; it includes quick access to the private data of the
* Tx/Rx queue corresponding to the interrupt being handled.
*
* Return: 0, on successful allocation of resources for all queue interrupts.
* -1, if failed to allocate any resource.
*/
static int octep_vf_alloc_ioq_vectors(struct octep_vf_device *oct)
{
struct octep_vf_ioq_vector *ioq_vector;
int i;
for (i = 0; i < oct->num_oqs; i++) {
oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i]));
if (!oct->ioq_vector[i])
goto free_ioq_vector;
ioq_vector = oct->ioq_vector[i];
ioq_vector->iq = oct->iq[i];
ioq_vector->oq = oct->oq[i];
ioq_vector->octep_vf_dev = oct;
}
dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs);
return 0;
free_ioq_vector:
while (i) {
i--;
vfree(oct->ioq_vector[i]);
oct->ioq_vector[i] = NULL;
}
return -1;
}
/**
* octep_vf_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_free_ioq_vectors(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
if (oct->ioq_vector[i]) {
vfree(oct->ioq_vector[i]);
oct->ioq_vector[i] = NULL;
}
}
netdev_info(oct->netdev, "Freed IOQ Vectors\n");
}
/**
* octep_vf_enable_msix_range() - enable MSI-x interrupts.
*
* @oct: Octeon device private data structure.
*
* Allocate and enable all MSI-x interrupts (queue and non-queue interrupts)
* for the Octeon device.
*
* Return: 0, on successfully enabling all MSI-x interrupts.
* -1, if failed to enable any MSI-x interrupt.
*/
static int octep_vf_enable_msix_range(struct octep_vf_device *oct)
{
int num_msix, msix_allocated;
int i;
/* Only IOQ MSI-X vectors are used by the VF for now; no extra
* (non-IOQ) generic interrupt vectors are requested.
*/
num_msix = oct->num_oqs;
oct->msix_entries = kcalloc(num_msix, sizeof(struct msix_entry), GFP_KERNEL);
if (!oct->msix_entries)
goto msix_alloc_err;
for (i = 0; i < num_msix; i++)
oct->msix_entries[i].entry = i;
msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries,
num_msix, num_msix);
if (msix_allocated != num_msix) {
dev_err(&oct->pdev->dev,
"Failed to enable %d msix irqs; got only %d\n",
num_msix, msix_allocated);
goto enable_msix_err;
}
oct->num_irqs = msix_allocated;
dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n");
return 0;
enable_msix_err:
if (msix_allocated > 0)
pci_disable_msix(oct->pdev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
msix_alloc_err:
return -1;
}
/**
* octep_vf_disable_msix() - disable MSI-x interrupts.
*
* @oct: Octeon device private data structure.
*
* Disable MSI-x on the Octeon device.
*/
static void octep_vf_disable_msix(struct octep_vf_device *oct)
{
pci_disable_msix(oct->pdev);
kfree(oct->msix_entries);
oct->msix_entries = NULL;
dev_info(&oct->pdev->dev, "Disabled MSI-X\n");
}
/**
* octep_vf_ioq_intr_handler() - handler for all Tx/Rx queue interrupts.
*
* @irq: Interrupt number.
* @data: interrupt data contains pointers to Tx/Rx queue private data
 *        and corresponding NAPI context.
 *
 * This is the common handler for all Tx/Rx queue interrupts.
*/
static irqreturn_t octep_vf_ioq_intr_handler(int irq, void *data)
{
struct octep_vf_ioq_vector *ioq_vector = data;
struct octep_vf_device *oct = ioq_vector->octep_vf_dev;
return oct->hw_ops.ioq_intr_handler(ioq_vector);
}
/**
* octep_vf_request_irqs() - Register interrupt handlers.
*
* @oct: Octeon device private data structure.
*
* Register handlers for all queue and non-queue interrupts.
*
* Return: 0, on successful registration of all interrupt handlers.
* -1, on any error.
*/
static int octep_vf_request_irqs(struct octep_vf_device *oct)
{
struct net_device *netdev = oct->netdev;
struct octep_vf_ioq_vector *ioq_vector;
struct msix_entry *msix_entry;
int ret, i;
/* Request IRQs for Tx/Rx queues */
for (i = 0; i < oct->num_oqs; i++) {
ioq_vector = oct->ioq_vector[i];
msix_entry = &oct->msix_entries[i];
snprintf(ioq_vector->name, sizeof(ioq_vector->name),
"%s-q%d", netdev->name, i);
ret = request_irq(msix_entry->vector,
octep_vf_ioq_intr_handler, 0,
ioq_vector->name, ioq_vector);
if (ret) {
netdev_err(netdev,
"request_irq failed for Q-%d; err=%d",
i, ret);
goto ioq_irq_err;
}
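		/* Pin each queue interrupt to a CPU, round-robin across
		 * the CPUs that are online at setup time.
		 */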
cpumask_set_cpu(i % num_online_cpus(),
&ioq_vector->affinity_mask);
irq_set_affinity_hint(msix_entry->vector,
&ioq_vector->affinity_mask);
}
return 0;
ioq_irq_err:
	while (i) {
		--i;
		irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
		free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
	}
return -1;
}
/**
* octep_vf_free_irqs() - free all registered interrupts.
*
* @oct: Octeon device private data structure.
*
* Free all queue and non-queue interrupts of the Octeon device.
*/
static void octep_vf_free_irqs(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_irqs; i++) {
irq_set_affinity_hint(oct->msix_entries[i].vector, NULL);
free_irq(oct->msix_entries[i].vector, oct->ioq_vector[i]);
}
netdev_info(oct->netdev, "IRQs freed\n");
}
/**
* octep_vf_setup_irqs() - setup interrupts for the Octeon device.
*
* @oct: Octeon device private data structure.
*
* Allocate data structures to hold per interrupt information, allocate/enable
* MSI-x interrupt and register interrupt handlers.
*
* Return: 0, on successful allocation and registration of all interrupts.
* -1, on any error.
*/
static int octep_vf_setup_irqs(struct octep_vf_device *oct)
{
if (octep_vf_alloc_ioq_vectors(oct))
goto ioq_vector_err;
if (octep_vf_enable_msix_range(oct))
goto enable_msix_err;
if (octep_vf_request_irqs(oct))
goto request_irq_err;
return 0;
request_irq_err:
octep_vf_disable_msix(oct);
enable_msix_err:
octep_vf_free_ioq_vectors(oct);
ioq_vector_err:
return -1;
}
/**
* octep_vf_clean_irqs() - free all interrupts and its resources.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_clean_irqs(struct octep_vf_device *oct)
{
octep_vf_free_irqs(oct);
octep_vf_disable_msix(oct);
octep_vf_free_ioq_vectors(oct);
}
/**
* octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
*
* @iq: Octeon Tx queue data structure.
* @oq: Octeon Rx queue data structure.
*/
static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
{
u32 pkts_pend = oq->pkts_pending;
netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
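	/* Writing the processed counts back to the hardware count
	 * registers decrements them; only then is the RESEND bit armed
	 * below to request a fresh interrupt for any remaining work.
	 */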
if (iq->pkts_processed) {
writel(iq->pkts_processed, iq->inst_cnt_reg);
iq->pkt_in_done -= iq->pkts_processed;
iq->pkts_processed = 0;
}
if (oq->last_pkt_count - pkts_pend) {
writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
oq->last_pkt_count = pkts_pend;
}
	/* Flush the previous writes before writing to the RESEND bit */
smp_wmb();
writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
}
/**
* octep_vf_napi_poll() - NAPI poll function for Tx/Rx.
*
* @napi: pointer to napi context.
* @budget: max number of packets to be processed in single invocation.
*/
static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
{
struct octep_vf_ioq_vector *ioq_vector =
container_of(napi, struct octep_vf_ioq_vector, napi);
u32 tx_pending, rx_done;
tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, budget);
rx_done = octep_vf_oq_process_rx(ioq_vector->oq, budget);
/* need more polling if tx completion processing is still pending or
* processed at least 'budget' number of rx packets.
*/
if (tx_pending || rx_done >= budget)
return budget;
napi_complete(napi);
octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
return rx_done;
}
/**
* octep_vf_napi_add() - Add NAPI poll for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_add(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, octep_vf_napi_poll);
oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
}
}
/**
* octep_vf_napi_delete() - delete NAPI poll callback for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_delete(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i);
netif_napi_del(&oct->ioq_vector[i]->napi);
oct->oq[i]->napi = NULL;
}
}
/**
* octep_vf_napi_enable() - enable NAPI for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_enable(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i);
napi_enable(&oct->ioq_vector[i]->napi);
}
}
/**
* octep_vf_napi_disable() - disable NAPI for all Tx/Rx queues.
*
* @oct: Octeon device private data structure.
*/
static void octep_vf_napi_disable(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i);
napi_disable(&oct->ioq_vector[i]->napi);
}
}
static void octep_vf_link_up(struct net_device *netdev)
{
netif_carrier_on(netdev);
netif_tx_start_all_queues(netdev);
}
static void octep_vf_set_rx_state(struct octep_vf_device *oct, bool up)
{
int err;
err = octep_vf_mbox_set_rx_state(oct, up);
if (err)
netdev_err(oct->netdev, "Set Rx state to %d failed with err:%d\n", up, err);
}
static int octep_vf_get_link_status(struct octep_vf_device *oct)
{
int err;
err = octep_vf_mbox_get_link_status(oct, &oct->link_info.oper_up);
if (err)
netdev_err(oct->netdev, "Get link status failed with err:%d\n", err);
return oct->link_info.oper_up;
}
static void octep_vf_set_link_status(struct octep_vf_device *oct, bool up)
{
int err;
err = octep_vf_mbox_set_link_status(oct, up);
if (err) {
netdev_err(oct->netdev, "Set link status to %d failed with err:%d\n", up, err);
return;
}
oct->link_info.oper_up = up;
}
/**
* octep_vf_open() - start the octeon network device.
*
* @netdev: pointer to kernel network device.
*
 * Set up Tx/Rx queues and interrupts, and enable hardware operation of
 * the Tx/Rx queues and interrupts.
*
* Return: 0, on successfully setting up device and bring it up.
* -1, on any error.
*/
static int octep_vf_open(struct net_device *netdev)
{
struct octep_vf_device *oct = netdev_priv(netdev);
int err, ret;
netdev_info(netdev, "Starting netdev ...\n");
netif_carrier_off(netdev);
oct->hw_ops.reset_io_queues(oct);
if (octep_vf_setup_iqs(oct))
goto setup_iq_err;
if (octep_vf_setup_oqs(oct))
goto setup_oq_err;
if (octep_vf_setup_irqs(oct))
goto setup_irq_err;
err = netif_set_real_num_tx_queues(netdev, oct->num_oqs);
if (err)
goto set_queues_err;
err = netif_set_real_num_rx_queues(netdev, oct->num_iqs);
if (err)
goto set_queues_err;
octep_vf_napi_add(oct);
octep_vf_napi_enable(oct);
oct->link_info.admin_up = 1;
octep_vf_set_rx_state(oct, true);
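	/* If the PF reports the physical link down, request link up;
	 * carrier is turned on below once the queues are enabled and
	 * the link status is read back.
	 */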
ret = octep_vf_get_link_status(oct);
if (!ret)
octep_vf_set_link_status(oct, true);
/* Enable the input and output queues for this Octeon device */
oct->hw_ops.enable_io_queues(oct);
/* Enable Octeon device interrupts */
oct->hw_ops.enable_interrupts(oct);
octep_vf_oq_dbell_init(oct);
ret = octep_vf_get_link_status(oct);
if (ret)
octep_vf_link_up(netdev);
return 0;
set_queues_err:
	/* NAPI contexts are added only after the real queue counts are
	 * set, so there is nothing NAPI-related to undo here.
	 */
octep_vf_clean_irqs(oct);
setup_irq_err:
octep_vf_free_oqs(oct);
setup_oq_err:
octep_vf_free_iqs(oct);
setup_iq_err:
return -1;
}
/**
* octep_vf_stop() - stop the octeon network device.
*
* @netdev: pointer to kernel network device.
*
* stop the device Tx/Rx operations, bring down the link and
* free up all resources allocated for Tx/Rx queues and interrupts.
*/
static int octep_vf_stop(struct net_device *netdev)
{
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_info(netdev, "Stopping the device ...\n");
/* Stop Tx from stack */
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
octep_vf_set_link_status(oct, false);
octep_vf_set_rx_state(oct, false);
oct->link_info.admin_up = 0;
oct->link_info.oper_up = 0;
oct->hw_ops.disable_interrupts(oct);
octep_vf_napi_disable(oct);
octep_vf_napi_delete(oct);
octep_vf_clean_irqs(oct);
octep_vf_clean_iqs(oct);
oct->hw_ops.disable_io_queues(oct);
oct->hw_ops.reset_io_queues(oct);
octep_vf_free_oqs(oct);
octep_vf_free_iqs(oct);
netdev_info(netdev, "Device stopped !!\n");
return 0;
}
/**
* octep_vf_iq_full_check() - check if a Tx queue is full.
*
* @iq: Octeon Tx queue data structure.
*
* Return: 0, if the Tx queue is not full.
* 1, if the Tx queue is full.
*/
static int octep_vf_iq_full_check(struct octep_vf_iq *iq)
{
	if (likely(IQ_INSTR_SPACE(iq) > OCTEP_VF_WAKE_QUEUE_THRESHOLD))
return 0;
/* Stop the queue if unable to send */
netif_stop_subqueue(iq->netdev, iq->q_no);
/* check again and restart the queue, in case NAPI has just freed
* enough Tx ring entries.
*/
	if (unlikely(IQ_INSTR_SPACE(iq) > OCTEP_VF_WAKE_QUEUE_THRESHOLD)) {
netif_start_subqueue(iq->netdev, iq->q_no);
iq->stats.restart_cnt++;
return 0;
}
return 1;
}
/**
 * octep_vf_start_xmit() - Enqueue packet to Octeon hardware Tx Queue.
*
* @skb: packet skbuff pointer.
* @netdev: kernel network device.
*
* Return: NETDEV_TX_BUSY, if Tx Queue is full.
* NETDEV_TX_OK, if successfully enqueued to hardware Tx queue.
*/
static netdev_tx_t octep_vf_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct octep_vf_device *oct = netdev_priv(netdev);
netdev_features_t feat = netdev->features;
struct octep_vf_tx_sglist_desc *sglist;
struct octep_vf_tx_buffer *tx_buffer;
struct octep_vf_tx_desc_hw *hw_desc;
struct skb_shared_info *shinfo;
struct octep_vf_instr_hdr *ih;
struct octep_vf_iq *iq;
skb_frag_t *frag;
u16 nr_frags, si;
int xmit_more;
u16 q_no, wi;
if (skb_put_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
q_no = skb_get_queue_mapping(skb);
if (q_no >= oct->num_iqs) {
netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);
q_no = q_no % oct->num_iqs;
}
iq = oct->iq[q_no];
if (octep_vf_iq_full_check(iq)) {
iq->stats.tx_busy++;
return NETDEV_TX_BUSY;
}
shinfo = skb_shinfo(skb);
nr_frags = shinfo->nr_frags;
wi = iq->host_write_index;
hw_desc = &iq->desc_ring[wi];
hw_desc->ih64 = 0;
tx_buffer = iq->buff_info + wi;
tx_buffer->skb = skb;
	ih = &hw_desc->ih;
	ih->pkind = oct->fw_info.pkind;
	ih->fsz = oct->fw_info.fsz;
	ih->tlen = skb->len + ih->fsz;
if (!nr_frags) {
tx_buffer->gather = 0;
tx_buffer->dma = dma_map_single(iq->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(iq->dev, tx_buffer->dma))
goto dma_map_err;
hw_desc->dptr = tx_buffer->dma;
} else {
/* Scatter/Gather */
dma_addr_t dma;
u16 len;
sglist = tx_buffer->sglist;
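		/* Each sglist descriptor packs four u16 lengths into one
		 * 64-bit word followed by four DMA pointers; lengths are
		 * stored at index (3 - (si & 3)) so the length word reads
		 * correctly after the hardware's endian swap.
		 */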
ih->gsz = nr_frags + 1;
ih->gather = 1;
tx_buffer->gather = 1;
len = skb_headlen(skb);
dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(iq->dev, dma))
goto dma_map_err;
memset(sglist, 0, OCTEP_VF_SGLIST_SIZE_PER_PKT);
sglist[0].len[3] = len;
sglist[0].dma_ptr[0] = dma;
si = 1; /* entry 0 is main skb, mapped above */
frag = &shinfo->frags[0];
while (nr_frags--) {
len = skb_frag_size(frag);
dma = skb_frag_dma_map(iq->dev, frag, 0,
len, DMA_TO_DEVICE);
if (dma_mapping_error(iq->dev, dma))
goto dma_map_sg_err;
sglist[si >> 2].len[3 - (si & 3)] = len;
sglist[si >> 2].dma_ptr[si & 3] = dma;
frag++;
si++;
}
hw_desc->dptr = tx_buffer->sglist_dma;
}
if (oct->fw_info.tx_ol_flags) {
if ((feat & (NETIF_F_TSO)) && (skb_is_gso(skb))) {
hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
hw_desc->txm.ol_flags |= OCTEP_VF_TX_OFFLOAD_TSO;
hw_desc->txm.gso_size = skb_shinfo(skb)->gso_size;
hw_desc->txm.gso_segs = skb_shinfo(skb)->gso_segs;
} else if (feat & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
hw_desc->txm.ol_flags = OCTEP_VF_TX_OFFLOAD_CKSUM;
}
		/* due to ESR, txm will be swapped by hw */
hw_desc->txm64[0] = (__force u64)cpu_to_be64(hw_desc->txm64[0]);
}
netdev_tx_sent_queue(iq->netdev_q, skb->len);
xmit_more = netdev_xmit_more();
skb_tx_timestamp(skb);
iq->fill_cnt++;
wi++;
iq->host_write_index = wi & iq->ring_size_mask;
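	/* Batch doorbell writes: when the stack indicates more packets
	 * are coming and the ring still has room, defer ringing the
	 * doorbell until fill_threshold descriptors have accumulated.
	 */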
if (xmit_more &&
(IQ_INSTR_PENDING(iq) <
(iq->max_count - OCTEP_VF_WAKE_QUEUE_THRESHOLD)) &&
iq->fill_cnt < iq->fill_threshold)
return NETDEV_TX_OK;
/* Flush the hw descriptors before writing to doorbell */
smp_wmb();
writel(iq->fill_cnt, iq->doorbell_reg);
iq->stats.instr_posted += iq->fill_cnt;
iq->fill_cnt = 0;
return NETDEV_TX_OK;
dma_map_sg_err:
	/* The skb head was stored at len[3]/dma_ptr[0]; entries 1..si-1
	 * hold successfully mapped frags. Entry si itself failed, so it
	 * was never stored and must not be unmapped.
	 */
	if (si > 0) {
		dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
				 sglist[0].len[3], DMA_TO_DEVICE);
		sglist[0].len[3] = 0;
	}
	while (si > 1) {
		si--;
		dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
			       sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
		sglist[si >> 2].len[3 - (si & 3)] = 0;
	}
tx_buffer->gather = 0;
dma_map_err:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
int octep_vf_get_if_stats(struct octep_vf_device *oct)
{
struct octep_vf_iface_rxtx_stats vf_stats;
int ret, size;
memset(&vf_stats, 0, sizeof(struct octep_vf_iface_rxtx_stats));
ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_STATS,
(u8 *)&vf_stats, &size);
if (!ret) {
memcpy(&oct->iface_rx_stats, &vf_stats.iface_rx_stats,
sizeof(struct octep_vf_iface_rx_stats));
memcpy(&oct->iface_tx_stats, &vf_stats.iface_tx_stats,
sizeof(struct octep_vf_iface_tx_stats));
}
return ret;
}
int octep_vf_get_link_info(struct octep_vf_device *oct)
{
int ret, size;
ret = octep_vf_mbox_bulk_read(oct, OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
(u8 *)&oct->link_info, &size);
if (ret) {
dev_err(&oct->pdev->dev, "Get VF link info failed via VF Mbox\n");
return ret;
}
return 0;
}
/**
* octep_vf_get_stats64() - Get Octeon network device statistics.
*
* @netdev: kernel network device.
* @stats: pointer to stats structure to be filled in.
*/
static void octep_vf_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct octep_vf_device *oct = netdev_priv(netdev);
u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
int q;
tx_packets = 0;
tx_bytes = 0;
rx_packets = 0;
rx_bytes = 0;
for (q = 0; q < oct->num_oqs; q++) {
struct octep_vf_iq *iq = oct->iq[q];
struct octep_vf_oq *oq = oct->oq[q];
tx_packets += iq->stats.instr_completed;
tx_bytes += iq->stats.bytes_sent;
rx_packets += oq->stats.packets;
rx_bytes += oq->stats.bytes;
}
stats->tx_packets = tx_packets;
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
stats->rx_bytes = rx_bytes;
if (!octep_vf_get_if_stats(oct)) {
stats->multicast = oct->iface_rx_stats.mcast_pkts;
stats->rx_errors = oct->iface_rx_stats.err_pkts;
}
}
/**
 * octep_vf_tx_timeout_task - work queue task to handle Tx queue timeout.
*
* @work: pointer to Tx queue timeout work_struct
*
* Stop and start the device so that it frees up all queue resources
 * and restarts the queues, which potentially clears a Tx queue timeout
* condition.
**/
static void octep_vf_tx_timeout_task(struct work_struct *work)
{
struct octep_vf_device *oct = container_of(work, struct octep_vf_device,
tx_timeout_task);
struct net_device *netdev = oct->netdev;
rtnl_lock();
if (netif_running(netdev)) {
octep_vf_stop(netdev);
octep_vf_open(netdev);
}
rtnl_unlock();
}
/**
* octep_vf_tx_timeout() - Handle Tx Queue timeout.
*
* @netdev: pointer to kernel network device.
* @txqueue: Timed out Tx queue number.
*
* Schedule a work to handle Tx queue timeout.
*/
static void octep_vf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct octep_vf_device *oct = netdev_priv(netdev);
queue_work(octep_vf_wq, &oct->tx_timeout_task);
}
static int octep_vf_set_mac(struct net_device *netdev, void *p)
{
struct octep_vf_device *oct = netdev_priv(netdev);
struct sockaddr *addr = (struct sockaddr *)p;
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
err = octep_vf_mbox_set_mac_addr(oct, addr->sa_data);
if (err)
return err;
memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN);
eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct octep_vf_device *oct = netdev_priv(netdev);
struct octep_vf_iface_link_info *link_info;
int err;
link_info = &oct->link_info;
if (link_info->mtu == new_mtu)
return 0;
err = octep_vf_mbox_set_mtu(oct, new_mtu);
if (!err) {
oct->link_info.mtu = new_mtu;
netdev->mtu = new_mtu;
}
return err;
}
static int octep_vf_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct octep_vf_device *oct = netdev_priv(netdev);
u16 rx_offloads = 0, tx_offloads = 0;
int err;
/* We only support features received from firmware */
if ((features & netdev->hw_features) != features)
return -EINVAL;
if (features & NETIF_F_TSO)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
if (features & NETIF_F_TSO6)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_TSO;
if (features & NETIF_F_IP_CSUM)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
if (features & NETIF_F_IPV6_CSUM)
tx_offloads |= OCTEP_VF_TX_OFFLOAD_CKSUM;
if (features & NETIF_F_RXCSUM)
rx_offloads |= OCTEP_VF_RX_OFFLOAD_CKSUM;
err = octep_vf_mbox_set_offloads(oct, tx_offloads, rx_offloads);
if (!err)
netdev->features = features;
return err;
}
static const struct net_device_ops octep_vf_netdev_ops = {
.ndo_open = octep_vf_open,
.ndo_stop = octep_vf_stop,
.ndo_start_xmit = octep_vf_start_xmit,
.ndo_get_stats64 = octep_vf_get_stats64,
.ndo_tx_timeout = octep_vf_tx_timeout,
.ndo_set_mac_address = octep_vf_set_mac,
.ndo_change_mtu = octep_vf_change_mtu,
.ndo_set_features = octep_vf_set_features,
};
static const char *octep_vf_devid_to_str(struct octep_vf_device *oct)
{
switch (oct->chip_id) {
	case OCTEP_PCI_DEVICE_ID_CN93_VF:
		return "CN93XX";
	case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
		return "CNF95N";
	case OCTEP_PCI_DEVICE_ID_CN98_VF:
		return "CN98XX";
case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
return "CN10KA";
case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
return "CNF10KA";
case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
return "CNF10KB";
case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
return "CN10KB";
default:
return "Unsupported";
}
}
/**
* octep_vf_device_setup() - Setup Octeon Device.
*
* @oct: Octeon device private data structure.
*
* Setup Octeon device hardware operations, configuration, etc ...
*/
int octep_vf_device_setup(struct octep_vf_device *oct)
{
struct pci_dev *pdev = oct->pdev;
/* allocate memory for oct->conf */
oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL);
if (!oct->conf)
return -ENOMEM;
/* Map BAR region 0 */
oct->mmio.hw_addr = ioremap(pci_resource_start(oct->pdev, 0),
pci_resource_len(oct->pdev, 0));
if (!oct->mmio.hw_addr) {
dev_err(&pdev->dev,
"Failed to remap BAR0; start=0x%llx len=0x%llx\n",
pci_resource_start(oct->pdev, 0),
pci_resource_len(oct->pdev, 0));
goto ioremap_err;
}
oct->mmio.mapped = 1;
oct->chip_id = pdev->device;
oct->rev_id = pdev->revision;
dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device);
switch (oct->chip_id) {
case OCTEP_PCI_DEVICE_ID_CN93_VF:
case OCTEP_PCI_DEVICE_ID_CNF95N_VF:
case OCTEP_PCI_DEVICE_ID_CN98_VF:
dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
OCTEP_VF_MINOR_REV(oct));
octep_vf_device_setup_cn93(oct);
break;
case OCTEP_PCI_DEVICE_ID_CNF10KA_VF:
case OCTEP_PCI_DEVICE_ID_CN10KA_VF:
case OCTEP_PCI_DEVICE_ID_CNF10KB_VF:
case OCTEP_PCI_DEVICE_ID_CN10KB_VF:
dev_info(&pdev->dev, "Setting up OCTEON %s VF PASS%d.%d\n",
octep_vf_devid_to_str(oct), OCTEP_VF_MAJOR_REV(oct),
OCTEP_VF_MINOR_REV(oct));
octep_vf_device_setup_cnxk(oct);
break;
default:
dev_err(&pdev->dev, "Unsupported device\n");
goto unsupported_dev;
}
return 0;
unsupported_dev:
iounmap(oct->mmio.hw_addr);
ioremap_err:
kfree(oct->conf);
return -EOPNOTSUPP;
}
/**
* octep_vf_device_cleanup() - Cleanup Octeon Device.
*
* @oct: Octeon device private data structure.
*
* Cleanup Octeon device allocated resources.
*/
static void octep_vf_device_cleanup(struct octep_vf_device *oct)
{
dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
if (oct->mmio.mapped)
iounmap(oct->mmio.hw_addr);
kfree(oct->conf);
oct->conf = NULL;
}
static int octep_vf_get_mac_addr(struct octep_vf_device *oct, u8 *addr)
{
return octep_vf_mbox_get_mac_addr(oct, addr);
}
/**
* octep_vf_probe() - Octeon PCI device probe handler.
*
* @pdev: PCI device structure.
* @ent: entry in Octeon PCI device ID table.
*
* Initializes and enables the Octeon PCI device for network operations.
* Initializes Octeon private data structure and registers a network device.
*/
static int octep_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct octep_vf_device *octep_vf_dev;
struct net_device *netdev;
int err;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Failed to enable PCI device\n");
return err;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set DMA mask !!\n");
goto err_dma_mask;
}
err = pci_request_mem_regions(pdev, OCTEP_VF_DRV_NAME);
if (err) {
dev_err(&pdev->dev, "Failed to map PCI memory regions\n");
goto err_pci_regions;
}
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct octep_vf_device),
OCTEP_VF_MAX_QUEUES);
if (!netdev) {
dev_err(&pdev->dev, "Failed to allocate netdev\n");
err = -ENOMEM;
goto err_alloc_netdev;
}
SET_NETDEV_DEV(netdev, &pdev->dev);
octep_vf_dev = netdev_priv(netdev);
octep_vf_dev->netdev = netdev;
octep_vf_dev->pdev = pdev;
octep_vf_dev->dev = &pdev->dev;
pci_set_drvdata(pdev, octep_vf_dev);
err = octep_vf_device_setup(octep_vf_dev);
if (err) {
dev_err(&pdev->dev, "Device setup failed\n");
goto err_octep_vf_config;
}
INIT_WORK(&octep_vf_dev->tx_timeout_task, octep_vf_tx_timeout_task);
netdev->netdev_ops = &octep_vf_netdev_ops;
octep_vf_set_ethtool_ops(netdev);
netif_carrier_off(netdev);
if (octep_vf_setup_mbox(octep_vf_dev)) {
dev_err(&pdev->dev, "VF Mailbox setup failed\n");
err = -ENOMEM;
goto err_setup_mbox;
}
if (octep_vf_mbox_version_check(octep_vf_dev)) {
dev_err(&pdev->dev, "PF VF Mailbox version mismatch\n");
err = -EINVAL;
goto err_mbox_version;
}
if (octep_vf_mbox_get_fw_info(octep_vf_dev)) {
dev_err(&pdev->dev, "unable to get fw info\n");
err = -EINVAL;
goto err_mbox_version;
}
netdev->hw_features = NETIF_F_SG;
if (OCTEP_VF_TX_IP_CSUM(octep_vf_dev->fw_info.tx_ol_flags))
netdev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
if (OCTEP_VF_RX_IP_CSUM(octep_vf_dev->fw_info.rx_ol_flags))
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->min_mtu = OCTEP_VF_MIN_MTU;
netdev->max_mtu = OCTEP_VF_MAX_MTU;
netdev->mtu = OCTEP_VF_DEFAULT_MTU;
if (OCTEP_VF_TX_TSO(octep_vf_dev->fw_info.tx_ol_flags)) {
netdev->hw_features |= NETIF_F_TSO;
netif_set_tso_max_size(netdev, netdev->max_mtu);
}
netdev->features |= netdev->hw_features;
octep_vf_get_mac_addr(octep_vf_dev, octep_vf_dev->mac_addr);
eth_hw_addr_set(netdev, octep_vf_dev->mac_addr);
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
goto err_register_dev;
}
dev_info(&pdev->dev, "Device probe successful\n");
return 0;
err_register_dev:
err_mbox_version:
octep_vf_delete_mbox(octep_vf_dev);
err_setup_mbox:
octep_vf_device_cleanup(octep_vf_dev);
err_octep_vf_config:
free_netdev(netdev);
err_alloc_netdev:
pci_release_mem_regions(pdev);
err_pci_regions:
err_dma_mask:
pci_disable_device(pdev);
dev_err(&pdev->dev, "Device probe failed\n");
return err;
}
/**
* octep_vf_remove() - Remove Octeon PCI device from driver control.
*
* @pdev: PCI device structure of the Octeon device.
*
* Cleanup all resources allocated for the Octeon device.
* Unregister from network device and disable the PCI device.
*/
static void octep_vf_remove(struct pci_dev *pdev)
{
struct octep_vf_device *oct = pci_get_drvdata(pdev);
struct net_device *netdev;
if (!oct)
return;
octep_vf_mbox_dev_remove(oct);
cancel_work_sync(&oct->tx_timeout_task);
netdev = oct->netdev;
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
octep_vf_delete_mbox(oct);
octep_vf_device_cleanup(oct);
pci_release_mem_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
}
static struct pci_driver octep_vf_driver = {
.name = OCTEP_VF_DRV_NAME,
.id_table = octep_vf_pci_id_tbl,
.probe = octep_vf_probe,
.remove = octep_vf_remove,
};
/**
* octep_vf_init_module() - Module initialization.
*
* create common resource for the driver and register PCI driver.
*/
static int __init octep_vf_init_module(void)
{
int ret;
pr_info("%s: Loading %s ...\n", OCTEP_VF_DRV_NAME, OCTEP_VF_DRV_STRING);
/* work queue for all deferred tasks */
octep_vf_wq = create_singlethread_workqueue(OCTEP_VF_DRV_NAME);
if (!octep_vf_wq) {
pr_err("%s: Failed to create common workqueue\n",
OCTEP_VF_DRV_NAME);
return -ENOMEM;
}
ret = pci_register_driver(&octep_vf_driver);
	if (ret < 0) {
		pr_err("%s: Failed to register PCI driver; err=%d\n",
		       OCTEP_VF_DRV_NAME, ret);
		destroy_workqueue(octep_vf_wq);
		return ret;
	}
pr_info("%s: Loaded successfully !\n", OCTEP_VF_DRV_NAME);
return ret;
}
/**
* octep_vf_exit_module() - Module exit routine.
*
* unregister the driver with PCI subsystem and cleanup common resources.
*/
static void __exit octep_vf_exit_module(void)
{
pr_info("%s: Unloading ...\n", OCTEP_VF_DRV_NAME);
pci_unregister_driver(&octep_vf_driver);
destroy_workqueue(octep_vf_wq);
pr_info("%s: Unloading complete\n", OCTEP_VF_DRV_NAME);
}
module_init(octep_vf_init_module);
module_exit(octep_vf_exit_module);
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_MAIN_H_
#define _OCTEP_VF_MAIN_H_
#include "octep_vf_tx.h"
#include "octep_vf_rx.h"
#include "octep_vf_mbox.h"
#define OCTEP_VF_DRV_NAME "octeon_ep_vf"
#define OCTEP_VF_DRV_STRING "Marvell Octeon EndPoint NIC VF Driver"
#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 /* 93xx VF */
#define OCTEP_PCI_DEVICE_ID_CNF95N_VF 0xB403 /* 95N VF */
#define OCTEP_PCI_DEVICE_ID_CN98_VF 0xB103
#define OCTEP_PCI_DEVICE_ID_CN10KA_VF 0xB903
#define OCTEP_PCI_DEVICE_ID_CNF10KA_VF 0xBA03
#define OCTEP_PCI_DEVICE_ID_CNF10KB_VF 0xBC03
#define OCTEP_PCI_DEVICE_ID_CN10KB_VF 0xBD03
#define OCTEP_VF_MAX_QUEUES 63
#define OCTEP_VF_MAX_IQ OCTEP_VF_MAX_QUEUES
#define OCTEP_VF_MAX_OQ OCTEP_VF_MAX_QUEUES
#define OCTEP_VF_MAX_MSIX_VECTORS OCTEP_VF_MAX_OQ
#define OCTEP_VF_IQ_INTR_RESEND_BIT 59
#define OCTEP_VF_OQ_INTR_RESEND_BIT 59
#define IQ_INSTR_PENDING(iq) ({ typeof(iq) iq__ = (iq); \
((iq__)->host_write_index - (iq__)->flush_index) & \
(iq__)->ring_size_mask; \
})
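/* e.g. ring_size_mask = 1023, host_write_index = 10, flush_index = 1020:
 * IQ_INSTR_PENDING = (10 - 1020) & 1023 = 14 descriptors in flight.
 */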
#define IQ_INSTR_SPACE(iq) ({ typeof(iq) iq_ = (iq); \
(iq_)->max_count - IQ_INSTR_PENDING(iq_); \
})
#ifndef UINT64_MAX
#define UINT64_MAX ((u64)(~((u64)0))) /* 0xFFFFFFFFFFFFFFFF */
#endif
/* PCI address space mapping information.
* Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
* Octeon gets mapped to different physical address spaces in
* the kernel.
*/
struct octep_vf_mmio {
	/* Kernel virtual address to which the PCI address space is mapped. */
u8 __iomem *hw_addr;
/* Flag indicating the mapping was successful. */
int mapped;
};
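/* Chip-specific hardware access callbacks; populated by
 * octep_vf_device_setup_cn93() or octep_vf_device_setup_cnxk()
 * based on the PCI device ID.
 */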
struct octep_vf_hw_ops {
void (*setup_iq_regs)(struct octep_vf_device *oct, int q);
void (*setup_oq_regs)(struct octep_vf_device *oct, int q);
void (*setup_mbox_regs)(struct octep_vf_device *oct, int mbox);
irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
void (*reinit_regs)(struct octep_vf_device *oct);
u32 (*update_iq_read_idx)(struct octep_vf_iq *iq);
void (*enable_interrupts)(struct octep_vf_device *oct);
void (*disable_interrupts)(struct octep_vf_device *oct);
void (*enable_io_queues)(struct octep_vf_device *oct);
void (*disable_io_queues)(struct octep_vf_device *oct);
void (*enable_iq)(struct octep_vf_device *oct, int q);
void (*disable_iq)(struct octep_vf_device *oct, int q);
void (*enable_oq)(struct octep_vf_device *oct, int q);
void (*disable_oq)(struct octep_vf_device *oct, int q);
void (*reset_io_queues)(struct octep_vf_device *oct);
void (*dump_registers)(struct octep_vf_device *oct);
};
/* Octeon mailbox data */
struct octep_vf_mbox_data {
/* Holds the offset of received data via mailbox. */
u32 data_index;
/* Holds the received data via mailbox. */
u8 recv_data[OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE];
};
/* wrappers around work structs */
struct octep_vf_mbox_wk {
struct work_struct work;
void *ctxptr;
};
/* Octeon device mailbox */
struct octep_vf_mbox {
/* A mutex to protect access to this q_mbox. */
struct mutex lock;
u32 state;
/* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
u8 __iomem *mbox_int_reg;
/* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
* SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
*/
u8 __iomem *mbox_write_reg;
/* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
* SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
*/
u8 __iomem *mbox_read_reg;
/* Octeon mailbox data */
struct octep_vf_mbox_data mbox_data;
/* Octeon mailbox work handler to process Mbox messages */
struct octep_vf_mbox_wk wk;
};
/* Tx/Rx queue vector per interrupt. */
struct octep_vf_ioq_vector {
char name[OCTEP_VF_MSIX_NAME_SIZE];
struct napi_struct napi;
struct octep_vf_device *octep_vf_dev;
struct octep_vf_iq *iq;
struct octep_vf_oq *oq;
cpumask_t affinity_mask;
};
/* Octeon hardware/firmware offload capability flags. */
#define OCTEP_VF_CAP_TX_CHECKSUM BIT(0)
#define OCTEP_VF_CAP_RX_CHECKSUM BIT(1)
#define OCTEP_VF_CAP_TSO BIT(2)
/* Link modes */
enum octep_vf_link_mode_bit_indices {
OCTEP_VF_LINK_MODE_10GBASE_T = 0,
OCTEP_VF_LINK_MODE_10GBASE_R,
OCTEP_VF_LINK_MODE_10GBASE_CR,
OCTEP_VF_LINK_MODE_10GBASE_KR,
OCTEP_VF_LINK_MODE_10GBASE_LR,
OCTEP_VF_LINK_MODE_10GBASE_SR,
OCTEP_VF_LINK_MODE_25GBASE_CR,
OCTEP_VF_LINK_MODE_25GBASE_KR,
OCTEP_VF_LINK_MODE_25GBASE_SR,
OCTEP_VF_LINK_MODE_40GBASE_CR4,
OCTEP_VF_LINK_MODE_40GBASE_KR4,
OCTEP_VF_LINK_MODE_40GBASE_LR4,
OCTEP_VF_LINK_MODE_40GBASE_SR4,
OCTEP_VF_LINK_MODE_50GBASE_CR2,
OCTEP_VF_LINK_MODE_50GBASE_KR2,
OCTEP_VF_LINK_MODE_50GBASE_SR2,
OCTEP_VF_LINK_MODE_50GBASE_CR,
OCTEP_VF_LINK_MODE_50GBASE_KR,
OCTEP_VF_LINK_MODE_50GBASE_LR,
OCTEP_VF_LINK_MODE_50GBASE_SR,
OCTEP_VF_LINK_MODE_100GBASE_CR4,
OCTEP_VF_LINK_MODE_100GBASE_KR4,
OCTEP_VF_LINK_MODE_100GBASE_LR4,
OCTEP_VF_LINK_MODE_100GBASE_SR4,
OCTEP_VF_LINK_MODE_NBITS
};
/* Hardware interface link state information. */
struct octep_vf_iface_link_info {
/* Bitmap of Supported link speeds/modes. */
u64 supported_modes;
/* Bitmap of Advertised link speeds/modes. */
u64 advertised_modes;
/* Negotiated link speed in Mbps. */
u32 speed;
/* MTU */
u16 mtu;
/* Autonegotiation state. */
#define OCTEP_VF_LINK_MODE_AUTONEG_SUPPORTED BIT(0)
#define OCTEP_VF_LINK_MODE_AUTONEG_ADVERTISED BIT(1)
u8 autoneg;
/* Pause frames setting. */
#define OCTEP_VF_LINK_MODE_PAUSE_SUPPORTED BIT(0)
#define OCTEP_VF_LINK_MODE_PAUSE_ADVERTISED BIT(1)
u8 pause;
	/* Admin state of the link (ifconfig <iface> up/down). */
u8 admin_up;
	/* Operational state of the link: physical link is up/down. */
u8 oper_up;
};
/* Hardware interface stats information. */
struct octep_vf_iface_rxtx_stats {
/* Hardware Interface Rx statistics */
struct octep_vf_iface_rx_stats iface_rx_stats;
/* Hardware Interface Tx statistics */
struct octep_vf_iface_tx_stats iface_tx_stats;
};
struct octep_vf_fw_info {
/* pkind value to be used in every Tx hardware descriptor */
u8 pkind;
/* front size data */
u8 fsz;
/* supported rx offloads OCTEP_VF_RX_OFFLOAD_* */
u16 rx_ol_flags;
/* supported tx offloads OCTEP_VF_TX_OFFLOAD_* */
u16 tx_ol_flags;
};
/* The Octeon device specific private data structure.
* Each Octeon device has this structure to represent all its components.
*/
struct octep_vf_device {
struct octep_vf_config *conf;
/* Octeon Chip type. */
u16 chip_id;
u16 rev_id;
/* Device capabilities enabled */
u64 caps_enabled;
/* Device capabilities supported */
u64 caps_supported;
/* Pointer to basic Linux device */
struct device *dev;
/* Linux PCI device pointer */
struct pci_dev *pdev;
/* Netdev corresponding to the Octeon device */
struct net_device *netdev;
/* memory mapped io range */
struct octep_vf_mmio mmio;
/* MAC address */
u8 mac_addr[ETH_ALEN];
/* Tx queues (IQ: Instruction Queue) */
u16 num_iqs;
/* Pointers to Octeon Tx queues */
struct octep_vf_iq *iq[OCTEP_VF_MAX_IQ];
/* Rx queues (OQ: Output Queue) */
u16 num_oqs;
/* Pointers to Octeon Rx queues */
struct octep_vf_oq *oq[OCTEP_VF_MAX_OQ];
/* Hardware port number of the PCIe interface */
u16 pcie_port;
/* Hardware operations */
struct octep_vf_hw_ops hw_ops;
/* IRQ info */
u16 num_irqs;
u16 num_non_ioq_irqs;
char *non_ioq_irq_names;
struct msix_entry *msix_entries;
	/* Per-IOQ information for its corresponding MSI-X interrupt. */
struct octep_vf_ioq_vector *ioq_vector[OCTEP_VF_MAX_QUEUES];
/* Hardware Interface Tx statistics */
struct octep_vf_iface_tx_stats iface_tx_stats;
/* Hardware Interface Rx statistics */
struct octep_vf_iface_rx_stats iface_rx_stats;
/* Hardware Interface Link info like supported modes, aneg support */
struct octep_vf_iface_link_info link_info;
	/* Mailbox to talk to the PF */
struct octep_vf_mbox *mbox;
/* Work entry to handle Tx timeout */
struct work_struct tx_timeout_task;
/* offset for iface stats */
u32 ctrl_mbox_ifstats_offset;
/* Negotiated Mbox version */
u32 mbox_neg_ver;
/* firmware info */
struct octep_vf_fw_info fw_info;
};
static inline u16 OCTEP_VF_MAJOR_REV(struct octep_vf_device *oct)
{
u16 rev = (oct->rev_id & 0xC) >> 2;
return (rev == 0) ? 1 : rev;
}
static inline u16 OCTEP_VF_MINOR_REV(struct octep_vf_device *oct)
{
return (oct->rev_id & 0x3);
}
/* Octeon CSR read/write access APIs */
#define octep_vf_write_csr(octep_vf_dev, reg_off, value) \
writel(value, (octep_vf_dev)->mmio.hw_addr + (reg_off))
#define octep_vf_write_csr64(octep_vf_dev, reg_off, val64) \
writeq(val64, (octep_vf_dev)->mmio.hw_addr + (reg_off))
#define octep_vf_read_csr(octep_vf_dev, reg_off) \
readl((octep_vf_dev)->mmio.hw_addr + (reg_off))
#define octep_vf_read_csr64(octep_vf_dev, reg_off) \
readq((octep_vf_dev)->mmio.hw_addr + (reg_off))
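/* Example (hypothetical ring-0 doorbell write using the helpers above):
 *   octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_DBELL(0), 1ULL);
 */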
extern struct workqueue_struct *octep_vf_wq;
int octep_vf_device_setup(struct octep_vf_device *oct);
int octep_vf_setup_iqs(struct octep_vf_device *oct);
void octep_vf_free_iqs(struct octep_vf_device *oct);
void octep_vf_clean_iqs(struct octep_vf_device *oct);
int octep_vf_setup_oqs(struct octep_vf_device *oct);
void octep_vf_free_oqs(struct octep_vf_device *oct);
void octep_vf_oq_dbell_init(struct octep_vf_device *oct);
void octep_vf_device_setup_cn93(struct octep_vf_device *oct);
void octep_vf_device_setup_cnxk(struct octep_vf_device *oct);
int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget);
int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget);
void octep_vf_set_ethtool_ops(struct net_device *netdev);
int octep_vf_get_link_info(struct octep_vf_device *oct);
int octep_vf_get_if_stats(struct octep_vf_device *oct);
void octep_vf_mbox_work(struct work_struct *work);
#endif /* _OCTEP_VF_MAIN_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
/* When a new command is implemented, the table below should be updated
 * with the new command and its version info.
 */
static u32 pfvf_cmd_versions[OCTEP_PFVF_MBOX_CMD_MAX] = {
[0 ... OCTEP_PFVF_MBOX_CMD_DEV_REMOVE] = OCTEP_PFVF_MBOX_VERSION_V1,
[OCTEP_PFVF_MBOX_CMD_GET_FW_INFO ... OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS] =
OCTEP_PFVF_MBOX_VERSION_V2
};
int octep_vf_setup_mbox(struct octep_vf_device *oct)
{
int ring = 0;
oct->mbox = vzalloc(sizeof(*oct->mbox));
if (!oct->mbox)
return -1;
mutex_init(&oct->mbox->lock);
oct->hw_ops.setup_mbox_regs(oct, ring);
INIT_WORK(&oct->mbox->wk.work, octep_vf_mbox_work);
oct->mbox->wk.ctxptr = oct;
oct->mbox_neg_ver = OCTEP_PFVF_MBOX_VERSION_CURRENT;
dev_info(&oct->pdev->dev, "setup vf mbox successfully\n");
return 0;
}
void octep_vf_delete_mbox(struct octep_vf_device *oct)
{
if (oct->mbox) {
if (work_pending(&oct->mbox->wk.work))
cancel_work_sync(&oct->mbox->wk.work);
mutex_destroy(&oct->mbox->lock);
vfree(oct->mbox);
oct->mbox = NULL;
dev_info(&oct->pdev->dev, "Deleted vf mbox successfully\n");
}
}
int octep_vf_mbox_version_check(struct octep_vf_device *oct)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret;
cmd.u64 = 0;
cmd.s_version.opcode = OCTEP_PFVF_MBOX_CMD_VERSION;
cmd.s_version.version = OCTEP_PFVF_MBOX_VERSION_CURRENT;
	ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
	if (ret == OCTEP_PFVF_MBOX_CMD_STATUS_NACK) {
		dev_err(&oct->pdev->dev,
			"VF Mbox version is incompatible with PF\n");
		return -EINVAL;
	} else if (ret) {
		/* On timeout or other failure rsp is never written;
		 * do not use it to negotiate a version.
		 */
		dev_err(&oct->pdev->dev,
			"VF Mbox version check failed; err=%d\n", ret);
		return -EINVAL;
	}
	oct->mbox_neg_ver = (u32)rsp.s_version.version;
dev_dbg(&oct->pdev->dev,
"VF Mbox version:%u Negotiated VF version with PF:%u\n",
(u32)cmd.s_version.version,
(u32)rsp.s_version.version);
return 0;
}
void octep_vf_mbox_work(struct work_struct *work)
{
struct octep_vf_mbox_wk *wk = container_of(work, struct octep_vf_mbox_wk, work);
struct octep_vf_iface_link_info *link_info;
struct octep_vf_device *oct = NULL;
struct octep_vf_mbox *mbox = NULL;
union octep_pfvf_mbox_word *notif;
u64 pf_vf_data;
oct = (struct octep_vf_device *)wk->ctxptr;
link_info = &oct->link_info;
mbox = oct->mbox;
pf_vf_data = readq(mbox->mbox_read_reg);
notif = (union octep_pfvf_mbox_word *)&pf_vf_data;
switch (notif->s.opcode) {
case OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS:
if (notif->s_link_status.status) {
link_info->oper_up = OCTEP_PFVF_LINK_STATUS_UP;
netif_carrier_on(oct->netdev);
dev_info(&oct->pdev->dev, "netif_carrier_on\n");
} else {
link_info->oper_up = OCTEP_PFVF_LINK_STATUS_DOWN;
netif_carrier_off(oct->netdev);
dev_info(&oct->pdev->dev, "netif_carrier_off\n");
}
break;
default:
dev_err(&oct->pdev->dev,
"Received unsupported notif %d\n", notif->s.opcode);
break;
}
}
static int __octep_vf_mbox_send_cmd(struct octep_vf_device *oct,
union octep_pfvf_mbox_word cmd,
union octep_pfvf_mbox_word *rsp)
{
struct octep_vf_mbox *mbox = oct->mbox;
u64 reg_val = 0ull;
int count = 0;
if (!mbox)
return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
cmd.s.type = OCTEP_PFVF_MBOX_TYPE_CMD;
writeq(cmd.u64, mbox->mbox_write_reg);
/* No response for notification messages */
if (!rsp)
return 0;
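	/* The PF overwrites the command word in the same SIG register
	 * with its response; poll until the register value changes.
	 */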
for (count = 0; count < OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT; count++) {
usleep_range(1000, 1500);
reg_val = readq(mbox->mbox_write_reg);
if (reg_val != cmd.u64) {
rsp->u64 = reg_val;
break;
}
}
if (count == OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT) {
dev_err(&oct->pdev->dev, "mbox send command timed out\n");
return OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT;
}
if (rsp->s.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "mbox_send: Received NACK\n");
return OCTEP_PFVF_MBOX_CMD_STATUS_NACK;
}
rsp->u64 = reg_val;
return 0;
}
int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
union octep_pfvf_mbox_word *rsp)
{
struct octep_vf_mbox *mbox = oct->mbox;
int ret;
if (!mbox)
return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
mutex_lock(&mbox->lock);
if (pfvf_cmd_versions[cmd.s.opcode] > oct->mbox_neg_ver) {
dev_dbg(&oct->pdev->dev, "CMD:%d not supported in Version:%d\n",
cmd.s.opcode, oct->mbox_neg_ver);
mutex_unlock(&mbox->lock);
return -EOPNOTSUPP;
}
ret = __octep_vf_mbox_send_cmd(oct, cmd, rsp);
mutex_unlock(&mbox->lock);
return ret;
}
int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
u8 *data, int *size)
{
struct octep_vf_mbox *mbox = oct->mbox;
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int data_len = 0, tmp_len = 0;
int read_cnt, i = 0, ret;
if (!mbox)
return OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP;
mutex_lock(&mbox->lock);
cmd.u64 = 0;
cmd.s_data.opcode = opcode;
cmd.s_data.frag = 0;
/* Send cmd to read data from PF */
ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
mutex_unlock(&mbox->lock);
return ret;
}
/* PF sends the data length of requested CMD
* in ACK
*/
	data_len = *((s32 *)rsp.s_data.data);
tmp_len = data_len;
cmd.u64 = 0;
rsp.u64 = 0;
cmd.s_data.opcode = opcode;
cmd.s_data.frag = 1;
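	/* Each ACK carries up to OCTEP_PFVF_MBOX_MAX_DATA_SIZE (6) bytes;
	 * keep sending frag=1 requests until data_len bytes are received.
	 */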
while (data_len) {
ret = __octep_vf_mbox_send_cmd(oct, cmd, &rsp);
		if (ret) {
			dev_err(&oct->pdev->dev, "send mbox cmd fail for data request\n");
			mbox->mbox_data.data_index = 0;
			memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
			mutex_unlock(&mbox->lock);
			return ret;
		}
}
if (data_len > OCTEP_PFVF_MBOX_MAX_DATA_SIZE) {
data_len -= OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
read_cnt = OCTEP_PFVF_MBOX_MAX_DATA_SIZE;
} else {
read_cnt = data_len;
data_len = 0;
}
for (i = 0; i < read_cnt; i++) {
mbox->mbox_data.recv_data[mbox->mbox_data.data_index] =
rsp.s_data.data[i];
mbox->mbox_data.data_index++;
}
cmd.u64 = 0;
rsp.u64 = 0;
cmd.s_data.opcode = opcode;
cmd.s_data.frag = 1;
}
memcpy(data, mbox->mbox_data.recv_data, tmp_len);
*size = tmp_len;
mbox->mbox_data.data_index = 0;
memset(mbox->mbox_data.recv_data, 0, OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE);
mutex_unlock(&mbox->lock);
return 0;
}
int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu)
{
int frame_size = mtu + ETH_HLEN + ETH_FCS_LEN;
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret = 0;
if (mtu < ETH_MIN_MTU || frame_size > ETH_MAX_MTU) {
dev_err(&oct->pdev->dev,
"Failed to set MTU to %d MIN MTU:%d MAX MTU:%d\n",
mtu, ETH_MIN_MTU, ETH_MAX_MTU);
return -EINVAL;
}
cmd.u64 = 0;
cmd.s_set_mtu.opcode = OCTEP_PFVF_MBOX_CMD_SET_MTU;
cmd.s_set_mtu.mtu = mtu;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "Mbox send failed; err=%d\n", ret);
return ret;
}
if (rsp.s_set_mtu.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "Received Mbox NACK from PF for MTU:%d\n", mtu);
return -EINVAL;
}
return 0;
}
int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int i, ret;
cmd.u64 = 0;
cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR;
for (i = 0; i < ETH_ALEN; i++)
cmd.s_set_mac.mac_addr[i] = mac_addr[i];
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "Mbox send failed; err = %d\n", ret);
return ret;
}
if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "received NACK\n");
return -EINVAL;
}
return 0;
}
int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int i, ret;
cmd.u64 = 0;
cmd.s_set_mac.opcode = OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "get_mac: mbox send failed; err = %d\n", ret);
return ret;
}
if (rsp.s_set_mac.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "get_mac: received NACK\n");
return -EINVAL;
}
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = rsp.s_set_mac.mac_addr[i];
return 0;
}
int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret;
cmd.u64 = 0;
cmd.s_link_state.opcode = OCTEP_PFVF_MBOX_CMD_SET_RX_STATE;
cmd.s_link_state.state = state;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "Set Rx state via VF Mbox send failed\n");
return ret;
}
if (rsp.s_link_state.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "Set Rx state received NACK\n");
return -EINVAL;
}
return 0;
}
int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret;
cmd.u64 = 0;
cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS;
cmd.s_link_status.status = status;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "Set link status via VF Mbox send failed\n");
return ret;
}
if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "Set link status received NACK\n");
return -EINVAL;
}
return 0;
}
int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret;
cmd.u64 = 0;
cmd.s_link_status.opcode = OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "Get link status via VF Mbox send failed\n");
return ret;
}
if (rsp.s_link_status.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "Get link status received NACK\n");
return -EINVAL;
}
*oper_up = rsp.s_link_status.status;
return 0;
}
int octep_vf_mbox_dev_remove(struct octep_vf_device *oct)
{
union octep_pfvf_mbox_word cmd;
int ret;
cmd.u64 = 0;
cmd.s.opcode = OCTEP_PFVF_MBOX_CMD_DEV_REMOVE;
ret = octep_vf_mbox_send_cmd(oct, cmd, NULL);
return ret;
}
int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret;
cmd.u64 = 0;
cmd.s_fw_info.opcode = OCTEP_PFVF_MBOX_CMD_GET_FW_INFO;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
	if (ret) {
		dev_err(&oct->pdev->dev, "Get firmware info via VF Mbox send failed\n");
		return ret;
	}
	if (rsp.s_fw_info.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
		dev_err(&oct->pdev->dev, "Get firmware info received NACK\n");
		return -EINVAL;
	}
oct->fw_info.pkind = rsp.s_fw_info.pkind;
oct->fw_info.fsz = rsp.s_fw_info.fsz;
oct->fw_info.rx_ol_flags = rsp.s_fw_info.rx_ol_flags;
oct->fw_info.tx_ol_flags = rsp.s_fw_info.tx_ol_flags;
return 0;
}
int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads,
u16 rx_offloads)
{
union octep_pfvf_mbox_word cmd;
union octep_pfvf_mbox_word rsp;
int ret;
cmd.u64 = 0;
cmd.s_offloads.opcode = OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS;
cmd.s_offloads.rx_ol_flags = rx_offloads;
cmd.s_offloads.tx_ol_flags = tx_offloads;
ret = octep_vf_mbox_send_cmd(oct, cmd, &rsp);
if (ret) {
dev_err(&oct->pdev->dev, "Set offloads via VF Mbox send failed\n");
return ret;
}
	if (rsp.s_offloads.type != OCTEP_PFVF_MBOX_TYPE_RSP_ACK) {
dev_err(&oct->pdev->dev, "Set offloads received NACK\n");
return -EINVAL;
}
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_MBOX_H_
#define _OCTEP_VF_MBOX_H_
/* When a new command is implemented, VF Mbox version should be bumped.
*/
enum octep_pfvf_mbox_version {
OCTEP_PFVF_MBOX_VERSION_V0,
OCTEP_PFVF_MBOX_VERSION_V1,
OCTEP_PFVF_MBOX_VERSION_V2
};
#define OCTEP_PFVF_MBOX_VERSION_CURRENT OCTEP_PFVF_MBOX_VERSION_V2
enum octep_pfvf_mbox_opcode {
OCTEP_PFVF_MBOX_CMD_VERSION,
OCTEP_PFVF_MBOX_CMD_SET_MTU,
OCTEP_PFVF_MBOX_CMD_SET_MAC_ADDR,
OCTEP_PFVF_MBOX_CMD_GET_MAC_ADDR,
OCTEP_PFVF_MBOX_CMD_GET_LINK_INFO,
OCTEP_PFVF_MBOX_CMD_GET_STATS,
OCTEP_PFVF_MBOX_CMD_SET_RX_STATE,
OCTEP_PFVF_MBOX_CMD_SET_LINK_STATUS,
OCTEP_PFVF_MBOX_CMD_GET_LINK_STATUS,
OCTEP_PFVF_MBOX_CMD_GET_MTU,
OCTEP_PFVF_MBOX_CMD_DEV_REMOVE,
OCTEP_PFVF_MBOX_CMD_GET_FW_INFO,
OCTEP_PFVF_MBOX_CMD_SET_OFFLOADS,
OCTEP_PFVF_MBOX_NOTIF_LINK_STATUS,
OCTEP_PFVF_MBOX_CMD_MAX,
};
enum octep_pfvf_mbox_word_type {
OCTEP_PFVF_MBOX_TYPE_CMD,
OCTEP_PFVF_MBOX_TYPE_RSP_ACK,
OCTEP_PFVF_MBOX_TYPE_RSP_NACK,
};
enum octep_pfvf_mbox_cmd_status {
OCTEP_PFVF_MBOX_CMD_STATUS_NOT_SETUP = 1,
OCTEP_PFVF_MBOX_CMD_STATUS_TIMEDOUT = 2,
OCTEP_PFVF_MBOX_CMD_STATUS_NACK = 3,
OCTEP_PFVF_MBOX_CMD_STATUS_BUSY = 4,
OCTEP_PFVF_MBOX_CMD_STATUS_ERR = 5
};
enum octep_pfvf_link_status {
OCTEP_PFVF_LINK_STATUS_DOWN,
OCTEP_PFVF_LINK_STATUS_UP,
};
enum octep_pfvf_link_speed {
OCTEP_PFVF_LINK_SPEED_NONE,
OCTEP_PFVF_LINK_SPEED_1000,
OCTEP_PFVF_LINK_SPEED_10000,
OCTEP_PFVF_LINK_SPEED_25000,
OCTEP_PFVF_LINK_SPEED_40000,
OCTEP_PFVF_LINK_SPEED_50000,
OCTEP_PFVF_LINK_SPEED_100000,
OCTEP_PFVF_LINK_SPEED_LAST,
};
enum octep_pfvf_link_duplex {
OCTEP_PFVF_LINK_HALF_DUPLEX,
OCTEP_PFVF_LINK_FULL_DUPLEX,
};
enum octep_pfvf_link_autoneg {
OCTEP_PFVF_LINK_AUTONEG,
OCTEP_PFVF_LINK_FIXED,
};
#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_COUNT 8000
#define OCTEP_PFVF_MBOX_TIMEOUT_WAIT_UDELAY 1000
#define OCTEP_PFVF_MBOX_MAX_RETRIES 2
#define OCTEP_PFVF_MBOX_VERSION 0
#define OCTEP_PFVF_MBOX_MAX_DATA_SIZE 6
#define OCTEP_PFVF_MBOX_MAX_DATA_BUF_SIZE 320
#define OCTEP_PFVF_MBOX_MORE_FRAG_FLAG 1
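/* 64-bit PF-VF mailbox word: the low 8 bits carry the opcode and the
 * next 2 bits the word type (cmd/ack/nack); the remaining bits are
 * payload, interpreted per command by the views below.
 */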
union octep_pfvf_mbox_word {
u64 u64;
struct {
u64 opcode:8;
u64 type:2;
u64 rsvd:6;
u64 data:48;
} s;
struct {
u64 opcode:8;
u64 type:2;
u64 frag:1;
u64 rsvd:5;
u8 data[6];
} s_data;
struct {
u64 opcode:8;
u64 type:2;
u64 rsvd:6;
u64 version:48;
} s_version;
struct {
u64 opcode:8;
u64 type:2;
u64 rsvd:6;
u8 mac_addr[6];
} s_set_mac;
struct {
u64 opcode:8;
u64 type:2;
u64 rsvd:6;
u64 mtu:48;
} s_set_mtu;
struct {
u64 opcode:8;
u64 type:2;
u64 state:1;
u64 rsvd:53;
} s_link_state;
struct {
u64 opcode:8;
u64 type:2;
u64 status:1;
u64 rsvd:53;
} s_link_status;
struct {
u64 opcode:8;
u64 type:2;
u64 pkind:8;
u64 fsz:8;
u64 rx_ol_flags:16;
u64 tx_ol_flags:16;
u64 rsvd:6;
} s_fw_info;
struct {
u64 opcode:8;
u64 type:2;
u64 rsvd:22;
u64 rx_ol_flags:16;
u64 tx_ol_flags:16;
} s_offloads;
} __packed;
int octep_vf_setup_mbox(struct octep_vf_device *oct);
void octep_vf_delete_mbox(struct octep_vf_device *oct);
int octep_vf_mbox_send_cmd(struct octep_vf_device *oct, union octep_pfvf_mbox_word cmd,
union octep_pfvf_mbox_word *rsp);
int octep_vf_mbox_bulk_read(struct octep_vf_device *oct, enum octep_pfvf_mbox_opcode opcode,
u8 *data, int *size);
int octep_vf_mbox_set_mtu(struct octep_vf_device *oct, int mtu);
int octep_vf_mbox_set_mac_addr(struct octep_vf_device *oct, char *mac_addr);
int octep_vf_mbox_get_mac_addr(struct octep_vf_device *oct, char *mac_addr);
int octep_vf_mbox_version_check(struct octep_vf_device *oct);
int octep_vf_mbox_set_rx_state(struct octep_vf_device *oct, bool state);
int octep_vf_mbox_set_link_status(struct octep_vf_device *oct, bool status);
int octep_vf_mbox_get_link_status(struct octep_vf_device *oct, u8 *oper_up);
int octep_vf_mbox_dev_remove(struct octep_vf_device *oct);
int octep_vf_mbox_get_fw_info(struct octep_vf_device *oct);
int octep_vf_mbox_set_offloads(struct octep_vf_device *oct, u16 tx_offloads, u16 rx_offloads);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_REGS_CN9K_H_
#define _OCTEP_VF_REGS_CN9K_H_
/*############################ RST #########################*/
#define CN93_VF_CONFIG_XPANSION_BAR 0x38
#define CN93_VF_CONFIG_PCIE_CAP 0x70
#define CN93_VF_CONFIG_PCIE_DEVCAP 0x74
#define CN93_VF_CONFIG_PCIE_DEVCTL 0x78
#define CN93_VF_CONFIG_PCIE_LINKCAP 0x7C
#define CN93_VF_CONFIG_PCIE_LINKCTL 0x80
#define CN93_VF_CONFIG_PCIE_SLOTCAP 0x84
#define CN93_VF_CONFIG_PCIE_SLOTCTL 0x88
#define CN93_VF_RING_OFFSET BIT_ULL(17)
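/* Each ring's register block is spaced CN93_VF_RING_OFFSET (128 KB)
 * apart; the per-ring accessors below add (ring * offset) to a base.
 */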
/*###################### RING IN REGISTERS #########################*/
#define CN93_VF_SDP_R_IN_CONTROL_START 0x10000
#define CN93_VF_SDP_R_IN_ENABLE_START 0x10010
#define CN93_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
#define CN93_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
#define CN93_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
#define CN93_VF_SDP_R_IN_CNTS_START 0x10050
#define CN93_VF_SDP_R_IN_INT_LEVELS_START 0x10060
#define CN93_VF_SDP_R_IN_PKT_CNT_START 0x10080
#define CN93_VF_SDP_R_IN_BYTE_CNT_START 0x10090
#define CN93_VF_SDP_R_IN_CONTROL(ring) \
(CN93_VF_SDP_R_IN_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_ENABLE(ring) \
(CN93_VF_SDP_R_IN_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_INSTR_BADDR(ring) \
(CN93_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_INSTR_RSIZE(ring) \
(CN93_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_INSTR_DBELL(ring) \
(CN93_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_CNTS(ring) \
(CN93_VF_SDP_R_IN_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_INT_LEVELS(ring) \
(CN93_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_PKT_CNT(ring) \
(CN93_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_IN_BYTE_CNT(ring) \
(CN93_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
/*------------------ R_IN Masks ----------------*/
/* Rings per Virtual Function */
#define CN93_VF_R_IN_CTL_RPVF_MASK (0xF)
#define CN93_VF_R_IN_CTL_RPVF_POS (48)
/* Number of instructions to be read in one MAC read request;
 * set to the maximum value (4).
 */
#define CN93_VF_R_IN_CTL_IDLE BIT_ULL(28)
#define CN93_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
#define CN93_VF_R_IN_CTL_IS_64B BIT_ULL(24)
#define CN93_VF_R_IN_CTL_D_NSR BIT_ULL(8)
#define CN93_VF_R_IN_CTL_D_ESR BIT_ULL(6)
#define CN93_VF_R_IN_CTL_D_ROR BIT_ULL(5)
#define CN93_VF_R_IN_CTL_NSR BIT_ULL(3)
#define CN93_VF_R_IN_CTL_ESR BIT_ULL(1)
#define CN93_VF_R_IN_CTL_ROR BIT_ULL(0)
#define CN93_VF_R_IN_CTL_MASK (CN93_VF_R_IN_CTL_RDSIZE | CN93_VF_R_IN_CTL_IS_64B)
/*###################### RING OUT REGISTERS #########################*/
#define CN93_VF_SDP_R_OUT_CNTS_START 0x10100
#define CN93_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
#define CN93_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
#define CN93_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
#define CN93_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
#define CN93_VF_SDP_R_OUT_CONTROL_START 0x10150
#define CN93_VF_SDP_R_OUT_ENABLE_START 0x10160
#define CN93_VF_SDP_R_OUT_PKT_CNT_START 0x10180
#define CN93_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
#define CN93_VF_SDP_R_OUT_CONTROL(ring) \
(CN93_VF_SDP_R_OUT_CONTROL_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_ENABLE(ring) \
(CN93_VF_SDP_R_OUT_ENABLE_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_SLIST_BADDR(ring) \
(CN93_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
(CN93_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_SLIST_DBELL(ring) \
(CN93_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_CNTS(ring) \
(CN93_VF_SDP_R_OUT_CNTS_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_INT_LEVELS(ring) \
(CN93_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_PKT_CNT(ring) \
(CN93_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_OUT_BYTE_CNT(ring) \
(CN93_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_VF_RING_OFFSET))
/*------------------ R_OUT Masks ----------------*/
#define CN93_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
#define CN93_VF_R_OUT_INT_LEVELS_TIMET (32)
#define CN93_VF_R_OUT_CTL_IDLE BIT_ULL(40)
#define CN93_VF_R_OUT_CTL_ES_I BIT_ULL(34)
#define CN93_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
#define CN93_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
#define CN93_VF_R_OUT_CTL_ES_D BIT_ULL(30)
#define CN93_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
#define CN93_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
#define CN93_VF_R_OUT_CTL_ES_P BIT_ULL(26)
#define CN93_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
#define CN93_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
#define CN93_VF_R_OUT_CTL_IMODE BIT_ULL(23)
/* ##################### Mail Box Registers ########################## */
/* SDP PF to VF Mailbox Data Register */
#define CN93_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
/* SDP Packet PF to VF Mailbox Interrupt Register */
#define CN93_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
/* SDP VF to PF Mailbox Data Register */
#define CN93_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
#define CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
#define CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
#define CN93_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
(CN93_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_MBOX_PF_VF_INT(ring) \
(CN93_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_VF_RING_OFFSET))
#define CN93_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
(CN93_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_VF_RING_OFFSET))
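/* Interrupt-handling sketch (an assumption, not the driver's ISR): a PF->VF
* mailbox interrupt is acknowledged by writing the status bit back and
* re-armed with the enable bit in the same register.
*/
static inline void cn93_vf_mbox_int_ack_sketch(u8 __iomem *bar0, int ring)
{
writeq(CN93_VF_SDP_R_MBOX_PF_VF_INT_STATUS |
CN93_VF_SDP_R_MBOX_PF_VF_INT_ENAB,
bar0 + CN93_VF_SDP_R_MBOX_PF_VF_INT(ring));
}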
#endif /* _OCTEP_VF_REGS_CN9K_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_REGS_CNXK_H_
#define _OCTEP_VF_REGS_CNXK_H_
/*############################ RST #########################*/
#define CNXK_VF_CONFIG_XPANSION_BAR 0x38
#define CNXK_VF_CONFIG_PCIE_CAP 0x70
#define CNXK_VF_CONFIG_PCIE_DEVCAP 0x74
#define CNXK_VF_CONFIG_PCIE_DEVCTL 0x78
#define CNXK_VF_CONFIG_PCIE_LINKCAP 0x7C
#define CNXK_VF_CONFIG_PCIE_LINKCTL 0x80
#define CNXK_VF_CONFIG_PCIE_SLOTCAP 0x84
#define CNXK_VF_CONFIG_PCIE_SLOTCTL 0x88
#define CNXK_VF_RING_OFFSET (0x1ULL << 17)
/*###################### RING IN REGISTERS #########################*/
#define CNXK_VF_SDP_R_IN_CONTROL_START 0x10000
#define CNXK_VF_SDP_R_IN_ENABLE_START 0x10010
#define CNXK_VF_SDP_R_IN_INSTR_BADDR_START 0x10020
#define CNXK_VF_SDP_R_IN_INSTR_RSIZE_START 0x10030
#define CNXK_VF_SDP_R_IN_INSTR_DBELL_START 0x10040
#define CNXK_VF_SDP_R_IN_CNTS_START 0x10050
#define CNXK_VF_SDP_R_IN_INT_LEVELS_START 0x10060
#define CNXK_VF_SDP_R_IN_PKT_CNT_START 0x10080
#define CNXK_VF_SDP_R_IN_BYTE_CNT_START 0x10090
#define CNXK_VF_SDP_R_ERR_TYPE_START 0x10400
#define CNXK_VF_SDP_R_ERR_TYPE(ring) \
(CNXK_VF_SDP_R_ERR_TYPE_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_CONTROL(ring) \
(CNXK_VF_SDP_R_IN_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_ENABLE(ring) \
(CNXK_VF_SDP_R_IN_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_INSTR_BADDR(ring) \
(CNXK_VF_SDP_R_IN_INSTR_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_INSTR_RSIZE(ring) \
(CNXK_VF_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_INSTR_DBELL(ring) \
(CNXK_VF_SDP_R_IN_INSTR_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_CNTS(ring) \
(CNXK_VF_SDP_R_IN_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_INT_LEVELS(ring) \
(CNXK_VF_SDP_R_IN_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_PKT_CNT(ring) \
(CNXK_VF_SDP_R_IN_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_IN_BYTE_CNT(ring) \
(CNXK_VF_SDP_R_IN_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
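/* Worked example: per-ring registers are spaced CNXK_VF_RING_OFFSET
* (0x20000, i.e. 128KB) apart, so the instruction doorbell of ring 2
* lives at 0x10040 + 2 * 0x20000 = 0x50040 within BAR0.
*/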
/*------------------ R_IN Masks ----------------*/
/* Rings per Virtual Function */
#define CNXK_VF_R_IN_CTL_RPVF_MASK (0xF)
#define CNXK_VF_R_IN_CTL_RPVF_POS (48)
#define CNXK_VF_R_IN_CTL_IDLE BIT_ULL(28)
/* Number of instructions to be read in one MAC read request;
* set to the maximum value (4).
*/
#define CNXK_VF_R_IN_CTL_RDSIZE (0x3ULL << 25)
#define CNXK_VF_R_IN_CTL_IS_64B BIT_ULL(24)
#define CNXK_VF_R_IN_CTL_D_NSR BIT_ULL(8)
#define CNXK_VF_R_IN_CTL_D_ESR BIT_ULL(6)
#define CNXK_VF_R_IN_CTL_D_ROR BIT_ULL(5)
#define CNXK_VF_R_IN_CTL_NSR BIT_ULL(3)
#define CNXK_VF_R_IN_CTL_ESR BIT_ULL(1)
#define CNXK_VF_R_IN_CTL_ROR BIT_ULL(0)
#define CNXK_VF_R_IN_CTL_MASK (CNXK_VF_R_IN_CTL_RDSIZE | CNXK_VF_R_IN_CTL_IS_64B)
/*###################### RING OUT REGISTERS #########################*/
#define CNXK_VF_SDP_R_OUT_CNTS_START 0x10100
#define CNXK_VF_SDP_R_OUT_INT_LEVELS_START 0x10110
#define CNXK_VF_SDP_R_OUT_SLIST_BADDR_START 0x10120
#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START 0x10130
#define CNXK_VF_SDP_R_OUT_SLIST_DBELL_START 0x10140
#define CNXK_VF_SDP_R_OUT_CONTROL_START 0x10150
#define CNXK_VF_SDP_R_OUT_WMARK_START 0x10160
#define CNXK_VF_SDP_R_OUT_ENABLE_START 0x10170
#define CNXK_VF_SDP_R_OUT_PKT_CNT_START 0x10180
#define CNXK_VF_SDP_R_OUT_BYTE_CNT_START 0x10190
#define CNXK_VF_SDP_R_OUT_CONTROL(ring) \
(CNXK_VF_SDP_R_OUT_CONTROL_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_ENABLE(ring) \
(CNXK_VF_SDP_R_OUT_ENABLE_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_SLIST_BADDR(ring) \
(CNXK_VF_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_SLIST_RSIZE(ring) \
(CNXK_VF_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_SLIST_DBELL(ring) \
(CNXK_VF_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_WMARK(ring) \
(CNXK_VF_SDP_R_OUT_WMARK_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_CNTS(ring) \
(CNXK_VF_SDP_R_OUT_CNTS_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_INT_LEVELS(ring) \
(CNXK_VF_SDP_R_OUT_INT_LEVELS_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_PKT_CNT(ring) \
(CNXK_VF_SDP_R_OUT_PKT_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_OUT_BYTE_CNT(ring) \
(CNXK_VF_SDP_R_OUT_BYTE_CNT_START + ((ring) * CNXK_VF_RING_OFFSET))
/*------------------ R_OUT Masks ----------------*/
#define CNXK_VF_R_OUT_INT_LEVELS_BMODE BIT_ULL(63)
#define CNXK_VF_R_OUT_INT_LEVELS_TIMET (32)
#define CNXK_VF_R_OUT_CTL_IDLE BIT_ULL(40)
#define CNXK_VF_R_OUT_CTL_ES_I BIT_ULL(34)
#define CNXK_VF_R_OUT_CTL_NSR_I BIT_ULL(33)
#define CNXK_VF_R_OUT_CTL_ROR_I BIT_ULL(32)
#define CNXK_VF_R_OUT_CTL_ES_D BIT_ULL(30)
#define CNXK_VF_R_OUT_CTL_NSR_D BIT_ULL(29)
#define CNXK_VF_R_OUT_CTL_ROR_D BIT_ULL(28)
#define CNXK_VF_R_OUT_CTL_ES_P BIT_ULL(26)
#define CNXK_VF_R_OUT_CTL_NSR_P BIT_ULL(25)
#define CNXK_VF_R_OUT_CTL_ROR_P BIT_ULL(24)
#define CNXK_VF_R_OUT_CTL_IMODE BIT_ULL(23)
/* ##################### Mail Box Registers ########################## */
/* SDP PF to VF Mailbox Data Register */
#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START 0x10210
/* SDP Packet PF to VF Mailbox Interrupt Register */
#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_START 0x10220
/* SDP VF to PF Mailbox Data Register */
#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START 0x10230
#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB BIT_ULL(1)
#define CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS BIT_ULL(0)
#define CNXK_VF_SDP_R_MBOX_PF_VF_DATA(ring) \
(CNXK_VF_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_MBOX_PF_VF_INT(ring) \
(CNXK_VF_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CNXK_VF_RING_OFFSET))
#define CNXK_VF_SDP_R_MBOX_VF_PF_DATA(ring) \
(CNXK_VF_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CNXK_VF_RING_OFFSET))
#endif /* _OCTEP_VF_REGS_CNXK_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
{
oq->host_read_idx = 0;
oq->host_refill_idx = 0;
oq->refill_count = 0;
oq->last_pkt_count = 0;
oq->pkts_pending = 0;
}
/**
* octep_vf_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring.
*
* @oq: Octeon Rx queue data structure.
*
* Return: 0, if successfully filled receive buffers for all descriptors.
* -1, if failed to allocate a buffer or failed to map for DMA.
*/
static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
{
struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
struct page *page;
u32 i;
for (i = 0; i < oq->max_count; i++) {
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "Rx buffer alloc failed\n");
goto rx_buf_alloc_err;
}
desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) {
dev_err(oq->dev,
"OQ-%d buffer alloc: DMA mapping error!\n",
oq->q_no);
put_page(page);
goto dma_map_err;
}
oq->buff_info[i].page = page;
}
return 0;
dma_map_err:
rx_buf_alloc_err:
while (i) {
i--;
dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE);
put_page(oq->buff_info[i].page);
oq->buff_info[i].page = NULL;
}
return -1;
}
/**
* octep_vf_oq_refill() - refill buffers for used Rx ring descriptors.
*
* @oct: Octeon device private data structure.
* @oq: Octeon Rx queue data structure.
*
* Return: number of descriptors successfully refilled with receive buffers.
*/
static int octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
{
struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
struct page *page;
u32 refill_idx, i;
refill_idx = oq->host_refill_idx;
for (i = 0; i < oq->refill_count; i++) {
page = dev_alloc_page();
if (unlikely(!page)) {
dev_err(oq->dev, "refill: rx buffer alloc failed\n");
oq->stats.alloc_failures++;
break;
}
desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) {
dev_err(oq->dev,
"OQ-%d buffer refill: DMA mapping error!\n",
oq->q_no);
put_page(page);
oq->stats.alloc_failures++;
break;
}
oq->buff_info[refill_idx].page = page;
refill_idx++;
if (refill_idx == oq->max_count)
refill_idx = 0;
}
oq->host_refill_idx = refill_idx;
oq->refill_count -= i;
return i;
}
/**
* octep_vf_setup_oq() - Set up an Rx queue.
*
* @oct: Octeon device private data structure.
* @q_no: Rx queue number to be set up.
*
* Allocate resources for an Rx queue.
*/
static int octep_vf_setup_oq(struct octep_vf_device *oct, int q_no)
{
struct octep_vf_oq *oq;
u32 desc_ring_size;
oq = vzalloc(sizeof(*oq));
if (!oq)
goto create_oq_fail;
oct->oq[q_no] = oq;
oq->octep_vf_dev = oct;
oq->netdev = oct->netdev;
oq->dev = &oct->pdev->dev;
oq->q_no = q_no;
oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf);
oq->ring_size_mask = oq->max_count - 1;
oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf);
oq->max_single_buffer_size = oq->buffer_size - OCTEP_VF_OQ_RESP_HW_SIZE;
/* When the hardware/firmware supports additional capabilities, an
* additional header is filled in by Octeon after the length field in
* Rx packets. This header carries extra packet information.
*/
if (oct->fw_info.rx_ol_flags)
oq->max_single_buffer_size -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf);
desc_ring_size = oq->max_count * OCTEP_VF_OQ_DESC_SIZE;
oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size,
&oq->desc_ring_dma, GFP_KERNEL);
if (unlikely(!oq->desc_ring)) {
dev_err(oq->dev,
"Failed to allocate DMA memory for OQ-%d !!\n", q_no);
goto desc_dma_alloc_err;
}
oq->buff_info = vzalloc(oq->max_count * OCTEP_VF_OQ_RECVBUF_SIZE);
if (unlikely(!oq->buff_info)) {
dev_err(&oct->pdev->dev,
"Failed to allocate buffer info for OQ-%d\n", q_no);
goto buf_list_err;
}
if (octep_vf_oq_fill_ring_buffers(oq))
goto oq_fill_buff_err;
octep_vf_oq_reset_indices(oq);
oct->hw_ops.setup_oq_regs(oct, q_no);
oct->num_oqs++;
return 0;
oq_fill_buff_err:
vfree(oq->buff_info);
oq->buff_info = NULL;
buf_list_err:
dma_free_coherent(oq->dev, desc_ring_size,
oq->desc_ring, oq->desc_ring_dma);
oq->desc_ring = NULL;
desc_dma_alloc_err:
vfree(oq);
oct->oq[q_no] = NULL;
create_oq_fail:
return -1;
}
/**
* octep_vf_oq_free_ring_buffers() - Free ring buffers.
*
* @oq: Octeon Rx queue data structure.
*
* Free receive buffers in unused Rx queue descriptors.
*/
static void octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
{
struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
int i;
if (!oq->desc_ring || !oq->buff_info)
return;
for (i = 0; i < oq->max_count; i++) {
if (oq->buff_info[i].page) {
dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr,
PAGE_SIZE, DMA_FROM_DEVICE);
put_page(oq->buff_info[i].page);
oq->buff_info[i].page = NULL;
desc_ring[i].buffer_ptr = 0;
}
}
octep_vf_oq_reset_indices(oq);
}
/**
* octep_vf_free_oq() - Free Rx queue resources.
*
* @oq: Octeon Rx queue data structure.
*
* Free all resources of an Rx queue.
*/
static int octep_vf_free_oq(struct octep_vf_oq *oq)
{
struct octep_vf_device *oct = oq->octep_vf_dev;
int q_no = oq->q_no;
octep_vf_oq_free_ring_buffers(oq);
if (oq->buff_info)
vfree(oq->buff_info);
if (oq->desc_ring)
dma_free_coherent(oq->dev,
oq->max_count * OCTEP_VF_OQ_DESC_SIZE,
oq->desc_ring, oq->desc_ring_dma);
vfree(oq);
oct->oq[q_no] = NULL;
oct->num_oqs--;
return 0;
}
/**
* octep_vf_setup_oqs() - Set up resources for all Rx queues.
*
* @oct: Octeon device private data structure.
*/
int octep_vf_setup_oqs(struct octep_vf_device *oct)
{
int i, retval = 0;
oct->num_oqs = 0;
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
retval = octep_vf_setup_oq(oct, i);
if (retval) {
dev_err(&oct->pdev->dev,
"Failed to setup OQ(RxQ)-%d.\n", i);
goto oq_setup_err;
}
dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i);
}
return 0;
oq_setup_err:
while (i) {
i--;
octep_vf_free_oq(oct->oq[i]);
}
return -1;
}
/**
* octep_vf_oq_dbell_init() - Initialize Rx queue doorbell.
*
* @oct: Octeon device private data structure.
*
* Write number of descriptors to Rx queue doorbell register.
*/
void octep_vf_oq_dbell_init(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_oqs; i++)
writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}
/**
* octep_vf_free_oqs() - Free resources of all Rx queues.
*
* @oct: Octeon device private data structure.
*/
void octep_vf_free_oqs(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
if (!oct->oq[i])
continue;
octep_vf_free_oq(oct->oq[i]);
dev_dbg(&oct->pdev->dev,
"Successfully freed OQ(RxQ)-%d.\n", i);
}
}
/**
* octep_vf_oq_check_hw_for_pkts() - Check for new Rx packets.
*
* @oct: Octeon device private data structure.
* @oq: Octeon Rx queue data structure.
*
* Return: packets received after previous check.
*/
static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
struct octep_vf_oq *oq)
{
u32 pkt_count, new_pkts;
pkt_count = readl(oq->pkts_sent_reg);
new_pkts = pkt_count - oq->last_pkt_count;
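/* u32 arithmetic handles counter wrap-around: a pkt_count of 0x10 after
* a last_pkt_count of 0xFFFFFFF0 yields 0x20 new packets.
*/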
/* Clear the hardware packet counter register when the Rx queue is
* processed continuously within a single interrupt context and the
* counter approaches its maximum value.
* The counter is not cleared on every read, to save PCI write cycles.
*/
if (unlikely(pkt_count > 0xF0000000U)) {
writel(pkt_count, oq->pkts_sent_reg);
pkt_count = readl(oq->pkts_sent_reg);
new_pkts += pkt_count;
}
oq->last_pkt_count = pkt_count;
oq->pkts_pending += new_pkts;
return new_pkts;
}
/**
* __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
*
* @oct: Octeon device private data structure.
* @oq: Octeon Rx queue data structure.
* @pkts_to_process: number of packets to be processed.
*
* Process the new packets in Rx queue.
* Packets larger than a single Rx buffer arrive in consecutive descriptors,
* but the count returned by hardware accounts only for full packets, not
* for individual fragments.
*
* Return: number of packets processed and pushed to stack.
*/
static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
struct octep_vf_oq *oq, u16 pkts_to_process)
{
struct octep_vf_oq_resp_hw_ext *resp_hw_ext = NULL;
netdev_features_t feat = oq->netdev->features;
struct octep_vf_rx_buffer *buff_info;
struct octep_vf_oq_resp_hw *resp_hw;
u32 pkt, rx_bytes, desc_used;
u16 data_offset, rx_ol_flags;
struct sk_buff *skb;
u32 read_idx;
read_idx = oq->host_read_idx;
rx_bytes = 0;
desc_used = 0;
for (pkt = 0; pkt < pkts_to_process; pkt++) {
buff_info = (struct octep_vf_rx_buffer *)&oq->buff_info[read_idx];
dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
PAGE_SIZE, DMA_FROM_DEVICE);
resp_hw = page_address(buff_info->page);
buff_info->page = NULL;
/* Convert the big-endian length field to CPU byte order */
buff_info->len = be64_to_cpu(resp_hw->length);
if (oct->fw_info.rx_ol_flags) {
/* Extended response header is immediately after
* response header (resp_hw)
*/
resp_hw_ext = (struct octep_vf_oq_resp_hw_ext *)
(resp_hw + 1);
buff_info->len -= OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
/* Packet Data is immediately after
* extended response header.
*/
data_offset = OCTEP_VF_OQ_RESP_HW_SIZE +
OCTEP_VF_OQ_RESP_HW_EXT_SIZE;
rx_ol_flags = resp_hw_ext->rx_ol_flags;
} else {
/* Data is immediately after
* Hardware Rx response header.
*/
data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
rx_ol_flags = 0;
}
rx_bytes += buff_info->len;
if (buff_info->len <= oq->max_single_buffer_size) {
skb = build_skb((void *)resp_hw, PAGE_SIZE);
skb_reserve(skb, data_offset);
skb_put(skb, buff_info->len);
read_idx++;
desc_used++;
if (read_idx == oq->max_count)
read_idx = 0;
} else {
struct skb_shared_info *shinfo;
u16 data_len;
skb = build_skb((void *)resp_hw, PAGE_SIZE);
skb_reserve(skb, data_offset);
/* Head fragment includes response header(s);
* subsequent fragments contain only data.
*/
skb_put(skb, oq->max_single_buffer_size);
read_idx++;
desc_used++;
if (read_idx == oq->max_count)
read_idx = 0;
shinfo = skb_shinfo(skb);
data_len = buff_info->len - oq->max_single_buffer_size;
while (data_len) {
dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
PAGE_SIZE, DMA_FROM_DEVICE);
buff_info = (struct octep_vf_rx_buffer *)
&oq->buff_info[read_idx];
if (data_len < oq->buffer_size) {
buff_info->len = data_len;
data_len = 0;
} else {
buff_info->len = oq->buffer_size;
data_len -= oq->buffer_size;
}
skb_add_rx_frag(skb, shinfo->nr_frags,
buff_info->page, 0,
buff_info->len,
buff_info->len);
buff_info->page = NULL;
read_idx++;
desc_used++;
if (read_idx == oq->max_count)
read_idx = 0;
}
}
skb->dev = oq->netdev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (feat & NETIF_F_RXCSUM &&
OCTEP_VF_RX_CSUM_VERIFIED(rx_ol_flags))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
napi_gro_receive(oq->napi, skb);
}
oq->host_read_idx = read_idx;
oq->refill_count += desc_used;
oq->stats.packets += pkt;
oq->stats.bytes += rx_bytes;
return pkt;
}
/**
* octep_vf_oq_process_rx() - Process Rx queue.
*
* @oq: Octeon Rx queue data structure.
* @budget: max number of packets that can be processed in one invocation.
*
* Check for newly received packets and process them.
* Keeps checking for new packets until budget is used or no new packets seen.
*
* Return: number of packets processed.
*/
int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
{
u32 pkts_available, pkts_processed, total_pkts_processed;
struct octep_vf_device *oct = oq->octep_vf_dev;
pkts_available = 0;
pkts_processed = 0;
total_pkts_processed = 0;
while (total_pkts_processed < budget) {
/* update pending count only when current one exhausted */
if (oq->pkts_pending == 0)
octep_vf_oq_check_hw_for_pkts(oct, oq);
pkts_available = min(budget - total_pkts_processed,
oq->pkts_pending);
if (!pkts_available)
break;
pkts_processed = __octep_vf_oq_process_rx(oct, oq,
pkts_available);
oq->pkts_pending -= pkts_processed;
total_pkts_processed += pkts_processed;
}
if (oq->refill_count >= oq->refill_threshold) {
u32 desc_refilled = octep_vf_oq_refill(oct, oq);
/* flush pending writes before updating credits */
smp_wmb();
writel(desc_refilled, oq->pkts_credit_reg);
}
return total_pkts_processed;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_RX_H_
#define _OCTEP_VF_RX_H_
/* struct octep_vf_oq_desc_hw - Octeon Hardware OQ descriptor format.
*
* The descriptor ring is made of descriptors which have 2 64-bit values:
*
* @buffer_ptr: DMA address of the skb->data
* @info_ptr: DMA address of host memory, used by hardware to update the
* packet count. Currently left unused, to save PCI writes.
*/
struct octep_vf_oq_desc_hw {
dma_addr_t buffer_ptr;
u64 info_ptr;
};
static_assert(sizeof(struct octep_vf_oq_desc_hw) == 16);
#define OCTEP_VF_OQ_DESC_SIZE (sizeof(struct octep_vf_oq_desc_hw))
/* Rx offload flags */
#define OCTEP_VF_RX_OFFLOAD_VLAN_STRIP BIT(0)
#define OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM BIT(1)
#define OCTEP_VF_RX_OFFLOAD_UDP_CKSUM BIT(2)
#define OCTEP_VF_RX_OFFLOAD_TCP_CKSUM BIT(3)
#define OCTEP_VF_RX_OFFLOAD_CKSUM (OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
OCTEP_VF_RX_OFFLOAD_UDP_CKSUM | \
OCTEP_VF_RX_OFFLOAD_TCP_CKSUM)
#define OCTEP_VF_RX_IP_CSUM(flags) ((flags) & \
(OCTEP_VF_RX_OFFLOAD_IPV4_CKSUM | \
OCTEP_VF_RX_OFFLOAD_TCP_CKSUM | \
OCTEP_VF_RX_OFFLOAD_UDP_CKSUM))
/* bit 0 is vlan strip */
#define OCTEP_VF_RX_CSUM_IP_VERIFIED BIT(1)
#define OCTEP_VF_RX_CSUM_L4_VERIFIED BIT(2)
#define OCTEP_VF_RX_CSUM_VERIFIED(flags) ((flags) & \
(OCTEP_VF_RX_CSUM_L4_VERIFIED | \
OCTEP_VF_RX_CSUM_IP_VERIFIED))
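/* Example: a packet whose IP and L4 checksums were both verified by
* hardware carries OCTEP_VF_RX_CSUM_IP_VERIFIED |
* OCTEP_VF_RX_CSUM_L4_VERIFIED in resp_hw_ext->rx_ol_flags, letting
* __octep_vf_oq_process_rx() set CHECKSUM_UNNECESSARY on the skb.
*/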
/* Extended response header in packet data received from hardware.
* Includes metadata such as checksum status.
* Valid only if the hardware/firmware has published support for it.
* It sits at offset 0 of the packet data (skb->data).
*/
struct octep_vf_oq_resp_hw_ext {
/* Reserved. */
u64 rsvd:48;
/* rx offload flags */
u16 rx_ol_flags;
};
static_assert(sizeof(struct octep_vf_oq_resp_hw_ext) == 8);
#define OCTEP_VF_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_vf_oq_resp_hw_ext))
/* Length of the Rx packet DMA'ed by Octeon to the host.
* This is big-endian and must be converted to CPU byte order.
* Octeon writes it at the beginning of the Rx buffer (skb->data).
*/
struct octep_vf_oq_resp_hw {
/* The Length of the packet. */
__be64 length;
};
static_assert(sizeof(struct octep_vf_oq_resp_hw) == 8);
#define OCTEP_VF_OQ_RESP_HW_SIZE (sizeof(struct octep_vf_oq_resp_hw))
/* Pointer to data buffer.
* Driver keeps a pointer to the data buffer that it made available to
* the Octeon device. Since the descriptor ring keeps physical (bus)
* addresses, this field is required for the driver to keep track of
* the virtual address pointers. The fields are operated by
* OS-dependent routines.
*/
struct octep_vf_rx_buffer {
struct page *page;
/* length from the Rx hardware descriptor, converted to CPU byte order */
u64 len;
};
#define OCTEP_VF_OQ_RECVBUF_SIZE (sizeof(struct octep_vf_rx_buffer))
/* Output Queue statistics. Each output queue has three stats fields. */
struct octep_vf_oq_stats {
/* Number of packets received from the Device. */
u64 packets;
/* Number of bytes received from the Device. */
u64 bytes;
/* Number of times failed to allocate buffers. */
u64 alloc_failures;
};
#define OCTEP_VF_OQ_STATS_SIZE (sizeof(struct octep_vf_oq_stats))
/* Hardware interface Rx statistics */
struct octep_vf_iface_rx_stats {
/* Received packets */
u64 pkts;
/* Octets of received packets */
u64 octets;
/* Received PAUSE and Control packets */
u64 pause_pkts;
/* Received PAUSE and Control octets */
u64 pause_octets;
/* Filtered DMAC0 packets */
u64 dmac0_pkts;
/* Filtered DMAC0 octets */
u64 dmac0_octets;
/* Packets dropped due to RX FIFO full */
u64 dropped_pkts_fifo_full;
/* Octets dropped due to RX FIFO full */
u64 dropped_octets_fifo_full;
/* Error packets */
u64 err_pkts;
/* Filtered DMAC1 packets */
u64 dmac1_pkts;
/* Filtered DMAC1 octets */
u64 dmac1_octets;
/* NCSI-bound packets dropped */
u64 ncsi_dropped_pkts;
/* NCSI-bound octets dropped */
u64 ncsi_dropped_octets;
/* Multicast packets received. */
u64 mcast_pkts;
/* Broadcast packets received. */
u64 bcast_pkts;
};
/* The Descriptor Ring Output Queue structure.
* This structure has all the information required to implement
* an Octeon OQ.
*/
struct octep_vf_oq {
u32 q_no;
struct octep_vf_device *octep_vf_dev;
struct net_device *netdev;
struct device *dev;
struct napi_struct *napi;
/* The receive buffer list. This list has the virtual addresses
* of the buffers.
*/
struct octep_vf_rx_buffer *buff_info;
/* Pointer to the mapped packet credit register.
* Host writes number of info/buffer ptrs available to this register
*/
u8 __iomem *pkts_credit_reg;
/* Pointer to the mapped packet sent register.
* Octeon writes the number of packets DMA'ed to host memory
* in this register.
*/
u8 __iomem *pkts_sent_reg;
/* Statistics for this OQ. */
struct octep_vf_oq_stats stats;
/* Packets pending to be processed */
u32 pkts_pending;
u32 last_pkt_count;
/* Index in the ring where the driver should read the next packet */
u32 host_read_idx;
/* Number of descriptors in this ring. */
u32 max_count;
u32 ring_size_mask;
/* The number of descriptors pending refill. */
u32 refill_count;
/* Index in the ring where the driver will refill the
* descriptor's buffer
*/
u32 host_refill_idx;
u32 refill_threshold;
/* The size of each buffer pointed by the buffer pointer. */
u32 buffer_size;
u32 max_single_buffer_size;
/* The 8B aligned descriptor ring starts at this address. */
struct octep_vf_oq_desc_hw *desc_ring;
/* DMA mapped address of the OQ descriptor ring. */
dma_addr_t desc_ring_dma;
};
#define OCTEP_VF_OQ_SIZE (sizeof(struct octep_vf_oq))
#endif /* _OCTEP_VF_RX_H_ */
// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
/* Reset various index of Tx queue data structure. */
static void octep_vf_iq_reset_indices(struct octep_vf_iq *iq)
{
iq->fill_cnt = 0;
iq->host_write_index = 0;
iq->octep_vf_read_index = 0;
iq->flush_index = 0;
iq->pkts_processed = 0;
iq->pkt_in_done = 0;
}
/**
* octep_vf_iq_process_completions() - Process Tx queue completions.
*
* @iq: Octeon Tx queue data structure.
* @budget: max number of completions to be processed in one invocation.
*
* Return: 1 if the completion budget was exhausted, else 0.
*/
int octep_vf_iq_process_completions(struct octep_vf_iq *iq, u16 budget)
{
u32 compl_pkts, compl_bytes, compl_sg;
struct octep_vf_device *oct = iq->octep_vf_dev;
struct octep_vf_tx_buffer *tx_buffer;
struct skb_shared_info *shinfo;
u32 fi = iq->flush_index;
struct sk_buff *skb;
u8 frags, i;
compl_pkts = 0;
compl_sg = 0;
compl_bytes = 0;
iq->octep_vf_read_index = oct->hw_ops.update_iq_read_idx(iq);
while (likely(budget && (fi != iq->octep_vf_read_index))) {
tx_buffer = iq->buff_info + fi;
skb = tx_buffer->skb;
fi++;
if (unlikely(fi == iq->max_count))
fi = 0;
compl_bytes += skb->len;
compl_pkts++;
budget--;
if (!tx_buffer->gather) {
dma_unmap_single(iq->dev, tx_buffer->dma,
tx_buffer->skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
continue;
}
/* Scatter/Gather */
shinfo = skb_shinfo(skb);
frags = shinfo->nr_frags;
compl_sg++;
dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
dev_kfree_skb_any(skb);
}
iq->pkts_processed += compl_pkts;
iq->stats.instr_completed += compl_pkts;
iq->stats.bytes_sent += compl_bytes;
iq->stats.sgentry_sent += compl_sg;
iq->flush_index = fi;
netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes);
if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&
(IQ_INSTR_SPACE(iq) >
OCTEP_VF_WAKE_QUEUE_THRESHOLD))
netif_wake_subqueue(iq->netdev, iq->q_no);
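/* A non-zero return tells the caller the completion budget was exhausted */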
return !budget;
}
/**
* octep_vf_iq_free_pending() - Free Tx buffers for pending completions.
*
* @iq: Octeon Tx queue data structure.
*/
static void octep_vf_iq_free_pending(struct octep_vf_iq *iq)
{
struct octep_vf_tx_buffer *tx_buffer;
struct skb_shared_info *shinfo;
u32 fi = iq->flush_index;
struct sk_buff *skb;
u8 frags, i;
while (fi != iq->host_write_index) {
tx_buffer = iq->buff_info + fi;
skb = tx_buffer->skb;
fi++;
if (unlikely(fi == iq->max_count))
fi = 0;
if (!tx_buffer->gather) {
dma_unmap_single(iq->dev, tx_buffer->dma,
tx_buffer->skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
continue;
}
/* Scatter/Gather */
shinfo = skb_shinfo(skb);
frags = shinfo->nr_frags;
dma_unmap_single(iq->dev,
tx_buffer->sglist[0].dma_ptr[0],
tx_buffer->sglist[0].len[3],
DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
/* lengths sit in reversed order within each SG word; use the same
* indexing as octep_vf_iq_process_completions()
*/
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
dev_kfree_skb_any(skb);
}
iq->flush_index = fi;
netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));
}
/**
* octep_vf_clean_iqs() - Clean Tx queues to shutdown the device.
*
* @oct: Octeon device private data structure.
*
* Free the buffers in Tx queue descriptors pending completion and
* reset queue indices.
*/
void octep_vf_clean_iqs(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < oct->num_iqs; i++) {
octep_vf_iq_free_pending(oct->iq[i]);
octep_vf_iq_reset_indices(oct->iq[i]);
}
}
/**
* octep_vf_setup_iq() - Set up a Tx queue.
*
* @oct: Octeon device private data structure.
* @q_no: Tx queue number to be set up.
*
* Allocate resources for a Tx queue.
*/
static int octep_vf_setup_iq(struct octep_vf_device *oct, int q_no)
{
u32 desc_ring_size, buff_info_size, sglist_size;
struct octep_vf_iq *iq;
int i;
iq = vzalloc(sizeof(*iq));
if (!iq)
goto iq_alloc_err;
oct->iq[q_no] = iq;
iq->octep_vf_dev = oct;
iq->netdev = oct->netdev;
iq->dev = &oct->pdev->dev;
iq->q_no = q_no;
iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf);
iq->ring_size_mask = iq->max_count - 1;
iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf);
iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);
/* Allocate memory for hardware queue descriptors */
desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size,
&iq->desc_ring_dma, GFP_KERNEL);
if (unlikely(!iq->desc_ring)) {
dev_err(iq->dev,
"Failed to allocate DMA memory for IQ-%d\n", q_no);
goto desc_dma_alloc_err;
}
/* Allocate memory for hardware SGLIST descriptors */
sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
CFG_GET_IQ_NUM_DESC(oct->conf);
iq->sglist = dma_alloc_coherent(iq->dev, sglist_size,
&iq->sglist_dma, GFP_KERNEL);
if (unlikely(!iq->sglist)) {
dev_err(iq->dev,
"Failed to allocate DMA memory for IQ-%d SGLIST\n",
q_no);
goto sglist_alloc_err;
}
/* allocate memory to manage Tx packets pending completion */
buff_info_size = OCTEP_VF_IQ_TXBUFF_INFO_SIZE * iq->max_count;
iq->buff_info = vzalloc(buff_info_size);
if (!iq->buff_info) {
dev_err(iq->dev,
"Failed to allocate buff info for IQ-%d\n", q_no);
goto buff_info_err;
}
/* Setup sglist addresses in tx_buffer entries */
for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) {
struct octep_vf_tx_buffer *tx_buffer;
tx_buffer = &iq->buff_info[i];
tx_buffer->sglist =
&iq->sglist[i * OCTEP_VF_SGLIST_ENTRIES_PER_PKT];
tx_buffer->sglist_dma =
iq->sglist_dma + (i * OCTEP_VF_SGLIST_SIZE_PER_PKT);
}
octep_vf_iq_reset_indices(iq);
oct->hw_ops.setup_iq_regs(oct, q_no);
oct->num_iqs++;
return 0;
buff_info_err:
dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma);
sglist_alloc_err:
dma_free_coherent(iq->dev, desc_ring_size,
iq->desc_ring, iq->desc_ring_dma);
desc_dma_alloc_err:
vfree(iq);
oct->iq[q_no] = NULL;
iq_alloc_err:
return -1;
}
/**
* octep_vf_free_iq() - Free Tx queue resources.
*
* @iq: Octeon Tx queue data structure.
*
* Free all the resources allocated for a Tx queue.
*/
static void octep_vf_free_iq(struct octep_vf_iq *iq)
{
struct octep_vf_device *oct = iq->octep_vf_dev;
u64 desc_ring_size, sglist_size;
int q_no = iq->q_no;
desc_ring_size = OCTEP_VF_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
vfree(iq->buff_info);
if (iq->desc_ring)
dma_free_coherent(iq->dev, desc_ring_size,
iq->desc_ring, iq->desc_ring_dma);
sglist_size = OCTEP_VF_SGLIST_SIZE_PER_PKT *
CFG_GET_IQ_NUM_DESC(oct->conf);
if (iq->sglist)
dma_free_coherent(iq->dev, sglist_size,
iq->sglist, iq->sglist_dma);
vfree(iq);
oct->iq[q_no] = NULL;
oct->num_iqs--;
}
/**
* octep_vf_setup_iqs() - Set up resources for all Tx queues.
*
* @oct: Octeon device private data structure.
*/
int octep_vf_setup_iqs(struct octep_vf_device *oct)
{
int i;
oct->num_iqs = 0;
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
if (octep_vf_setup_iq(oct, i)) {
dev_err(&oct->pdev->dev,
"Failed to setup IQ(TxQ)-%d.\n", i);
goto iq_setup_err;
}
dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
}
return 0;
iq_setup_err:
while (i) {
i--;
octep_vf_free_iq(oct->iq[i]);
}
return -1;
}
/**
* octep_vf_free_iqs() - Free resources of all Tx queues.
*
* @oct: Octeon device private data structure.
*/
void octep_vf_free_iqs(struct octep_vf_device *oct)
{
int i;
for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
octep_vf_free_iq(oct->iq[i]);
dev_dbg(&oct->pdev->dev,
"Successfully destroyed IQ(TxQ)-%d.\n", i);
}
oct->num_iqs = 0;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
*
* Copyright (C) 2020 Marvell.
*
*/
#ifndef _OCTEP_VF_TX_H_
#define _OCTEP_VF_TX_H_
#define IQ_SEND_OK 0
#define IQ_SEND_STOP 1
#define IQ_SEND_FAILED -1
#define TX_BUFTYPE_NONE 0
#define TX_BUFTYPE_NET 1
#define TX_BUFTYPE_NET_SG 2
#define NUM_TX_BUFTYPES 3
/* Hardware format for Scatter/Gather list
*
* 63 48|47 32|31 16|15 0
* -----------------------------------------
* | Len 0 | Len 1 | Len 2 | Len 3 |
* -----------------------------------------
* | Ptr 0 |
* -----------------------------------------
* | Ptr 1 |
* -----------------------------------------
* | Ptr 2 |
* -----------------------------------------
* | Ptr 3 |
* -----------------------------------------
*/
struct octep_vf_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
};
static_assert(sizeof(struct octep_vf_tx_sglist_desc) == 40);
/* Each Scatter/Gather entry sent to hardware holds four pointers.
* So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where the
* '+1' is for the main skb, which also goes to Octeon hardware as a
* gather buffer. To allocate sufficient SGLIST entries for a packet with
* the maximum number of fragments, round up by adding 3 before dividing.
*/
#define OCTEP_VF_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
#define OCTEP_VF_SGLIST_SIZE_PER_PKT \
(OCTEP_VF_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_vf_tx_sglist_desc))
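/* Illustrative sketch (hypothetical helper, mirroring the unmap logic in
* octep_vf_tx.c): store gather buffer 'i' in the hardware SG format shown
* above. With MAX_SKB_FRAGS == 17 this works out to (17 + 1 + 3)/4 = 5
* SG entries per packet. Lengths within each 64-bit word are laid out
* most-significant first (Len 0 in bits 63:48), hence the '3 - (i & 3)'
* index on a little-endian host.
*/
static inline void octep_vf_sglist_set(struct octep_vf_tx_sglist_desc *sglist,
int i, dma_addr_t dma, u16 len)
{
sglist[i >> 2].dma_ptr[i & 3] = dma;
sglist[i >> 2].len[3 - (i & 3)] = len;
}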
struct octep_vf_tx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
struct octep_vf_tx_sglist_desc *sglist;
dma_addr_t sglist_dma;
u8 gather;
};
#define OCTEP_VF_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_vf_tx_buffer))
/* VF Hardware interface Tx statistics */
struct octep_vf_iface_tx_stats {
/* Total frames sent on the interface */
u64 pkts;
/* Total octets sent on the interface */
u64 octs;
/* Packets sent to a broadcast DMAC */
u64 bcst;
/* Packets sent to the multicast DMAC */
u64 mcst;
/* Packets dropped */
u64 dropped;
/* Reserved */
u64 reserved[13];
};
/* VF Input Queue statistics */
struct octep_vf_iq_stats {
/* Instructions posted to this queue. */
u64 instr_posted;
/* Instructions copied by hardware for processing. */
u64 instr_completed;
/* Instructions that could not be processed. */
u64 instr_dropped;
/* Bytes sent through this queue. */
u64 bytes_sent;
/* Gather entries sent through this queue. */
u64 sgentry_sent;
/* Number of transmit failures due to TX_BUSY */
u64 tx_busy;
/* Number of times the queue is restarted */
u64 restart_cnt;
};
/* The instruction (input) queue.
* The input queue is used to post raw (instruction) mode data or packet
* data to the Octeon device from the host. Each input queue (up to 4)
* of an Octeon device has one such structure to represent it.
*/
struct octep_vf_iq {
u32 q_no;
struct octep_vf_device *octep_vf_dev;
struct net_device *netdev;
struct device *dev;
struct netdev_queue *netdev_q;
/* Index in input ring where driver should write the next packet */
u16 host_write_index;
/* Index in input ring where Octeon is expected to read next packet */
u16 octep_vf_read_index;
/* This index aids in finding the window in the queue where Octeon
* has read the commands.
*/
u16 flush_index;
/* Statistics for this input queue. */
struct octep_vf_iq_stats stats;
/* Pointer to the Virtual Base addr of the input ring. */
struct octep_vf_tx_desc_hw *desc_ring;
/* DMA mapped base address of the input descriptor ring. */
dma_addr_t desc_ring_dma;
/* Info of Tx buffers pending completion. */
struct octep_vf_tx_buffer *buff_info;
/* Base pointer to Scatter/Gather lists for all ring descriptors. */
struct octep_vf_tx_sglist_desc *sglist;
/* DMA mapped addr of Scatter Gather Lists */
dma_addr_t sglist_dma;
/* Octeon doorbell register for the ring. */
u8 __iomem *doorbell_reg;
/* Octeon instruction count register for this ring. */
u8 __iomem *inst_cnt_reg;
/* interrupt level register for this ring */
u8 __iomem *intr_lvl_reg;
/* Maximum no. of instructions in this queue. */
u32 max_count;
u32 ring_size_mask;
u32 pkt_in_done;
u32 pkts_processed;
u32 status;
/* Number of instructions pending to be posted to Octeon. */
u32 fill_cnt;
/* The max. number of instructions that can be held pending by the
* driver before ringing doorbell.
*/
u32 fill_threshold;
};
/* Hardware Tx Instruction Header */
struct octep_vf_instr_hdr {
/* Data Len */
u64 tlen:16;
/* Reserved */
u64 rsvd:20;
/* PKIND for SDP */
u64 pkind:6;
/* Front Data size */
u64 fsz:6;
/* No. of entries in gather list */
u64 gsz:14;
/* Gather indicator 1=gather*/
u64 gather:1;
/* Reserved3 */
u64 reserved3:1;
};
static_assert(sizeof(struct octep_vf_instr_hdr) == 8);
/* Tx offload flags */
#define OCTEP_VF_TX_OFFLOAD_VLAN_INSERT BIT(0)
#define OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM BIT(1)
#define OCTEP_VF_TX_OFFLOAD_UDP_CKSUM BIT(2)
#define OCTEP_VF_TX_OFFLOAD_TCP_CKSUM BIT(3)
#define OCTEP_VF_TX_OFFLOAD_SCTP_CKSUM BIT(4)
#define OCTEP_VF_TX_OFFLOAD_TCP_TSO BIT(5)
#define OCTEP_VF_TX_OFFLOAD_UDP_TSO BIT(6)
#define OCTEP_VF_TX_OFFLOAD_CKSUM (OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
OCTEP_VF_TX_OFFLOAD_UDP_CKSUM | \
OCTEP_VF_TX_OFFLOAD_TCP_CKSUM)
#define OCTEP_VF_TX_OFFLOAD_TSO (OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
OCTEP_VF_TX_OFFLOAD_UDP_TSO)
#define OCTEP_VF_TX_IP_CSUM(flags) ((flags) & \
(OCTEP_VF_TX_OFFLOAD_IPV4_CKSUM | \
OCTEP_VF_TX_OFFLOAD_TCP_CKSUM | \
OCTEP_VF_TX_OFFLOAD_UDP_CKSUM))
#define OCTEP_VF_TX_TSO(flags) ((flags) & \
(OCTEP_VF_TX_OFFLOAD_TCP_TSO | \
OCTEP_VF_TX_OFFLOAD_UDP_TSO))
struct tx_mdata {
/* offload flags */
u16 ol_flags;
/* gso size */
u16 gso_size;
/* gso segment count */
u16 gso_segs;
/* reserved */
u16 rsvd1;
/* reserved */
u64 rsvd2;
};
static_assert(sizeof(struct tx_mdata) == 16);
/* 64-byte Tx instruction format.
* Format of instruction for a 64-byte mode input queue.
*
* Only the first 16 bytes (dptr and ih) are mandatory; the rest are
* optional and filled in by the driver based on firmware/hardware
* capabilities. These optional headers are together called Front Data,
* and their size is given by ih->fsz.
*/
struct octep_vf_tx_desc_hw {
/* Pointer where the input data is available. */
u64 dptr;
/* Instruction Header. */
union {
struct octep_vf_instr_hdr ih;
u64 ih64;
};
union {
u64 txm64[2];
struct tx_mdata txm;
};
/* Additional headers available in a 64-byte instruction. */
u64 exhdr[4];
};
static_assert(sizeof(struct octep_vf_tx_desc_hw) == 64);
#define OCTEP_VF_IQ_DESC_SIZE (sizeof(struct octep_vf_tx_desc_hw))
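/* Usage sketch (hypothetical values; not the driver's xmit path): fill a
* 64-byte instruction for a single linear buffer, assuming tx_mdata is
* sent as 16 bytes of front data and that ih->tlen covers packet data
* plus front data.
*/
static inline void octep_vf_fill_desc_sketch(struct octep_vf_tx_desc_hw *hw,
dma_addr_t dma, u16 data_len)
{
memset(hw, 0, sizeof(*hw));
hw->dptr = dma; /* DMA address of packet data */
hw->ih.fsz = sizeof(struct tx_mdata); /* front data: metadata header */
hw->ih.tlen = data_len + sizeof(struct tx_mdata); /* data + front data */
}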
#endif /* _OCTEP_VF_TX_H_ */