Commit a31eb63a authored by David S. Miller

Merge branch 'thunderx-next'

Sunil Goutham says:

====================
net: thunderx: Support for newer chips and miscellaneous patches

This patch series adds support for VNIC on 81xx and 83xx SOCs.
81xx/83xx differ from 88xx in capabilities and in the interface types
supported (e.g. QSGMII, RGMII), and they have DLMs instead of QLMs,
which allows a single BGX to carry LMACs of different types.

Also included are some patches common to all 88xx/81xx/83xx SOCs,
such as using the netdev's name while registering IRQs, resetting
receive queue statistics, and switching to standard APIs for
split-buffer Rx packets, RSS key generation, etc.

PS: Most of these patches were submitted earlier as part of different
series but for some reason were not picked up by patchwork. Since new
patches have been added in the meantime, all are resubmitted as a new
patchset.

Changes from v1:
- Incorporated Yuval Mintz's suggestion to use a generic API to set the
  minimum queue count, i.e. netif_get_num_default_rss_queues().
- Resolved a compilation issue reported by the test robot while compiling
  the patch 'Add support for 16 LMACs of 83xx'.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2ce66f9c 93db2cf8
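
As a quick illustration of the IRQ-naming change mentioned in the cover
letter, a standalone sketch (plain C, outside the kernel; the device name
"eth0" and the queue index are made-up values, not taken from the driver):

#include <stdio.h>

int main(void)
{
	char irq_name[32];

	/* New scheme: "<netdev name>-rxtx-<qidx>" instead of "NICVF<vf> CQ<irq>" */
	snprintf(irq_name, sizeof(irq_name), "%s-rxtx-%d", "eth0", 3);
	printf("%s\n", irq_name); /* -> "eth0-rxtx-3" in /proc/interrupts */
	return 0;
}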
@@ -36,10 +36,20 @@ config THUNDER_NIC_BGX
 	depends on 64BIT
 	select PHYLIB
 	select MDIO_THUNDER
+	select THUNDER_NIC_RGX
 	---help---
 	  This driver supports programming and controlling of MAC
 	  interface from NIC physical function driver.
 
+config THUNDER_NIC_RGX
+	tristate "Thunder MAC interface driver (RGX)"
+	depends on 64BIT
+	select PHYLIB
+	select MDIO_THUNDER
+	---help---
+	  This driver supports configuring XCV block of RGX interface
+	  present on CN81XX chip.
+
 config LIQUIDIO
 	tristate "Cavium LiquidIO support"
 	depends on 64BIT
......
@@ -2,6 +2,7 @@
 # Makefile for Cavium's Thunder ethernet device
 #
 
+obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
 obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
 obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
 obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
......
@@ -20,6 +20,17 @@
 #define	PCI_DEVICE_ID_THUNDER_NIC_VF	0xA034
 #define	PCI_DEVICE_ID_THUNDER_BGX	0xA026
 
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF		0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF		0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF		0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF	0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF		0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF		0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF		0xA334
+
 /* PCI BAR nos */
 #define	PCI_CFG_REG_BAR_NUM		0
 #define	PCI_MSIX_REG_BAR_NUM		4
@@ -41,40 +52,8 @@
 /* Max pkinds */
 #define	NIC_MAX_PKIND			16
 
-/* Rx Channels */
-/* Receive channel configuration in TNS bypass mode
- * Below is configuration in TNS bypass mode
- * BGX0-LMAC0-CHAN0 - VNIC CHAN0
- * BGX0-LMAC1-CHAN0 - VNIC CHAN16
- * ...
- * BGX1-LMAC0-CHAN0 - VNIC CHAN128
- * ...
- * BGX1-LMAC3-CHAN0 - VNIC CHAN174
- */
-#define NIC_INTF_COUNT			2  /* Interfaces btw VNIC and TNS/BGX */
-#define NIC_CHANS_PER_INF		128
-#define NIC_MAX_CHANS			(NIC_INTF_COUNT * NIC_CHANS_PER_INF)
-#define NIC_CPI_COUNT			2048 /* No of channel parse indices */
-
-/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
-#define NIC_MAX_BGX			MAX_BGX_PER_CN88XX
-#define NIC_CPI_PER_BGX			(NIC_CPI_COUNT / NIC_MAX_BGX)
-#define NIC_MAX_CPI_PER_LMAC		64 /* Max when CPI_ALG is IP diffserv */
-#define NIC_RSSI_PER_BGX		(NIC_RSSI_COUNT / NIC_MAX_BGX)
-
-/* Tx scheduling */
-#define NIC_MAX_TL4			1024
-#define NIC_MAX_TL4_SHAPERS		256 /* 1 shaper for 4 TL4s */
-#define NIC_MAX_TL3			256
-#define NIC_MAX_TL3_SHAPERS		64  /* 1 shaper for 4 TL3s */
-#define NIC_MAX_TL2			64
-#define NIC_MAX_TL2_SHAPERS		2   /* 1 shaper for 32 TL2s */
-#define NIC_MAX_TL1			2
-
-/* TNS bypass mode */
-#define NIC_TL2_PER_BGX			32
-#define NIC_TL4_PER_BGX			(NIC_MAX_TL4 / NIC_MAX_BGX)
-#define NIC_TL4_PER_LMAC		(NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC		64
 
 /* NIC VF Interrupts */
 #define	NICVF_INTR_CQ			0
@@ -148,7 +127,6 @@ struct nicvf_cq_poll {
 	struct napi_struct napi;
 };
 
-#define NIC_RSSI_COUNT			4096 /* Total no of RSS indices */
 #define NIC_MAX_RSS_HASH_BITS		8
 #define NIC_MAX_RSS_IDR_TBL_SIZE	(1 << NIC_MAX_RSS_HASH_BITS)
 #define RSS_HASH_KEY_SIZE		5 /* 320 bit key */
@@ -273,6 +251,7 @@ struct nicvf {
 	struct net_device	*netdev;
 	struct pci_dev		*pdev;
 	void __iomem		*reg_base;
+#define	MAX_QUEUES_PER_QSET	8
 	struct queue_set	*qs;
 	struct nicvf_cq_poll	*napi[8];
 	u8			vf_id;
@@ -368,6 +347,7 @@ struct nicvf {
 #define	NIC_MBOX_MSG_PNICVF_PTR		0x14	/* Get primary qset nicvf ptr */
 #define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send sqet nicvf ptr to PVF */
 #define	NIC_MBOX_MSG_LOOPBACK		0x16	/* Set interface in loopback */
+#define	NIC_MBOX_MSG_RESET_STAT_COUNTER	0x17	/* Reset statistics counters */
 #define	NIC_MBOX_MSG_CFG_DONE		0xF0	/* VF configuration done */
 #define	NIC_MBOX_MSG_SHUTDOWN		0xF1	/* VF is being shutdown */
@@ -484,6 +464,31 @@ struct set_loopback {
 	bool  enable;
 };
 
+/* Reset statistics counters */
+struct reset_stat_cfg {
+	u8    msg;
+	/* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+	u16   rx_stat_mask;
+	/* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+	u8    tx_stat_mask;
+	/* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+	 * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+	 * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+	 * ..
+	 * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+	 * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+	 */
+	u16   rq_stat_mask;
+	/* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+	 * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+	 * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+	 * ..
+	 * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+	 * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+	 */
+	u16   sq_stat_mask;
+};
+
 /* 128 bit shared memory between PF and each VF */
 union nic_mbx {
 	struct { u8 msg; }	msg;
@@ -501,6 +506,7 @@ union nic_mbx {
 	struct sqs_alloc	sqs_alloc;
 	struct nicvf_ptr	nicvf;
 	struct set_loopback	lbk;
+	struct reset_stat_cfg	reset_stat;
 };
 
 #define NIC_NODE_ID_MASK	0x03
@@ -514,7 +520,14 @@ static inline int nic_get_node_id(struct pci_dev *pdev)
 
 static inline bool pass1_silicon(struct pci_dev *pdev)
 {
-	return pdev->revision < 8;
+	return (pdev->revision < 8) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+static inline bool pass2_silicon(struct pci_dev *pdev)
+{
+	return (pdev->revision >= 8) &&
+		(pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
 }
 
 int nicvf_set_real_num_queues(struct net_device *netdev,
......
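
To make the reset_stat_cfg bit layout above concrete, here is a minimal
sketch of a VF resetting only the RQ0/RQ1 counters. The helper name is
hypothetical; NIC_MBOX_MSG_RESET_STAT_COUNTER and nicvf_send_msg_to_pf()
are the mailbox pieces declared in this series:

/* Hypothetical example, not part of the patch: reset STAT0..1 of RQ0
 * and RQ1 only, i.e. bits 0..3 of rq_stat_mask per the layout above.
 */
static int example_reset_rq01_stats(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
	mbx.reset_stat.rq_stat_mask = 0xF; /* RQ0/RQ1, STAT0 and STAT1 */
	return nicvf_send_msg_to_pf(nic, &mbx);
}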
@@ -36,6 +36,20 @@
 #define   NIC_PF_MAILBOX_ENA_W1C		(0x0450)
 #define   NIC_PF_MAILBOX_ENA_W1S		(0x0470)
 #define   NIC_PF_RX_ETYPE_0_7			(0x0500)
+#define   NIC_PF_RX_GENEVE_DEF			(0x0580)
+#define    UDP_GENEVE_PORT_NUM				0x17C1ULL
+#define   NIC_PF_RX_GENEVE_PROT_DEF		(0x0588)
+#define    IPV6_PROT					0x86DDULL
+#define    IPV4_PROT					0x800ULL
+#define    ET_PROT					0x6558ULL
+#define   NIC_PF_RX_NVGRE_PROT_DEF		(0x0598)
+#define   NIC_PF_RX_VXLAN_DEF_0_1		(0x05A0)
+#define    UDP_VXLAN_PORT_NUM				0x12B5
+#define   NIC_PF_RX_VXLAN_PROT_DEF		(0x05B0)
+#define    IPV6_PROT_DEF				0x2ULL
+#define    IPV4_PROT_DEF				0x1ULL
+#define    ET_PROT_DEF					0x3ULL
+#define   NIC_PF_RX_CFG				(0x05D0)
 #define   NIC_PF_PKIND_0_15_CFG			(0x0600)
 #define   NIC_PF_ECC0_FLIP0			(0x1000)
 #define   NIC_PF_ECC1_FLIP0			(0x1008)
@@ -103,6 +117,7 @@
 #define   NIC_PF_SW_SYNC_RX_DONE		(0x490008)
 #define   NIC_PF_TL2_0_63_CFG			(0x500000)
 #define   NIC_PF_TL2_0_63_PRI			(0x520000)
+#define   NIC_PF_TL2_LMAC			(0x540000)
 #define   NIC_PF_TL2_0_63_SH_STATUS		(0x580000)
 #define   NIC_PF_TL3A_0_63_CFG			(0x5F0000)
 #define   NIC_PF_TL3_0_255_CFG			(0x600000)
......
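
The new defines above encode well-known tunnel UDP ports (0x17C1 = 6081
for Geneve, 0x12B5 = 4789 for VXLAN). The PF-side programming lives in
nic_main.c, which is not part of this excerpt; the sketch below is only
a guess at its shape, with the field packing and any enable/valid bits
treated as unknowns. nic_reg_write() is assumed to be the PF driver's
register write helper:

/* Hypothetical sketch only: the real field layouts of these registers
 * are not shown in this excerpt and are likely to differ.
 */
static void example_cfg_tunnel_parser(struct nicpf *nic)
{
	nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF, UDP_GENEVE_PORT_NUM);
	nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
		      (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT);
	nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1, UDP_VXLAN_PORT_NUM);
	nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
		      (IPV6_PROT_DEF << 32) | (IPV4_PROT_DEF << 16) |
		      ET_PROT_DEF);
}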
@@ -29,10 +29,20 @@
 static const struct pci_device_id nicvf_id_table[] = {
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
 			 PCI_DEVICE_ID_THUNDER_NIC_VF,
-			 PCI_VENDOR_ID_CAVIUM, 0xA134) },
+			 PCI_VENDOR_ID_CAVIUM,
+			 PCI_SUBSYS_DEVID_88XX_NIC_VF) },
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
 			 PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
-			 PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+			 PCI_VENDOR_ID_CAVIUM,
+			 PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+			 PCI_DEVICE_ID_THUNDER_NIC_VF,
+			 PCI_VENDOR_ID_CAVIUM,
+			 PCI_SUBSYS_DEVID_81XX_NIC_VF) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+			 PCI_DEVICE_ID_THUNDER_NIC_VF,
+			 PCI_VENDOR_ID_CAVIUM,
+			 PCI_SUBSYS_DEVID_83XX_NIC_VF) },
 	{ 0, }  /* end of table */
 };
@@ -134,15 +144,19 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
 
 	/* Wait for previous message to be acked, timeout 2sec */
 	while (!nic->pf_acked) {
-		if (nic->pf_nacked)
+		if (nic->pf_nacked) {
+			netdev_err(nic->netdev,
+				   "PF NACK to mbox msg 0x%02x from VF%d\n",
+				   (mbx->msg.msg & 0xFF), nic->vf_id);
 			return -EINVAL;
+		}
 		msleep(sleep);
 		if (nic->pf_acked)
 			break;
 		timeout -= sleep;
 		if (!timeout) {
 			netdev_err(nic->netdev,
-				   "PF didn't ack to mbox msg %d from VF%d\n",
+				   "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
 				   (mbx->msg.msg & 0xFF), nic->vf_id);
 			return -EBUSY;
 		}
@@ -352,13 +366,7 @@ static int nicvf_rss_init(struct nicvf *nic)
 
 	rss->enable = true;
 
-	/* Using the HW reset value for now */
-	rss->key[0] = 0xFEED0BADFEED0BADULL;
-	rss->key[1] = 0xFEED0BADFEED0BADULL;
-	rss->key[2] = 0xFEED0BADFEED0BADULL;
-	rss->key[3] = 0xFEED0BADFEED0BADULL;
-	rss->key[4] = 0xFEED0BADFEED0BADULL;
-
+	netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
 	nicvf_set_rss_key(nic);
 
 	rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
@@ -507,7 +515,8 @@ static int nicvf_init_resources(struct nicvf *nic)
 
 static void nicvf_snd_pkt_handler(struct net_device *netdev,
 				  struct cmp_queue *cq,
-				  struct cqe_send_t *cqe_tx, int cqe_type)
+				  struct cqe_send_t *cqe_tx,
+				  int cqe_type, int budget)
 {
 	struct sk_buff *skb = NULL;
 	struct nicvf *nic = netdev_priv(netdev);
@@ -531,7 +540,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 	if (skb) {
 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 		prefetch(skb);
-		dev_consume_skb_any(skb);
+		napi_consume_skb(skb, budget);
 		sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
 	} else {
 		/* In case of HW TSO, HW sends a CQE for each segment of a TSO
@@ -686,7 +695,8 @@ static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
 			break;
 		case CQE_TYPE_SEND:
 			nicvf_snd_pkt_handler(netdev, cq,
-					      (void *)cq_desc, CQE_TYPE_SEND);
+					      (void *)cq_desc, CQE_TYPE_SEND,
+					      budget);
 			tx_done++;
 			break;
 		case CQE_TYPE_INVALID:
@@ -928,16 +938,19 @@ static int nicvf_register_interrupts(struct nicvf *nic)
 	int vector;
 
 	for_each_cq_irq(irq)
-		sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
-			nic->vf_id, irq);
+		sprintf(nic->irq_name[irq], "%s-rxtx-%d",
+			nic->pnicvf->netdev->name,
+			nicvf_netdev_qidx(nic, irq));
 
 	for_each_sq_irq(irq)
-		sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
-			nic->vf_id, irq - NICVF_INTR_ID_SQ);
+		sprintf(nic->irq_name[irq], "%s-sq-%d",
+			nic->pnicvf->netdev->name,
+			nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
 
 	for_each_rbdr_irq(irq)
-		sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
-			nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+		sprintf(nic->irq_name[irq], "%s-rbdr-%d",
+			nic->pnicvf->netdev->name,
+			nic->sqs_mode ? (nic->sqs_id + 1) : 0);
 
 	/* Register CQ interrupts */
 	for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
@@ -961,8 +974,9 @@ static int nicvf_register_interrupts(struct nicvf *nic)
 	}
 
 	/* Register QS error interrupt */
-	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
-		"NICVF%d Qset error", nic->vf_id);
+	sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
+		nic->pnicvf->netdev->name,
+		nic->sqs_mode ? (nic->sqs_id + 1) : 0);
 	irq = NICVF_INTR_ID_QS_ERR;
 	ret = request_irq(nic->msix_entries[irq].vector,
 			  nicvf_qs_err_intr_handler,
@@ -1191,7 +1205,7 @@ int nicvf_open(struct net_device *netdev)
 	}
 
 	/* Check if we got MAC address from PF or else generate a random MAC */
-	if (is_zero_ether_addr(netdev->dev_addr)) {
+	if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
 		eth_hw_addr_random(netdev);
 		nicvf_hw_set_mac_addr(nic, netdev);
 	}
@@ -1527,14 +1541,13 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_release_regions;
 	}
 
-	qcount = MAX_CMP_QUEUES_PER_QS;
+	qcount = netif_get_num_default_rss_queues();
 
 	/* Restrict multiqset support only for host bound VFs */
 	if (pdev->is_virtfn) {
 		/* Set max number of queues per VF */
-		qcount = roundup(num_online_cpus(), MAX_CMP_QUEUES_PER_QS);
-		qcount = min(qcount,
-			     (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+		qcount = min_t(int, num_online_cpus(),
+			       (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
 	}
 
 	netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
......
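
A worked example of the new queue-count clamp in nicvf_probe(), as
standalone C. MAX_SQS_PER_VF = 11 and MAX_CMP_QUEUES_PER_QS = 8 mirror
the driver's limits to the best of this sketch's knowledge; the CPU
count is invented:

#include <stdio.h>

#define MAX_SQS_PER_VF		11
#define MAX_CMP_QUEUES_PER_QS	8

int main(void)
{
	int online_cpus = 48; /* example host */
	int hw_max = (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS; /* 96 */
	int qcount = online_cpus < hw_max ? online_cpus : hw_max;

	/* 48 CPUs fit within the 96-queue ceiling -> qcount = 48 */
	printf("qcount = %d\n", qcount);
	return 0;
}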
@@ -479,6 +479,16 @@ void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
 			      NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
 }
 
+static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
+{
+	union nic_mbx mbx = {};
+
+	/* Reset all RXQ's stats */
+	mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+	mbx.reset_stat.rq_stat_mask = 0xFFFF;
+	nicvf_send_msg_to_pf(nic, &mbx);
+}
+
 /* Configures receive queue */
 static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 				   int qidx, bool enable)
@@ -762,10 +772,10 @@ int nicvf_set_qset_resources(struct nicvf *nic)
 	nic->qs = qs;
 
 	/* Set count of each queue */
-	qs->rbdr_cnt = RBDR_CNT;
-	qs->rq_cnt = RCV_QUEUE_CNT;
-	qs->sq_cnt = SND_QUEUE_CNT;
-	qs->cq_cnt = CMP_QUEUE_CNT;
+	qs->rbdr_cnt = DEFAULT_RBDR_CNT;
+	qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
+	qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
+	qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
 
 	/* Set queue lengths */
 	qs->rbdr_len = RCV_BUF_COUNT;
@@ -812,6 +822,11 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
 			nicvf_free_resources(nic);
 	}
 
+	/* Reset RXQ's stats.
+	 * SQ's stats will get reset automatically once SQ is reset.
+	 */
+	nicvf_reset_rcv_queue_stats(nic);
+
 	return 0;
 }
@@ -1184,13 +1199,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 	int frag;
 	int payload_len = 0;
 	struct sk_buff *skb = NULL;
-	struct sk_buff *skb_frag = NULL;
-	struct sk_buff *prev_frag = NULL;
+	struct page *page;
+	int offset;
 	u16 *rb_lens = NULL;
 	u64 *rb_ptrs = NULL;
 
 	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
-	rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+	/* Except on 88xx pass1, CQE_RX2_S is added to CQE_RX at word6 on
+	 * all chips, hence the buffer pointers move by a word.
+	 *
+	 * Use the existing 'hw_tso' flag, which is set for all chips
+	 * except 88xx pass1, instead of an additional cache line
+	 * access (or miss) from reading the PCI dev's revision.
+	 */
+	if (!nic->hw_tso)
+		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+	else
+		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
 
 	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
 		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
@@ -1208,22 +1233,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 			skb_put(skb, payload_len);
 		} else {
 			/* Add fragments */
-			skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
-						       payload_len);
-			if (!skb_frag) {
-				dev_kfree_skb(skb);
-				return NULL;
-			}
-
-			if (!skb_shinfo(skb)->frag_list)
-				skb_shinfo(skb)->frag_list = skb_frag;
-			else
-				prev_frag->next = skb_frag;
-
-			prev_frag = skb_frag;
-			skb->len += payload_len;
-			skb->data_len += payload_len;
-			skb_frag->len = payload_len;
+			page = virt_to_page(phys_to_virt(*rb_ptrs));
+			offset = phys_to_virt(*rb_ptrs) - page_address(page);
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+					offset, payload_len, RCV_FRAG_LEN);
 		}
 
 		/* Next buffer pointer */
 		rb_ptrs++;
......
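
For intuition about why the frag_list juggling above collapses into one
call: skb_add_rx_frag() both records the page fragment and fixes up the
skb's accounting. Roughly (paraphrased from net/core/skbuff.c; not part
of this patch):

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
		     int off, int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size); /* attach the frag */
	skb->len += size;		/* total packet length */
	skb->data_len += size;		/* non-linear (paged) part */
	skb->truesize += truesize;	/* memory accounting */
}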
@@ -57,10 +57,7 @@
 #define CMP_QUEUE_SIZE6		6ULL /* 64K entries */
 
 /* Default queue count per QS, its lengths and threshold values */
-#define RBDR_CNT		1
-#define RCV_QUEUE_CNT		8
-#define SND_QUEUE_CNT		8
-#define CMP_QUEUE_CNT		8 /* Max of RCV and SND qcount */
+#define DEFAULT_RBDR_CNT	1
 
 #define SND_QSIZE		SND_QUEUE_SIZE2
 #define SND_QUEUE_LEN		(1ULL << (SND_QSIZE + 10))
......
@@ -9,8 +9,20 @@
 #ifndef THUNDER_BGX_H
 #define THUNDER_BGX_H
 
-#define    MAX_BGX_THUNDER			8 /* Max 4 nodes, 2 per node */
+/* PCI device ID */
+#define	PCI_DEVICE_ID_THUNDER_BGX		0xA026
+#define	PCI_DEVICE_ID_THUNDER_RGX		0xA054
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_BGX		0xA126
+#define PCI_SUBSYS_DEVID_81XX_BGX		0xA226
+#define PCI_SUBSYS_DEVID_83XX_BGX		0xA326
+
+#define    MAX_BGX_THUNDER			8 /* Max 2 nodes, 4 per node */
 #define    MAX_BGX_PER_CN88XX			2
+#define    MAX_BGX_PER_CN81XX			3 /* 2 BGXs + 1 RGX */
+#define    MAX_BGX_PER_CN83XX			4
+#define    MAX_BGX_PER_NODE			4
 #define    MAX_LMAC_PER_BGX			4
 #define    MAX_BGX_CHANS_PER_LMAC		16
 #define    MAX_DMAC_PER_LMAC			8
@@ -18,8 +30,6 @@
 #define    MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE	2
 
-#define    MAX_LMAC	(MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
-
 /* Registers */
 #define BGX_CMRX_CFG			0x00
 #define  CMR_PKT_TX_EN				BIT_ULL(13)
@@ -136,6 +146,7 @@
 #define BGX_GMP_PCS_ANX_AN_RESULTS	0x30020
 #define BGX_GMP_PCS_SGM_AN_ADV		0x30068
 #define BGX_GMP_PCS_MISCX_CTL		0x30078
+#define  PCS_MISC_CTL_DISP_EN			BIT_ULL(13)
 #define  PCS_MISC_CTL_GMX_ENO			BIT_ULL(11)
 #define  PCS_MISC_CTL_SAMP_PT_MASK		0x7Full
 #define BGX_GMP_GMI_PRTX_CFG		0x38020
@@ -194,6 +205,9 @@ void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
 void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
 void bgx_lmac_internal_loopback(int node, int bgx_idx,
 				int lmac_idx, bool enable);
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
 u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
 u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
 #define BGX_RX_STATS_COUNT 11
@@ -213,16 +227,9 @@ enum LMAC_TYPE {
 	BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
 	BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
 	BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
-};
-
-enum qlm_mode {
-	QLM_MODE_SGMII,         /* SGMII, each lane independent */
-	QLM_MODE_XAUI_1X4,      /* 1 XAUI or DXAUI, 4 lanes */
-	QLM_MODE_RXAUI_2X2,     /* 2 RXAUI, 2 lanes each */
-	QLM_MODE_XFI_4X1,       /* 4 XFI, 1 lane each */
-	QLM_MODE_XLAUI_1X4,     /* 1 XLAUI, 4 lanes each */
-	QLM_MODE_10G_KR_4X1,    /* 4 10GBASE-KR, 1 lane each */
-	QLM_MODE_40G_KR4_1X4,   /* 1 40GBASE-KR4, 4 lanes each */
+	BGX_MODE_RGMII = 5,
+	BGX_MODE_QSGMII = 6,
+	BGX_MODE_INVALID = 7,
 };
 
 #endif /* THUNDER_BGX_H */
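
A hypothetical helper showing how a probe path could size the per-chip
BGX count from the new subsystem IDs above; the real logic lives in
thunder_bgx.c, which is not shown here:

static unsigned int example_max_bgx(u16 subsys_devid)
{
	switch (subsys_devid) {
	case PCI_SUBSYS_DEVID_81XX_BGX:
		return MAX_BGX_PER_CN81XX;	/* 2 BGXs + 1 RGX */
	case PCI_SUBSYS_DEVID_83XX_BGX:
		return MAX_BGX_PER_CN83XX;	/* 4 */
	default:
		return MAX_BGX_PER_CN88XX;	/* 2 */
	}
}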
/*
* Copyright (C) 2016 Cavium, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*/
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "nic.h"
#include "thunder_bgx.h"
#define DRV_NAME "thunder-xcv"
#define DRV_VERSION "1.0"
/* Register offsets */
#define XCV_RESET		0x00
#define   PORT_EN		BIT_ULL(63)
#define   CLK_RESET		BIT_ULL(15)
#define   DLL_RESET		BIT_ULL(11)
#define   COMP_EN		BIT_ULL(7)
#define   TX_PKT_RESET		BIT_ULL(3)
#define   TX_DATA_RESET		BIT_ULL(2)
#define   RX_PKT_RESET		BIT_ULL(1)
#define   RX_DATA_RESET		BIT_ULL(0)
#define XCV_DLL_CTL		0x10
#define   CLKRX_BYP		BIT_ULL(23)
#define   CLKTX_BYP		BIT_ULL(15)
#define XCV_COMP_CTL		0x20
#define   DRV_BYP		BIT_ULL(63)
#define XCV_CTL			0x30
#define XCV_INT			0x40
#define XCV_INT_W1S		0x48
#define XCV_INT_ENA_W1C		0x50
#define XCV_INT_ENA_W1S		0x58
#define XCV_INBND_STATUS	0x80
#define XCV_BATCH_CRD_RET	0x100
struct xcv {
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
};

static struct xcv *xcv;

/* Supported devices */
static const struct pci_device_id xcv_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) },
	{ 0, }  /* end of table */
};
MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, xcv_id_table);
void xcv_init_hw(void)
{
	u64 cfg;

	/* Take DLL out of reset */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg &= ~DLL_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	/* Take clock tree out of reset */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg &= ~CLK_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
	/* Wait for DLL to lock */
	msleep(1);

	/* Configure DLL - enable or bypass:
	 * TX no bypass, RX bypass
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
	cfg &= ~0xFF03;
	cfg |= CLKRX_BYP;
	writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);

	/* Enable the compensation controller and force the
	 * write to be visible to HW by reading back.
	 */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= COMP_EN;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
	readq_relaxed(xcv->reg_base + XCV_RESET);
	/* Wait for the compensation state machine to lock */
	msleep(10);

	/* Enable the XCV block */
	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= PORT_EN;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

	cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
	cfg |= CLK_RESET;
	writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
}
EXPORT_SYMBOL(xcv_init_hw);
void xcv_setup_link(bool link_up, int link_speed)
{
	u64 cfg;
	int speed = 2;

	if (!xcv) {
		/* Don't dereference xcv->pdev here: xcv is NULL */
		pr_err("XCV init not done, probe may have failed\n");
		return;
	}

	if (link_speed == 100)
		speed = 1;
	else if (link_speed == 10)
		speed = 0;

	if (link_up) {
		/* Set operating speed */
		cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
		cfg &= ~0x03;
		cfg |= speed;
		writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);

		/* Reset datapaths */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg |= TX_DATA_RESET | RX_DATA_RESET;
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

		/* Enable the packet flow */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg |= TX_PKT_RESET | RX_PKT_RESET;
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);

		/* Return credits to RGX */
		writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
	} else {
		/* Disable packet flow */
		cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
		cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
		writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
		readq_relaxed(xcv->reg_base + XCV_RESET);
	}
}
EXPORT_SYMBOL(xcv_setup_link);
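
A hypothetical caller sketch for the two exported entry points; in the
real series the RGMII link-change path in thunder_bgx.c (not shown in
this excerpt) is what would forward PHY state to the XCV block:

static void example_rgmii_link_change(bool link_up, int speed_mbps)
{
	/* xcv_init_hw() is assumed to have run once at port bring-up */
	xcv_setup_link(link_up, speed_mbps); /* speed: 10, 100 or 1000 */
}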
static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err;
	struct device *dev = &pdev->dev;

	xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL);
	if (!xcv)
		return -ENOMEM;
	xcv->pdev = pdev;

	pci_set_drvdata(pdev, xcv);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		goto err_kfree;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* MAP configuration registers */
	xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!xcv->reg_base) {
		dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	return 0;

err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err_kfree:
	pci_set_drvdata(pdev, NULL);
	devm_kfree(dev, xcv);
	xcv = NULL;
	return err;
}
static void xcv_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;

	if (xcv) {
		devm_kfree(dev, xcv);
		xcv = NULL;
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static struct pci_driver xcv_driver = {
	.name = DRV_NAME,
	.id_table = xcv_id_table,
	.probe = xcv_probe,
	.remove = xcv_remove,
};
static int __init xcv_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
	return pci_register_driver(&xcv_driver);
}

static void __exit xcv_cleanup_module(void)
{
	pci_unregister_driver(&xcv_driver);
}

module_init(xcv_init_module);
module_exit(xcv_cleanup_module);