Commit d223d194 authored by Jakub Kicinski

Merge branch 'vmxnet3-upgrade-to-version-9'

Ronak Doshi says:

====================
vmxnet3: upgrade to version 9

The vmxnet3 emulation has recently added a timestamping feature which allows the
hypervisor (ESXi) to calculate latency all the way from the guest virtual NIC
driver up to the physical NIC. This patch series extends the vmxnet3 driver
to leverage this new feature.

Compatibility is maintained using the existing vmxnet3 versioning mechanism as
follows:
 - new features added to the vmxnet3 emulation are associated with a new
   vmxnet3 version, viz. vmxnet3 version 9.
 - the emulation advertises all the versions it supports to the driver.
 - during initialization, the vmxnet3 driver picks the highest version number
   supported by both the emulation and the driver and configures the emulation
   to run at that version.

In particular, the following changes are introduced:

Patch 1:
  This patch introduces utility macros for vmxnet3 version 9 comparison
  and updates the copyright information.
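
  For reference, the version check added here boils down to a comparison
  macro of the following shape (the actual definition appears in
  vmxnet3_int.h further down in this diff):

    #define VMXNET3_VERSION_GE_9(adapter) \
            (adapter->version >= VMXNET3_REV_9 + 1)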

Patch 2:
  This patch adds support to timestamp packets so as to allow latency
  measurement in ESXi.
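
  Roughly, the driver timestamps one packet out of every 'sampleRate'
  transmitted packets. The sketch below paraphrases the sampling helper
  added in vmxnet3_drv.c (the helper name here is illustrative; the
  driver's version is vmxnet3_apply_timestamp, and corner cases are
  simplified):

    /* Return true if this packet should carry a Tx timestamp. */
    static bool sample_for_timestamp(u16 *ts_pkt_count, u16 rate)
    {
            if (rate == 0)
                    return false;           /* sampling disabled */
            if (*ts_pkt_count == 1) {
                    *ts_pkt_count = rate;   /* reload the sampling interval */
                    return true;            /* stamp this packet */
            }
            (*ts_pkt_count)--;
            return false;
    }

  When a packet is selected, the driver records the pseudo-TSC value in the
  Tx timestamp descriptor and sets its 'tsi' bit.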

Patch 3:
  This patch adds support to disable certain offloads on the device based
  on the request specified by the user in the VM configuration.
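
  In rough terms, the device reports a bitmask of offloads that must stay
  disabled, and the driver clears the matching netdev feature flags. A
  condensed sketch of the logic added to vmxnet3_declare_features() (the
  full change also clears the same bits in hw_enc_features):

    u32 disabled = adapter->disabledOffloads; /* VMXNET3_CMD_GET_DISABLED_OFFLOADS */

    if (disabled & VMXNET3_OFFLOAD_TSO)
            netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
    if (disabled & VMXNET3_OFFLOAD_LRO)
            netdev->hw_features &= ~NETIF_F_LRO;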

Patch 4:
  With all vmxnet3 version 9 changes incorporated in the vmxnet3 driver,
  this patch allows the driver to configure the emulation to run at
  vmxnet3 version 9.
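
  The negotiation itself is a highest-common-version handshake; this series
  also collapses the old per-version if/else chain in the probe path into a
  loop along these lines (simplified from the hunk in vmxnet3_drv.c below):

    u32 ver;
    int i;

    ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
    for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
            if (ver & (1 << i)) {
                    /* activate the highest revision both sides support */
                    VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
                    adapter->version = i + 1;
                    break;
            }
    }
    if (i < VMXNET3_REV_1)
            return -EBUSY;  /* no compatible device version */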
====================

Link: https://lore.kernel.org/r/20240531193050.4132-1-ronak.doshi@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 1467713e 63587234
......@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
# Copyright (C) 2007-2024, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
......
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
* Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -80,6 +80,8 @@ enum {
#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
#define VMXNET3_PMC_PSEUDO_TSC 0x10003
enum {
VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
......@@ -123,6 +125,8 @@ enum {
VMXNET3_CMD_GET_RESERVED4,
VMXNET3_CMD_GET_MAX_CAPABILITIES,
VMXNET3_CMD_GET_DCR0_REG,
VMXNET3_CMD_GET_TSRING_DESC_SIZE,
VMXNET3_CMD_GET_DISABLED_OFFLOADS,
};
/*
......@@ -254,6 +258,24 @@ struct Vmxnet3_RxDesc {
#define VMXNET3_RCD_HDR_INNER_SHIFT 13
struct Vmxnet3TSInfo {
u64 tsData:56;
u64 tsType:4;
u64 tsi:1; //bit to indicate to set ts
u64 pad:3;
u64 pad2;
};
struct Vmxnet3_TxTSDesc {
struct Vmxnet3TSInfo ts;
u64 pad[14];
};
struct Vmxnet3_RxTSDesc {
struct Vmxnet3TSInfo ts;
u64 pad[14];
};
struct Vmxnet3_RxCompDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 ext2:1;
......@@ -427,6 +449,13 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
/* Rx TS Ring buffer size must be a multiple of 64 bytes */
#define VMXNET3_RXTS_DESC_SIZE_ALIGN 64
#define VMXNET3_RXTS_DESC_SIZE_MASK (VMXNET3_RXTS_DESC_SIZE_ALIGN - 1)
/* Tx TS Ring buffer size must be a multiple of 64 bytes */
#define VMXNET3_TXTS_DESC_SIZE_ALIGN 64
#define VMXNET3_TXTS_DESC_SIZE_MASK (VMXNET3_TXTS_DESC_SIZE_ALIGN - 1)
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
......@@ -439,6 +468,9 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
#define VMXNET3_TXTS_DESC_MAX_SIZE 256
#define VMXNET3_RXTS_DESC_MAX_SIZE 256
/* a list of reasons for queue stop */
enum {
......@@ -546,6 +578,24 @@ struct Vmxnet3_RxQueueConf {
};
struct Vmxnet3_LatencyConf {
u16 sampleRate;
u16 pad;
};
struct Vmxnet3_TxQueueTSConf {
__le64 txTSRingBasePA;
__le16 txTSRingDescSize; /* size of tx timestamp ring buffer */
u16 pad;
struct Vmxnet3_LatencyConf latencyConf;
};
struct Vmxnet3_RxQueueTSConf {
__le64 rxTSRingBasePA;
__le16 rxTSRingDescSize; /* size of rx timestamp ring buffer */
u16 pad[3];
};
enum vmxnet3_intr_mask_mode {
VMXNET3_IMM_AUTO = 0,
VMXNET3_IMM_ACTIVE = 1,
......@@ -679,7 +729,8 @@ struct Vmxnet3_TxQueueDesc {
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_TxStats stats;
u8 _pad[88]; /* 128 aligned */
struct Vmxnet3_TxQueueTSConf tsConf;
u8 _pad[72]; /* 128 aligned */
};
......@@ -689,7 +740,8 @@ struct Vmxnet3_RxQueueDesc {
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_RxStats stats;
u8 __pad[88]; /* 128 aligned */
struct Vmxnet3_RxQueueTSConf tsConf;
u8 __pad[72]; /* 128 aligned */
};
struct Vmxnet3_SetPolling {
......@@ -861,4 +913,7 @@ struct Vmxnet3_DriverShared {
/* when new capability is introduced, update VMXNET3_CAP_MAX */
#define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX
#define VMXNET3_OFFLOAD_TSO BIT(0)
#define VMXNET3_OFFLOAD_LRO BIT(1)
#endif /* _VMXNET3_DEFS_H_ */
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
* Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -143,6 +143,32 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
static u64
vmxnet3_get_cycles(int pmc)
{
#ifdef CONFIG_X86
return native_read_pmc(pmc);
#else
return 0;
#endif
}
static bool
vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate)
{
#ifdef CONFIG_X86
if (rate > 0) {
if (tq->tsPktCount == 1) {
if (rate != 1)
tq->tsPktCount = rate;
return true;
}
tq->tsPktCount--;
}
#endif
return false;
}
/* Check if capability is supported by UPT device or
* UPT is even requested
*/
......@@ -498,6 +524,12 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
if (tq->ts_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
tq->tx_ring.size * tq->tx_ts_desc_size,
tq->ts_ring.base, tq->ts_ring.basePA);
tq->ts_ring.base = NULL;
}
if (tq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
......@@ -535,6 +567,10 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
memset(tq->data_ring.base, 0,
tq->data_ring.size * tq->txdata_desc_size);
if (tq->ts_ring.base)
memset(tq->ts_ring.base, 0,
tq->tx_ring.size * tq->tx_ts_desc_size);
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc));
......@@ -573,6 +609,18 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
goto err;
}
if (tq->tx_ts_desc_size != 0) {
tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->tx_ring.size * tq->tx_ts_desc_size,
&tq->ts_ring.basePA, GFP_KERNEL);
if (!tq->ts_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx ts ring\n");
tq->tx_ts_desc_size = 0;
}
} else {
tq->ts_ring.base = NULL;
}
tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA, GFP_KERNEL);
......@@ -861,6 +909,11 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
if (tq->tx_ts_desc_size != 0) {
ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
tbi->sop_idx * tq->tx_ts_desc_size);
ctx->ts_txd->ts.tsi = 0;
}
return 0;
}
......@@ -968,7 +1021,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb_headlen(skb));
}
if (skb->len <= VMXNET3_HDR_COPY_SIZE)
if (skb->len <= tq->txdata_desc_size)
ctx->copy_size = skb->len;
/* make sure headers are accessible directly */
......@@ -1259,6 +1312,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
gdesc->txd.tci = skb_vlan_tag_get(skb);
}
if (tq->tx_ts_desc_size != 0 &&
adapter->latencyConf->sampleRate != 0) {
if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
ctx.ts_txd->ts.tsi = 1;
}
}
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
* all other writes to &gdesc->txd.
*/
......@@ -1608,6 +1669,15 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = false;
ctx->skb = rbi->skb;
if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
struct Vmxnet3_RxTSDesc *ts_rxd;
ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
idx * rq->rx_ts_desc_size);
ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
ts_rxd->ts.tsi = 1;
}
rxDataRingUsed =
VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
len = rxDataRingUsed ? rcd->len : rbi->len;
......@@ -2007,6 +2077,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->data_ring.base = NULL;
}
if (rq->ts_ring.base) {
dma_free_coherent(&adapter->pdev->dev,
rq->rx_ring[0].size * rq->rx_ts_desc_size,
rq->ts_ring.base, rq->ts_ring.basePA);
rq->ts_ring.base = NULL;
}
if (rq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
* sizeof(struct Vmxnet3_RxCompDesc),
......@@ -2090,6 +2167,10 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
}
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
if (rq->ts_ring.base)
memset(rq->ts_ring.base, 0,
rq->rx_ring[0].size * rq->rx_ts_desc_size);
/* reset the comp ring */
rq->comp_ring.next2proc = 0;
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
......@@ -2160,6 +2241,21 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->data_ring.desc_size = 0;
}
if (rq->rx_ts_desc_size != 0) {
sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
rq->ts_ring.base =
dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->ts_ring.basePA,
GFP_KERNEL);
if (!rq->ts_ring.base) {
netdev_err(adapter->netdev,
"rx ts ring will be disabled\n");
rq->rx_ts_desc_size = 0;
}
} else {
rq->ts_ring.base = NULL;
}
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA,
......@@ -2759,6 +2855,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
struct Vmxnet3_TxQueueTSConf *tqtsc;
struct Vmxnet3_RxQueueTSConf *rqtsc;
int i;
memset(shared, 0, sizeof(*shared));
......@@ -2815,6 +2913,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(0);
tqc->intrIdx = tq->comp_ring.intr_idx;
if (VMXNET3_VERSION_GE_9(adapter)) {
tqtsc = &adapter->tqd_start[i].tsConf;
tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA);
tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size);
}
}
/* rx queue settings */
......@@ -2837,6 +2940,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxDataRingDescSize =
cpu_to_le16(rq->data_ring.desc_size);
}
if (VMXNET3_VERSION_GE_9(adapter)) {
rqtsc = &adapter->rqd_start[i].tsConf;
rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
}
}
#ifdef VMXNET3_RSS
......@@ -3299,6 +3407,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
tq->stopped = true;
tq->adapter = adapter;
tq->qid = i;
tq->tx_ts_desc_size = adapter->tx_ts_desc_size;
tq->tsPktCount = 1;
err = vmxnet3_tq_create(tq, adapter);
/*
* Too late to change num_tx_queues. We cannot do away with
......@@ -3320,6 +3430,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
rq->data_ring.desc_size = rxdata_desc_size;
rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
......@@ -3361,14 +3472,15 @@ vmxnet3_open(struct net_device *netdev)
if (VMXNET3_VERSION_GE_3(adapter)) {
unsigned long flags;
u16 txdata_desc_size;
u32 ret;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
VMXNET3_REG_CMD);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
txdata_desc_size = ret & 0xffff;
if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
(txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
(txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
......@@ -3377,10 +3489,40 @@ vmxnet3_open(struct net_device *netdev)
} else {
adapter->txdata_desc_size = txdata_desc_size;
}
if (VMXNET3_VERSION_GE_9(adapter))
adapter->rxdata_desc_size = (ret >> 16) & 0xffff;
} else {
adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
}
if (VMXNET3_VERSION_GE_9(adapter)) {
unsigned long flags;
u16 tx_ts_desc_size = 0;
u16 rx_ts_desc_size = 0;
u32 ret;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_TSRING_DESC_SIZE);
ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
if (ret > 0) {
tx_ts_desc_size = (ret & 0xff);
rx_ts_desc_size = ((ret >> 16) & 0xff);
}
if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE ||
tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK)
tx_ts_desc_size = 0;
if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE ||
rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK)
rx_ts_desc_size = 0;
adapter->tx_ts_desc_size = tx_ts_desc_size;
adapter->rx_ts_desc_size = rx_ts_desc_size;
} else {
adapter->tx_ts_desc_size = 0;
adapter->rx_ts_desc_size = 0;
}
err = vmxnet3_create_queues(adapter,
adapter->tx_ring_size,
adapter->rx_ring_size,
......@@ -3503,6 +3645,15 @@ static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
unsigned long flags;
if (VMXNET3_VERSION_GE_9(adapter)) {
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_DISABLED_OFFLOADS);
adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
}
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
......@@ -3520,6 +3671,16 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) {
netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
}
if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) {
netdev->hw_features &= ~(NETIF_F_LRO);
netdev->hw_enc_features &= ~(NETIF_F_LRO);
}
if (VMXNET3_VERSION_GE_7(adapter)) {
unsigned long flags;
......@@ -3790,7 +3951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
int size;
int size, i;
int num_tx_queues;
int num_rx_queues;
int queues;
......@@ -3857,42 +4018,14 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
if (ver & (1 << VMXNET3_REV_7)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_7);
adapter->version = VMXNET3_REV_7 + 1;
} else if (ver & (1 << VMXNET3_REV_6)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_6);
adapter->version = VMXNET3_REV_6 + 1;
} else if (ver & (1 << VMXNET3_REV_5)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_5);
adapter->version = VMXNET3_REV_5 + 1;
} else if (ver & (1 << VMXNET3_REV_4)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_4);
adapter->version = VMXNET3_REV_4 + 1;
} else if (ver & (1 << VMXNET3_REV_3)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_3);
adapter->version = VMXNET3_REV_3 + 1;
} else if (ver & (1 << VMXNET3_REV_2)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_2);
adapter->version = VMXNET3_REV_2 + 1;
} else if (ver & (1 << VMXNET3_REV_1)) {
VMXNET3_WRITE_BAR1_REG(adapter,
VMXNET3_REG_VRRS,
1 << VMXNET3_REV_1);
adapter->version = VMXNET3_REV_1 + 1;
} else {
for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
if (ver & (1 << i)) {
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
adapter->version = i + 1;
break;
}
}
if (i < VMXNET3_REV_1) {
dev_err(&pdev->dev,
"Incompatible h/w version (0x%x) for adapter\n", ver);
err = -EBUSY;
......@@ -3992,6 +4125,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
if (VMXNET3_VERSION_GE_9(adapter))
adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf;
adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_PMConf),
......
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
* Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
* Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
* Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
......@@ -72,18 +72,20 @@
/*
* Version numbers
*/
#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k"
#define VMXNET3_DRIVER_VERSION_STRING "1.9.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
#define VMXNET3_DRIVER_VERSION_NUM 0x01070000
#define VMXNET3_DRIVER_VERSION_NUM 0x01090000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
#define VMXNET3_REV_9 8 /* Vmxnet3 Rev. 9 */
#define VMXNET3_REV_8 7 /* Vmxnet3 Rev. 8 */
#define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
......@@ -191,6 +193,11 @@ struct vmxnet3_tx_data_ring {
dma_addr_t basePA;
};
struct vmxnet3_tx_ts_ring {
struct Vmxnet3_TxTSDesc *base;
dma_addr_t basePA;
};
#define VMXNET3_MAP_NONE 0
#define VMXNET3_MAP_SINGLE BIT(0)
#define VMXNET3_MAP_PAGE BIT(1)
......@@ -243,6 +250,7 @@ struct vmxnet3_tx_ctx {
u32 copy_size; /* # of bytes copied into the data ring */
union Vmxnet3_GenericDesc *sop_txd;
union Vmxnet3_GenericDesc *eop_txd;
struct Vmxnet3_TxTSDesc *ts_txd;
};
struct vmxnet3_tx_queue {
......@@ -252,6 +260,7 @@ struct vmxnet3_tx_queue {
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_tx_ts_ring ts_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
......@@ -260,6 +269,8 @@ struct vmxnet3_tx_queue {
* stopped */
int qid;
u16 txdata_desc_size;
u16 tx_ts_desc_size;
u16 tsPktCount;
} ____cacheline_aligned;
enum vmxnet3_rx_buf_type {
......@@ -307,6 +318,11 @@ struct vmxnet3_rx_data_ring {
u16 desc_size;
};
struct vmxnet3_rx_ts_ring {
struct Vmxnet3_RxTSDesc *base;
dma_addr_t basePA;
};
struct vmxnet3_rx_queue {
char name[IFNAMSIZ + 8]; /* To identify interrupt */
struct vmxnet3_adapter *adapter;
......@@ -314,6 +330,7 @@ struct vmxnet3_rx_queue {
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_rx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct vmxnet3_rx_ts_ring ts_ring;
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
......@@ -323,6 +340,7 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
u16 rx_ts_desc_size;
} ____cacheline_aligned;
#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
......@@ -432,6 +450,11 @@ struct vmxnet3_adapter {
u16 rx_prod_offset;
u16 rx_prod2_offset;
struct bpf_prog __rcu *xdp_bpf_prog;
struct Vmxnet3_LatencyConf *latencyConf;
/* Size of buffer in the ts ring */
u16 tx_ts_desc_size;
u16 rx_ts_desc_size;
u32 disabledOffloads;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
......@@ -463,6 +486,10 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_6 + 1)
#define VMXNET3_VERSION_GE_7(adapter) \
(adapter->version >= VMXNET3_REV_7 + 1)
#define VMXNET3_VERSION_GE_8(adapter) \
(adapter->version >= VMXNET3_REV_8 + 1)
#define VMXNET3_VERSION_GE_9(adapter) \
(adapter->version >= VMXNET3_REV_9 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
......