Commit 6818c4d5 authored by Joshua Hay's avatar Joshua Hay Committed by Tony Nguyen

idpf: add splitq start_xmit

Add start_xmit support for the split queue model. To start with, add the
necessary checks to linearize the skb if it uses more buffers than
the hardware-supported limit. Stop the transmit queue if there are
not enough descriptors available for the skb, or if we are going to
potentially overrun the completion queue. Finally, prepare the
descriptor with all the required information and update the tail.
Signed-off-by: default avatarJoshua Hay <joshua.a.hay@intel.com>
Co-developed-by: default avatarAlan Brady <alan.brady@intel.com>
Signed-off-by: default avatarAlan Brady <alan.brady@intel.com>
Co-developed-by: default avatarMadhu Chittim <madhu.chittim@intel.com>
Signed-off-by: default avatarMadhu Chittim <madhu.chittim@intel.com>
Co-developed-by: default avatarPhani Burra <phani.r.burra@intel.com>
Signed-off-by: default avatarPhani Burra <phani.r.burra@intel.com>
Reviewed-by: default avatarSridhar Samudrala <sridhar.samudrala@intel.com>
Reviewed-by: default avatarWillem de Bruijn <willemb@google.com>
Co-developed-by: default avatarPavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: default avatarPavan Kumar Linga <pavan.kumar.linga@intel.com>
Signed-off-by: default avatarTony Nguyen <anthony.l.nguyen@intel.com>
parent d4d55871
......@@ -13,6 +13,7 @@ struct idpf_vport_max_q;
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/bitfield.h>
#include "virtchnl2.h"
#include "idpf_lan_txrx.h"
......
......@@ -56,6 +56,64 @@ enum idpf_rss_hash {
BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP))
/* Base TX context descriptor: QW1 field shifts and masks */
#define IDPF_TXD_CTX_QW1_MSS_S		50
#define IDPF_TXD_CTX_QW1_MSS_M		GENMASK_ULL(63, 50)
#define IDPF_TXD_CTX_QW1_TSO_LEN_S	30
#define IDPF_TXD_CTX_QW1_TSO_LEN_M	GENMASK_ULL(47, 30)
#define IDPF_TXD_CTX_QW1_CMD_S		4
#define IDPF_TXD_CTX_QW1_CMD_M		GENMASK_ULL(15, 4)
#define IDPF_TXD_CTX_QW1_DTYPE_S	0
#define IDPF_TXD_CTX_QW1_DTYPE_M	GENMASK_ULL(3, 0)

/* Base TX data descriptor: QW1 field shifts and masks */
#define IDPF_TXD_QW1_L2TAG1_S		48
#define IDPF_TXD_QW1_L2TAG1_M		GENMASK_ULL(63, 48)
#define IDPF_TXD_QW1_TX_BUF_SZ_S	34
#define IDPF_TXD_QW1_TX_BUF_SZ_M	GENMASK_ULL(47, 34)
#define IDPF_TXD_QW1_OFFSET_S		16
#define IDPF_TXD_QW1_OFFSET_M		GENMASK_ULL(33, 16)
#define IDPF_TXD_QW1_CMD_S		4
#define IDPF_TXD_QW1_CMD_M		GENMASK_ULL(15, 4)
#define IDPF_TXD_QW1_DTYPE_S		0
#define IDPF_TXD_QW1_DTYPE_M		GENMASK_ULL(3, 0)
/* Descriptor type (DTYPE) values placed in the low bits of QW1; they tell
 * the device how to interpret the rest of the descriptor.
 */
enum idpf_tx_desc_dtype_value {
	IDPF_TX_DESC_DTYPE_DATA				= 0,
	IDPF_TX_DESC_DTYPE_CTX				= 1,
	/* DTYPE 2 is reserved
	 * DTYPE 3 is free for future use
	 * DTYPE 4 is reserved
	 */
	IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX			= 5,
	/* DTYPE 6 is reserved */
	IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2		= 7,
	/* DTYPE 8, 9 are free for future use
	 * DTYPE 10 is reserved
	 * DTYPE 11 is free for future use
	 */
	IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE		= 12,
	/* DTYPE 13, 14 are free for future use */

	/* DESC_DONE - HW has completed write-back of descriptor */
	IDPF_TX_DESC_DTYPE_DESC_DONE			= 15,
};
/* Command bits for the base TX data descriptor (IDPF_TXD_QW1_CMD field).
 * IIPT_IPV4_CSUM and L4T_EOFT_UDP are two-bit encodings occupying both
 * bits of their respective sub-fields (5:6 and 8:9).
 */
enum idpf_tx_base_desc_cmd_bits {
	IDPF_TX_DESC_CMD_EOP			= BIT(0),
	IDPF_TX_DESC_CMD_RS			= BIT(1),
	/* only on VFs else RSVD */
	IDPF_TX_DESC_CMD_ICRC			= BIT(2),
	IDPF_TX_DESC_CMD_IL2TAG1		= BIT(3),
	IDPF_TX_DESC_CMD_RSVD1			= BIT(4),
	IDPF_TX_DESC_CMD_IIPT_IPV6		= BIT(5),
	IDPF_TX_DESC_CMD_IIPT_IPV4		= BIT(6),
	IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM		= GENMASK(6, 5),
	IDPF_TX_DESC_CMD_RSVD2			= BIT(7),
	IDPF_TX_DESC_CMD_L4T_EOFT_TCP		= BIT(8),
	IDPF_TX_DESC_CMD_L4T_EOFT_SCTP		= BIT(9),
	IDPF_TX_DESC_CMD_L4T_EOFT_UDP		= GENMASK(9, 8),
	IDPF_TX_DESC_CMD_RSVD3			= BIT(10),
	IDPF_TX_DESC_CMD_RSVD4			= BIT(11),
};
/* Transmit descriptors */
/* splitq tx buf, singleq tx buf and singleq compl desc */
struct idpf_base_tx_desc {
......@@ -74,4 +132,89 @@ struct idpf_splitq_tx_compl_desc {
u8 rsvd; /* Reserved */
}; /* writeback used with completion queues */
/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C).
 * These land in the CMD portion of cmd_dtype (bits 15:5).
 */
enum idpf_tx_flex_desc_cmd_bits {
	IDPF_TX_FLEX_DESC_CMD_EOP			= BIT(0),
	IDPF_TX_FLEX_DESC_CMD_RS			= BIT(1),
	IDPF_TX_FLEX_DESC_CMD_RE			= BIT(2),
	IDPF_TX_FLEX_DESC_CMD_IL2TAG1			= BIT(3),
	IDPF_TX_FLEX_DESC_CMD_DUMMY			= BIT(4),
	IDPF_TX_FLEX_DESC_CMD_CS_EN			= BIT(5),
	IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN		= BIT(6),
	IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT		= BIT(7),
};
/* Flex TX data descriptor, queue-based scheduling flavor.
 * buf_addr occupies QW0 for all data descriptor flavors.
 */
struct idpf_flex_tx_desc {
	__le64 buf_addr;	/* Packet buffer address */
	struct {
#define IDPF_FLEX_TXD_QW1_DTYPE_S	0
#define IDPF_FLEX_TXD_QW1_DTYPE_M	GENMASK(4, 0)
#define IDPF_FLEX_TXD_QW1_CMD_S		5
#define IDPF_FLEX_TXD_QW1_CMD_M		GENMASK(15, 5)
		__le16 cmd_dtype;
		/* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
		struct {
			__le16 l2tag1;
			__le16 l2tag2;
		} l2tags;
		__le16 buf_size;	/* length of data at buf_addr */
	} qw1;
};
/* Flex TX data descriptor, flow-based (scheduling) flavor */
struct idpf_flex_tx_sched_desc {
	__le64 buf_addr;	/* Packet buffer address */

	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */
	struct {
		u8 cmd_dtype;
#define IDPF_TXD_FLEX_FLOW_DTYPE_M	GENMASK(4, 0)
#define IDPF_TXD_FLEX_FLOW_CMD_EOP	BIT(5)
#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN	BIT(6)
#define IDPF_TXD_FLEX_FLOW_CMD_RE	BIT(7)

		/* [23:23] Horizon Overflow bit, [22:0] timestamp;
		 * the overflow mask below applies to ts[2] (bit 23 overall)
		 */
		u8 ts[3];
#define IDPF_TXD_FLOW_SCH_HORIZON_OVERFLOW_M	BIT(7)

		__le16 compl_tag;	/* tag echoed back in the completion */
		__le16 rxr_bufsize;
#define IDPF_TXD_FLEX_FLOW_RXR		BIT(14)
#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M	GENMASK(13, 0)
	} qw1;
};
/* Common cmd fields for all flex context descriptors
 * Note: these defines already account for the 5 bit dtype in the cmd_dtype
 * field
 */
enum idpf_tx_flex_ctx_desc_cmd_bits {
	IDPF_TX_FLEX_CTX_DESC_CMD_TSO			= BIT(5),
	IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN		= BIT(6),
	IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2		= BIT(7),
	/* SWTCH_TARGETVSI is a two-bit encoding occupying bits 9 and 10 */
	IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK		= BIT(9),
	IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL		= BIT(10),
	IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI	= GENMASK(10, 9),
};
/* Standard flex descriptor TSO context quad word */
struct idpf_flex_tx_tso_ctx_qw {
	__le32 flex_tlen;	/* total TSO payload length (18 valid bits) */
#define IDPF_TXD_FLEX_CTX_TLEN_M	GENMASK(17, 0)
#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S	24
	__le16 mss_rt;		/* MSS (14 valid bits) */
#define IDPF_TXD_FLEX_CTX_MSS_RT_M	GENMASK(13, 0)
	u8 hdr_len;		/* length of headers duplicated per segment */
	u8 flex;
};
/* Flex TX context descriptor; occupies one ring slot like a data desc */
struct idpf_flex_tx_ctx_desc {
	/* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
	struct {
		struct idpf_flex_tx_tso_ctx_qw qw0;
		struct {
			__le16 cmd_dtype;
			u8 flex[6];
		} qw1;
	} tso;
};
#endif /* _IDPF_LAN_TXRX_H_ */
......@@ -1557,6 +1557,7 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
/* netdev ops for the split queue model; this patch wires up start_xmit */
static const struct net_device_ops idpf_netdev_ops_splitq = {
	.ndo_open = idpf_open,
	.ndo_stop = idpf_stop,
	.ndo_start_xmit = idpf_tx_splitq_start,
};
static const struct net_device_ops idpf_netdev_ops_singleq = {
......
......@@ -10,7 +10,24 @@
*/
static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
{
	struct sk_buff *skb = tx_buf->skb;

	/* A non-NULL skb marks the head buffer, which was mapped with
	 * dma_map_single(); any other mapped buffer holds a frag page.
	 */
	if (!skb) {
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_page(tx_q->dev,
				       dma_unmap_addr(tx_buf, dma),
				       dma_unmap_len(tx_buf, len),
				       DMA_TO_DEVICE);
	} else {
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_q->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	/* Reset bookkeeping so the slot can be safely reused */
	tx_buf->skb = NULL;
	tx_buf->next_to_watch = NULL;
	tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
	dma_unmap_len_set(tx_buf, len, 0);
}
/**
......@@ -1347,6 +1364,773 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
return err;
}
/**
 * idpf_tx_splitq_build_ctb - populate command tag and size for queue
 * based scheduling descriptors
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 *
 * Builds QW1 of a queue-based-scheduling data descriptor: DTYPE in the
 * low 5 bits, command bits above it, plus L2 tag and buffer size.
 */
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
			      struct idpf_tx_splitq_params *params,
			      u16 td_cmd, u16 size)
{
	u16 cmd_dtype = params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M;

	cmd_dtype |= (td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
		     IDPF_FLEX_TXD_QW1_CMD_M;

	desc->q.qw1.cmd_dtype = cpu_to_le16(cmd_dtype);
	desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
	desc->q.qw1.buf_size = cpu_to_le16(size);
}
/**
 * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
 * scheduling descriptors
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 *
 * Builds QW1 of a flow-based-scheduling data descriptor: combined
 * cmd/dtype byte, completion tag, and buffer size.
 */
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
				    struct idpf_tx_splitq_params *params,
				    u16 td_cmd, u16 size)
{
	struct idpf_flex_tx_sched_desc *flow = &desc->flow;

	flow->qw1.cmd_dtype = (u16)params->dtype | td_cmd;
	flow->qw1.compl_tag = cpu_to_le16(params->compl_tag);
	flow->qw1.rxr_bufsize = cpu_to_le16(size);
}
/**
 * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
 * @tx_q: the queue to be checked
 * @size: number of descriptors we want to assure is available
 *
 * Returns 0 if stop is not needed. When descriptors are short, counts a
 * q_busy event and defers the actual stop decision to the core
 * netif_txq_maybe_stop() helper.
 */
static int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
{
	struct netdev_queue *nq;

	/* fast path: enough free descriptors, nothing to do */
	if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
		return 0;

	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);

	u64_stats_update_begin(&tx_q->stats_sync);
	u64_stats_inc(&tx_q->q_stats.tx.q_busy);
	u64_stats_update_end(&tx_q->stats_sync);

	return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
}
/**
* idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
* @tx_q: the queue to be checked
* @descs_needed: number of descriptors required for this packet
*
* Returns 0 if stop is not needed
*/
static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
goto splitq_stop;
/* If there are too many outstanding completions expected on the
* completion queue, stop the TX queue to give the device some time to
* catch up
*/
if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
goto splitq_stop;
/* Also check for available book keeping buffers; if we are low, stop
* the queue to wait for more completions
*/
if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
goto splitq_stop;
return 0;
splitq_stop:
u64_stats_update_begin(&tx_q->stats_sync);
u64_stats_inc(&tx_q->q_stats.tx.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
return -EBUSY;
}
/**
 * idpf_tx_buf_hw_update - Store the new tail value
 * @tx_q: queue to bump
 * @val: new tail index
 * @xmit_more: more skb's pending
 *
 * The naming here is special in that 'hw' signals that this function is about
 * to do a register write to update our queue status. We know this can only
 * mean tail here as HW should be owning head for TX.
 */
static void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
				  bool xmit_more)
{
	struct netdev_queue *nq;

	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
	tx_q->next_to_use = val;

	/* Pre-emptively stop the queue if a worst-case packet
	 * (IDPF_TX_DESC_NEEDED descriptors) would no longer fit.
	 */
	idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	/* notify HW of packet; skip the tail write while the stack has more
	 * frames queued for us, unless the queue was just stopped
	 */
	if (netif_xmit_stopped(nq) || !xmit_more)
		writel(val, tx_q->tail);
}
/**
 * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb: one for the
 * linear head (if any) plus one per frag, except frags of 16K or more,
 * which span multiple descriptors.
 */
static unsigned int idpf_tx_desc_count_required(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo;
	unsigned int count;
	unsigned int i;

	count = !!skb_headlen(skb);
	if (!skb_is_nonlinear(skb))
		return count;

	shinfo = skb_shinfo(skb);
	for (i = 0; i < shinfo->nr_frags; i++) {
		unsigned int size = skb_frag_size(&shinfo->frags[i]);

		/* We only need to use the idpf_size_to_txd_count check if
		 * the fragment is going to span multiple descriptors,
		 * i.e. size >= 16K.
		 */
		count += size >= SZ_16K ? idpf_size_to_txd_count(size) : 1;
	}

	return count;
}
/**
 * idpf_tx_dma_map_error - handle TX DMA map errors
 * @txq: queue to send buffer on
 * @skb: send buffer
 * @first: original first buffer info buffer for packet
 * @idx: starting point on ring to unwind
 *
 * Unwinds a partially-mapped packet: walks the buffer ring backwards from
 * @idx to @first, releasing every mapping (and the skb, held by @first),
 * then rewinds the software tail so the half-built descriptors are never
 * fetched by hardware.
 */
static void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
				  struct idpf_tx_buf *first, u16 idx)
{
	u64_stats_update_begin(&txq->stats_sync);
	u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
	u64_stats_update_end(&txq->stats_sync);

	/* clear dma mappings for failed tx_buf map; step backwards through
	 * the ring (wrapping at index 0) until @first has been released
	 */
	for (;;) {
		struct idpf_tx_buf *tx_buf;

		tx_buf = &txq->tx_buf[idx];
		idpf_tx_buf_rel(txq, tx_buf);
		if (tx_buf == first)
			break;
		if (idx == 0)
			idx = txq->desc_count;
		idx--;
	}

	if (skb_is_gso(skb)) {
		union idpf_tx_flex_desc *tx_desc;

		/* If we failed a DMA mapping for a TSO packet, we will have
		 * used one additional descriptor for a context
		 * descriptor. Reset that here.
		 */
		tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
		memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
		if (idx == 0)
			idx = txq->desc_count;
		idx--;
	}

	/* Update tail in case netdev_xmit_more was previously true */
	idpf_tx_buf_hw_update(txq, idx, false);
}
/**
 * idpf_tx_splitq_bump_ntu - adjust NTU and generation
 * @txq: the tx ring to wrap
 * @ntu: ring index to bump
 *
 * Returns the incremented index, wrapped to 0 at the end of the ring.
 * On wrap, also advances the completion-tag generation.
 */
static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
{
	if (++ntu != txq->desc_count)
		return ntu;

	/* wrapped around: move to the next completion tag generation */
	txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);

	return 0;
}
/**
 * idpf_tx_splitq_map - Build the Tx flex descriptor
 * @tx_q: queue to send buffer on
 * @params: pointer to splitq params struct
 * @first: first buffer info buffer to use
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit flex descriptor.
 *
 * Side effect: writes the packet's completion tag (generation bits plus
 * starting ring index) into @params->compl_tag.
 */
static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
			       struct idpf_tx_splitq_params *params,
			       struct idpf_tx_buf *first)
{
	union idpf_tx_flex_desc *tx_desc;
	unsigned int data_len, size;
	struct idpf_tx_buf *tx_buf;
	u16 i = tx_q->next_to_use;
	struct netdev_queue *nq;
	struct sk_buff *skb;
	skb_frag_t *frag;
	u16 td_cmd = 0;
	dma_addr_t dma;

	skb = first->skb;

	td_cmd = params->offload.td_cmd;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);

	/* map the linear head first; frags are mapped inside the loop */
	dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	/* completion tag = generation in the upper bits, starting ring
	 * index in the lower bits
	 */
	params->compl_tag =
		(tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

		/* checks the mapping made at the bottom of the previous
		 * iteration (or the head mapping on the first pass)
		 */
		if (dma_mapping_error(tx_q->dev, dma))
			return idpf_tx_dma_map_error(tx_q, skb, first, i);

		tx_buf->compl_tag = params->compl_tag;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* buf_addr is in same location for both desc types */
		tx_desc->q.buf_addr = cpu_to_le64(dma);

		/* The stack can send us fragments that are too large for a
		 * single descriptor i.e. frag size > 16K-1. We will need to
		 * split the fragment across multiple descriptors in this case.
		 * To adhere to HW alignment restrictions, the fragment needs
		 * to be split such that the first chunk ends on a 4K boundary
		 * and all subsequent chunks start on a 4K boundary. We still
		 * want to send as much data as possible though, so our
		 * intermediate descriptor chunk size will be 12K.
		 *
		 * For example, consider a 32K fragment mapped to DMA addr 2600.
		 * ------------------------------------------------------------
		 * |                    frag_size = 32K                       |
		 * ------------------------------------------------------------
		 * |2600		  |16384	    |28672
		 *
		 * 3 descriptors will be used for this fragment. The HW expects
		 * the descriptors to contain the following:
		 * ------------------------------------------------------------
		 * | size = 13784         | size = 12K      | size = 6696     |
		 * | dma = 2600           | dma = 16384     | dma = 28672     |
		 * ------------------------------------------------------------
		 *
		 * We need to first adjust the max_data for the first chunk so
		 * that it ends on a 4K boundary. By negating the value of the
		 * DMA address and taking only the low order bits, we're
		 * effectively calculating
		 *	4K - (DMA addr lower order bits) =
		 *				bytes to next boundary.
		 *
		 * Add that to our base aligned max_data (12K) and we have
		 * our first chunk size. In the example above,
		 *	13784 = 12K + (4096-2600)
		 *
		 * After guaranteeing the first chunk ends on a 4K boundary, we
		 * will give the intermediate descriptors 12K chunks and
		 * whatever is left to the final descriptor. This ensures that
		 * all descriptors used for the remaining chunks of the
		 * fragment start on a 4K boundary and we use as few
		 * descriptors as possible.
		 */
		max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
		while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
			idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
						  max_data);

			tx_desc++;
			i++;

			if (i == tx_q->desc_count) {
				tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
				i = 0;
				tx_q->compl_tag_cur_gen =
					IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
			}

			/* Since this packet has a buffer that is going to span
			 * multiple descriptors, it's going to leave holes in
			 * to the TX buffer ring. To ensure these holes do not
			 * cause issues in the cleaning routines, we will clear
			 * them of any stale data and assign them the same
			 * completion tag as the current packet. Then when the
			 * packet is being cleaned, the cleaning routines will
			 * simply pass over these holes and finish cleaning the
			 * rest of the packet.
			 */
			memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
			tx_q->tx_buf[i].compl_tag = params->compl_tag;

			/* Adjust the DMA offset and the remaining size of the
			 * fragment.  On the first iteration of this loop,
			 * max_data will be >= 12K and <= 16K-1.  On any
			 * subsequent iteration of this loop, max_data will
			 * always be 12K.
			 */
			dma += max_data;
			size -= max_data;

			/* Reset max_data since remaining chunks will be 12K
			 * at most
			 */
			max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;

			/* buf_addr is in same location for both desc types */
			tx_desc->q.buf_addr = cpu_to_le64(dma);
		}

		/* no frags left: the last descriptor is written after the
		 * loop with the EOP/RS bits added
		 */
		if (!data_len)
			break;

		idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
		tx_desc++;
		i++;

		if (i == tx_q->desc_count) {
			tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
			i = 0;
			tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_q->tx_buf[i];
	}

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(skb);

	/* write last descriptor with RS and EOP bits */
	td_cmd |= params->eop_cmd;
	idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
	i = idpf_tx_splitq_bump_ntu(tx_q, i);

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_q->txq_grp->num_completions_pending++;

	/* record bytecount for BQL */
	nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
	netdev_tx_sent_queue(nq, first->bytecount);

	idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
/**
 * idpf_tso - computes mss and TSO length to prepare for TSO
 * @skb: pointer to skb
 * @off: pointer to struct that holds offload parameters
 *
 * Returns error (negative) if TSO was requested but cannot be applied to the
 * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
 *
 * Edits the skb headers in place (zeroed IP lengths, payload removed from
 * the L4 checksum) and fills @off with mss/segment/header-length info.
 */
static int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_start;
	int err;

	/* not a GSO skb — nothing to do */
	if (!shinfo->gso_size)
		return 0;

	/* headers must be writable before we edit them below */
	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else if (ip.v6->version == 6) {
		ip.v6->payload_len = 0;
	}

	l4_start = skb_transport_offset(skb);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
	case SKB_GSO_TCPV4:
	case SKB_GSO_TCPV6:
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));

		/* compute length of segmentation header */
		off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
		break;
	case SKB_GSO_UDP_L4:
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of segmentation header */
		off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
		l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
		break;
	default:
		return -EINVAL;
	}

	off->tso_len = skb->len - off->tso_hdr_len;
	off->mss = shinfo->gso_size;
	off->tso_segs = shinfo->gso_segs;

	off->tx_flags |= IDPF_TX_FLAGS_TSO;

	return 1;
}
/**
 * __idpf_chk_linearize - Check skb is not using too many buffers
 * @skb: send buffer
 * @max_bufs: maximum number of buffers
 *
 * For TSO we need to count the TSO header and segment payload separately. As
 * such we need to check cases where we have max_bufs-1 fragments or more as we
 * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
 * for the segment payload in the first descriptor, and another max_buf-1 for
 * the fragments.
 *
 * Return: true if the skb must be linearized before transmit, i.e. some
 * window of max_bufs-2 consecutive fragments fails to cover gso_size bytes.
 */
static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than max_bufs - 1 */
	nr_frags = shinfo->nr_frags;
	if (nr_frags < (max_bufs - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of max_bufs-2 fragments totals at least gso_size.
	 */
	nr_frags -= max_bufs - 2;
	frag = &shinfo->frags[0];

	/* Initialize size to the negative value of gso_size minus 1. We use
	 * this as the worst case scenario in which the frag ahead of us only
	 * provides one byte which is why we are limited to max_bufs-2
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - shinfo->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &shinfo->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > IDPF_TX_MAX_DESC_DATA) {
			/* mirror the 4K-alignment chunking done when the
			 * oversized frag is actually mapped
			 */
			int align_pad = -(skb_frag_off(stale)) &
					(IDPF_TX_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
				stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
			} while (stale_size > IDPF_TX_MAX_DESC_DATA);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
/**
 * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
 * @skb: send buffer
 * @max_bufs: maximum scatter gather buffers for single packet
 * @count: number of buffers this packet needs
 *
 * Make sure we don't exceed maximum scatter gather buffers for a single
 * packet. We have to do some special checking around the boundary (max_bufs-1)
 * if TSO is on since we need count the TSO header and payload separately.
 * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
 * header, 1 for segment payload, and then 7 for the fragments.
 */
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
			       unsigned int count)
{
	if (likely(count < max_bufs))
		return false;

	/* TSO needs the sliding-window frag check; non-TSO is a simple
	 * hard limit
	 */
	return skb_is_gso(skb) ? __idpf_chk_linearize(skb, max_bufs)
			       : count > max_bufs;
}
/**
* idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
* @txq: queue to put context descriptor on
*
* Since the TX buffer rings mimics the descriptor ring, update the tx buffer
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_flex_tx_ctx_desc *
idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
{
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
/* grab the next descriptor */
desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
return desc;
}
/**
 * idpf_tx_drop_skb - free the SKB and bump tail if necessary
 * @tx_q: queue to send buffer on
 * @skb: pointer to skb
 *
 * Counts the drop, pushes any pending tail value to hardware (in case a
 * previous xmit_more deferred it), and frees the skb.
 */
static netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q,
				    struct sk_buff *skb)
{
	u64_stats_update_begin(&tx_q->stats_sync);
	u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
	u64_stats_update_end(&tx_q->stats_sync);

	idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
/**
 * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
 * @skb: send buffer
 * @tx_q: queue to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
					struct idpf_queue *tx_q)
{
	struct idpf_tx_splitq_params tx_params = { };
	struct idpf_tx_buf *first;
	unsigned int count;
	int tso;

	count = idpf_tx_desc_count_required(skb);

	/* linearize if the skb would need more buffers than HW supports */
	if (idpf_chk_linearize(skb, tx_q->tx_max_bufs, count)) {
		if (__skb_linearize(skb))
			return idpf_tx_drop_skb(tx_q, skb);

		count = idpf_size_to_txd_count(skb->len);
		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_inc(&tx_q->q_stats.tx.linearize);
		u64_stats_update_end(&tx_q->stats_sync);
	}

	/* < 0: TSO requested but invalid; 0: no TSO; 1: TSO needed */
	tso = idpf_tso(skb, &tx_params.offload);
	if (unlikely(tso < 0))
		return idpf_tx_drop_skb(tx_q, skb);

	/* Check for splitq specific TX resources; reserve a cache line of
	 * extra descriptors plus one for the TSO context descriptor
	 */
	count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
	if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);

		return NETDEV_TX_BUSY;
	}

	if (tso) {
		/* If tso is needed, set up context desc */
		struct idpf_flex_tx_ctx_desc *ctx_desc =
			idpf_tx_splitq_get_ctx_desc(tx_q);

		ctx_desc->tso.qw1.cmd_dtype =
			cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
				    IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
		ctx_desc->tso.qw0.flex_tlen =
			cpu_to_le32(tx_params.offload.tso_len &
				    IDPF_TXD_FLEX_CTX_TLEN_M);
		ctx_desc->tso.qw0.mss_rt =
			cpu_to_le16(tx_params.offload.mss &
				    IDPF_TXD_FLEX_CTX_MSS_RT_M);
		ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;

		u64_stats_update_begin(&tx_q->stats_sync);
		u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
		u64_stats_update_end(&tx_q->stats_sync);
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_q->tx_buf[tx_q->next_to_use];
	first->skb = skb;

	if (tso) {
		/* for TSO, account the duplicated headers of every extra
		 * segment toward the byte count
		 */
		first->gso_segs = tx_params.offload.tso_segs;
		first->bytecount = skb->len +
			((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
	} else {
		first->gso_segs = 1;
		first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
		tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
		/* Set the RE bit to catch any packets that may have not been
		 * stashed during RS completion cleaning. MIN_GAP is set to
		 * MIN_RING size to ensure it will be set at least once each
		 * time around the ring.
		 */
		if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
			tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
			tx_q->txq_grp->num_completions_pending++;
		}

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;

	} else {
		tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
		tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
	}

	idpf_tx_splitq_map(tx_q, &tx_params, first);

	return NETDEV_TX_OK;
}
/**
* idpf_tx_splitq_start - Selects the right Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
struct net_device *netdev)
{
struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
struct idpf_queue *tx_q;
if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
tx_q = vport->txqs[skb_get_queue_mapping(skb)];
/* hardware can't handle really short frames, hardware padding works
* beyond this point
*/
if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
return NETDEV_TX_OK;
}
return idpf_tx_splitq_frame(skb, tx_q);
}
/**
* idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
* @irq: interrupt number
......
......@@ -5,6 +5,8 @@
#define _IDPF_TXRX_H_
#include <net/page_pool/helpers.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
......@@ -67,18 +69,62 @@
#define IDPF_PACKET_HDR_PAD \
(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
/* Minimum number of descriptors between 2 descriptors with the RE bit set;
* only relevant in flow scheduling mode
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
(&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
(&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
#define IDPF_FLEX_TX_DESC(txq, i) \
(&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
(&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
#define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
(txq)->desc_count >> 2)
#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
/* Determine the absolute number of completions pending, i.e. the number of
* completions that are expected to arrive on the TX completion queue.
*/
#define IDPF_TX_COMPLQ_PENDING(txq) \
(((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
0 : U64_MAX) + \
(txq)->num_completions_pending - (txq)->complq->num_completions)
#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
/* Adjust the generation for the completion tag and wrap if necessary */
#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
0 : (txq)->compl_tag_cur_gen)
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
#define IDPF_TX_FLAGS_TSO BIT(0)
/* One TX ring slot, interpreted per the queue's scheduling mode */
union idpf_tx_flex_desc {
	struct idpf_flex_tx_desc q; /* queue based scheduling */
	struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
};
/**
* struct idpf_tx_buf
* @next_to_watch: Next descriptor to clean
* @skb: Pointer to the skb
* @dma: DMA address
* @len: DMA length
* @bytecount: Number of bytes
* @gso_segs: Number of GSO segments
* @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
* with completion tag returned in buffer completion event.
* Because the completion tag is expected to be the same in all
......@@ -94,6 +140,13 @@
* this buffer entry should be skipped.
*/
struct idpf_tx_buf {
void *next_to_watch;
struct sk_buff *skb;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
unsigned int bytecount;
unsigned short gso_segs;
union {
int compl_tag;
......@@ -117,6 +170,64 @@ struct idpf_buf_lifo {
struct idpf_tx_stash **bufs;
};
/**
 * struct idpf_tx_offload_params - Offload parameters for a given packet
 * @tx_flags: Feature flags enabled for this packet (e.g. IDPF_TX_FLAGS_TSO)
 * @tso_len: Total length of payload to segment
 * @mss: Segment size
 * @tso_segs: Number of segments to be sent
 * @tso_hdr_len: Length of headers to be duplicated
 * @td_cmd: Command field to be inserted into descriptor
 */
struct idpf_tx_offload_params {
	u32 tx_flags;

	u32 tso_len;
	u16 mss;
	u16 tso_segs;
	u16 tso_hdr_len;

	u16 td_cmd;
};
/**
 * struct idpf_tx_splitq_params
 * @dtype: General descriptor info
 * @eop_cmd: Type of EOP
 * @compl_tag: Associated tag for completion
 * @td_tag: Descriptor tunneling tag
 * @offload: Offload parameters
 *
 * @compl_tag and @td_tag share storage: flow-based scheduling descriptors
 * carry the completion tag, queue-based ones carry the L2 tag.
 */
struct idpf_tx_splitq_params {
	enum idpf_tx_desc_dtype_value dtype;
	u16 eop_cmd;
	union {
		u16 compl_tag;
		u16 td_tag;
	};

	struct idpf_tx_offload_params offload;
};
#define IDPF_TX_MIN_PKT_LEN 17
#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
sizeof(struct idpf_flex_tx_desc))
#define IDPF_TX_DESCS_FOR_CTX 1
/* TX descriptors needed, worst case */
#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
IDPF_TX_DESCS_PER_CACHE_LINE + \
IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
* the nearest 4K which represents our maximum read request size.
*/
#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
#define IDPF_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
......@@ -344,6 +455,23 @@ struct idpf_q_vector {
char *name;
};
/* Per-queue Rx statistics; empty placeholder until Rx counters are added */
struct idpf_rx_queue_stats {
	/* stub */
};
/**
 * struct idpf_tx_queue_stats - per-queue Tx counters
 * @lso_pkts: LSO/TSO packets transmitted
 * @linearize: packets that had to be linearized before transmit
 * @q_busy: times the queue was stopped for lack of descriptors
 * @skb_drops: skbs dropped on the transmit path
 * @dma_map_errs: DMA mapping failures
 *
 * Counters use u64_stats_t; presumably updated under the queue's
 * stats_sync — confirm against the users in idpf_txrx.c.
 */
struct idpf_tx_queue_stats {
	u64_stats_t lso_pkts;
	u64_stats_t linearize;
	u64_stats_t q_busy;
	u64_stats_t skb_drops;
	u64_stats_t dma_map_errs;
};
/* A queue carries either Rx or Tx counters, never both */
union idpf_queue_stats {
	struct idpf_rx_queue_stats rx;
	struct idpf_tx_queue_stats tx;
};
/* Interrupt throttling rate (ITR) settings */
#define IDPF_ITR_DYNAMIC	1
/* presumably 0x32 = 50 usec granularity units ~= 20K interrupts/sec — TODO
 * confirm against hardware spec
 */
#define IDPF_ITR_20K		0x0032
#define IDPF_ITR_TX_DEF		IDPF_ITR_20K
......@@ -382,6 +510,8 @@ struct idpf_q_vector {
* @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
* only relevant to RX queue.
* @flags: See enum idpf_queue_flags_t
* @q_stats: See union idpf_queue_stats
* @stats_sync: See struct u64_stats_sync
* @rx_hsplit_en: RX headsplit enable
* @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size
......@@ -395,6 +525,10 @@ struct idpf_q_vector {
* @desc_ring: Descriptor ring memory
* @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
* @tx_min_pkt_len: Min supported packet length
* @num_completions: Only relevant for TX completion queue. It tracks the
* number of completions received to compare against the
* number of completions pending, as accumulated by the
* TX queues.
* @buf_stack: Stack of empty buffers to store buffer info for out of order
* buffer completions. See struct idpf_buf_lifo.
* @compl_tag_bufid_m: Completion tag buffer id mask
......@@ -450,6 +584,9 @@ struct idpf_queue {
u16 next_to_alloc;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
union idpf_queue_stats q_stats;
struct u64_stats_sync stats_sync;
bool rx_hsplit_en;
u16 rx_hbuf_size;
u16 rx_buf_size;
......@@ -465,6 +602,8 @@ struct idpf_queue {
u16 tx_max_bufs;
u8 tx_min_pkt_len;
u32 num_completions;
struct idpf_buf_lifo buf_stack;
u16 compl_tag_bufid_m;
......@@ -587,6 +726,42 @@ struct idpf_txq_group {
u32 num_completions_pending;
};
/**
 * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
 * @size: transmit request size in bytes
 *
 * A frag of 16K or more must be split across multiple descriptors.  Hardware
 * alignment restrictions (4K max read request) cap the usable payload per
 * descriptor at IDPF_TX_MAX_DESC_DATA_ALIGNED (12K), so round the byte count
 * up to whole descriptors at that granularity.
 */
static inline u32 idpf_size_to_txd_count(unsigned int size)
{
	return (size + IDPF_TX_MAX_DESC_DATA_ALIGNED - 1) /
	       IDPF_TX_MAX_DESC_DATA_ALIGNED;
}
void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size);
void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
struct idpf_tx_splitq_params *params,
u16 td_cmd, u16 size);
/**
 * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
 * @desc: descriptor to populate
 * @params: pointer to tx params struct
 * @td_cmd: command to be filled in desc
 * @size: size of buffer
 *
 * Dispatches to the flow based scheduling builder unless @params requests the
 * L2TAG1/L2TAG2 context-base dtype, which uses the queue based builder.
 */
static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
					     struct idpf_tx_splitq_params *params,
					     u16 td_cmd, u16 size)
{
	if (params->dtype != IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
		idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
	else
		idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
}
/**
* idpf_alloc_page - Allocate a new RX buffer from the page pool
* @pool: page_pool to allocate from
......@@ -634,6 +809,8 @@ void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
struct net_device *netdev);
bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
u16 cleaned_count);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment