Commit fcd1ecc8 authored by Jakub Kicinski

Merge branch 'cxgb4-ch_ktls-fixes-in-nic-tls-code'

Rohit Maheshwari says:

====================
cxgb4/ch_ktls: Fixes in nic tls code

This series fixes multiple NIC kTLS issues. It is broken into 12
patches.

Patch 1 avoids classifying a packet as TLS based on the decrypted bit.
A retransmit packet can carry the TLS handshake and finish (for
encryption); the decrypted bit won't be set there, so we can't rely on
it (see the sketch below).
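
As an illustration, the check the series switches to (added to the ULD
header in the diff below) keys off the socket's TLS offload state,
which is stable across retransmits:

  static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
  {
          return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
  }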

Patch 2 adds support for linear SKBs. SKBs were assumed to be
non-linear; the length extraction is corrected accordingly (see the
sketch below).
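
A sketch of the corrected extraction, as used in the cxgb4_eth_xmit and
chcr_ktls_xmit hunks below: the payload length is derived from the
header offsets, which holds for both linear and non-linear SKBs,
whereas skb->data_len counts only paged data and is 0 for a linear skb:

  /* TLS payload starts right after the TCP header */
  u32 skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
  u32 data_len = skb->len - skb_offset;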

Patch 3 fixes the checksum offload update in the WR.

Patch 4 fixes a kernel panic caused by allocating a new skb for each
record. With the fix, the driver reuses the same skb to send out each
TLS record (partial data) of that SKB (see the sketch below).
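
A minimal sketch of the reuse scheme from chcr_ktls_xmit_wr_complete()
in the diff below: instead of copying frags into a fresh skb per
record, the driver takes an extra reference for every WR except the
last, so the original skb stays alive until each WR's Tx reclaim frees
it:

  if (!is_last_wr)
          skb_get(skb);      /* one reference per in-flight WR */

  sgl_sdesc->skb = skb;      /* dropped when the WR completes */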

Patch 5 fixes the case where the skb data length is smaller than the
remaining data of the record.

Patch 6 fixes the handling of SKBs that carry only the TLS header,
where the header does not start at the beginning of the payload.

Patch 7 avoids sending the extra data that is used to make a record
16-byte aligned. We don't need to retransmit those few extra bytes.

Patch 8 handles the case where a retransmit packet carries the initial
TLS exchanges, which come before the TLS start marker.

Patch 9 fixes the problem of the skb being freed before the HW knows
about the TCP FIN.

Patch 10 handles small packets which have only partial TAG bytes. The
HW can't handle those, hence SW crypto is used for such packets (see
the sketch below).
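
A minimal sketch of the fallback added as chcr_ktls_sw_fallback() in
the diff below: the record is encrypted in software through the TLS
core's tls_encrypt_skb() and then sent out as a regular (tunnel)
packet:

  struct sk_buff *nskb = tls_encrypt_skb(skb);  /* SW encryption */

  if (nskb && !chcr_ktls_tunnel_pkt(tx_info, nskb, q))
          atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);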

Patch 11 corrects a potential TCB update problem.

Patch 12 stops the queue if it reaches the threshold value (see the
sketch below).
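
The stop condition, repeated in each WR-posting path in the diff below,
is a sketch of the usual sge pattern: stop the queue early and set the
EQUEQ/EQUIQ flags so the firmware generates an egress update that lets
the driver restart it:

  if (unlikely(credits < ETHTXQ_STOP_THRES)) {
          chcr_eth_txq_stop(q);
          wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
  }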

v1->v2:
- Corrected fixes tag issue.
- Marked chcr_ktls_sw_fallback() static.

v2->v3:
- Replaced GFP_KERNEL with GFP_ATOMIC.
- Removed mixed fixes.

v3->v4:
- Corrected fixes tag issue.

v4->v5:
- Separated mixed fixes from patch 4.

v5->v6:
- Moved the Fixes tag to the end.
====================

Link: https://lore.kernel.org/r/20201109105142.15398-1-rohitm@chelsio.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 36118230 83a95df0
@@ -2124,6 +2124,9 @@ void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
 		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
 		     const dma_addr_t *addr);
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+			     struct ulptx_sgl *sgl, u64 *end,
+			     const dma_addr_t *addr, u32 start, u32 send_len);
 void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
 		    u16 vlan);
......
@@ -3573,6 +3573,8 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
 	seq_printf(seq, "TX trim pkts : %20llu\n",
 		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+	seq_printf(seq, "TX sw fallback : %20llu\n",
+		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_fallback));
 	while (i < MAX_NPORTS) {
 		ktls_port = &adap->ch_ktls_stats.ktls_port[i];
 		seq_printf(seq, "Port %d\n", i);
......
@@ -1176,6 +1176,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
 	txq = netdev_pick_tx(dev, skb, sb_dev);
 	if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
 	    skb->encapsulation ||
+	    cxgb4_is_ktls_skb(skb) ||
 	    (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
 		txq = txq % pi->nqsets;
......
@@ -388,6 +388,7 @@ struct ch_ktls_stats_debug {
 	atomic64_t ktls_tx_retransmit_pkts;
 	atomic64_t ktls_tx_complete_pkts;
 	atomic64_t ktls_tx_trimmed_pkts;
+	atomic64_t ktls_tx_fallback;
 };
 #endif
@@ -493,6 +494,11 @@ struct cxgb4_uld_info {
 #endif
 };
 
+static inline bool cxgb4_is_ktls_skb(struct sk_buff *skb)
+{
+	return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
+}
+
 void cxgb4_uld_enable(struct adapter *adap);
 void cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
 int cxgb4_unregister_uld(enum cxgb4_uld type);
......
@@ -890,6 +890,114 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
 }
 EXPORT_SYMBOL(cxgb4_write_sgl);
 
+/* cxgb4_write_partial_sgl - populate SGL for partial packet
+ * @skb: the packet
+ * @q: the Tx queue we are writing into
+ * @sgl: starting location for writing the SGL
+ * @end: points right after the end of the SGL
+ * @addr: the list of bus addresses for the SGL elements
+ * @start: start offset in the SKB where partial data starts
+ * @len: length of data from @start to send out
+ *
+ * This API will handle sending out partial data of a skb if required.
+ * Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
+ * and @len will decide how much data after @start offset to send out.
+ */
+void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
+			     struct ulptx_sgl *sgl, u64 *end,
+			     const dma_addr_t *addr, u32 start, u32 len)
+{
+	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
+	u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+	struct skb_shared_info *si = skb_shinfo(skb);
+	u8 i = 0, frag_idx = 0, nfrags = 0;
+	skb_frag_t *frag;
+
+	/* Fill the first SGL either from linear data or from partial
+	 * frag based on @start.
+	 */
+	if (unlikely(start < skb_linear_data_len)) {
+		frag_size = min(len, skb_linear_data_len - start);
+		sgl->len0 = htonl(frag_size);
+		sgl->addr0 = cpu_to_be64(addr[0] + start);
+		len -= frag_size;
+		nfrags++;
+	} else {
+		start -= skb_linear_data_len;
+		frag = &si->frags[frag_idx];
+		frag_size = skb_frag_size(frag);
+		/* find the first frag */
+		while (start >= frag_size) {
+			start -= frag_size;
+			frag_idx++;
+			frag = &si->frags[frag_idx];
+			frag_size = skb_frag_size(frag);
+		}
+
+		frag_size = min(len, skb_frag_size(frag) - start);
+		sgl->len0 = cpu_to_be32(frag_size);
+		sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
+		len -= frag_size;
+		nfrags++;
+		frag_idx++;
+	}
+
+	/* If the entire partial data fit in one SGL, then send it out
+	 * now.
+	 */
+	if (!len)
+		goto done;
+
+	/* Most of the complexity below deals with the possibility we hit the
+	 * end of the queue in the middle of writing the SGL. For this case
+	 * only we create the SGL in a temporary buffer and then copy it.
+	 */
+	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+	/* If the skb couldn't fit in first SGL completely, fill the
+	 * rest of the frags in subsequent SGLs. Note that each SGL
+	 * pair can store 2 frags.
+	 */
+	while (len) {
+		frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+		to->len[i & 1] = cpu_to_be32(frag_size);
+		to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
+		if (i && (i & 1))
+			to++;
+		nfrags++;
+		frag_idx++;
+		i++;
+		len -= frag_size;
+	}
+
+	/* If we ended in an odd boundary, then set the second SGL's
+	 * length in the pair to 0.
+	 */
+	if (i & 1)
+		to->len[1] = cpu_to_be32(0);
+
+	/* Copy from temporary buffer to Tx ring, in case we hit the
+	 * end of the queue in the middle of writing the SGL.
+	 */
+	if (unlikely((u8 *)end > (u8 *)q->stat)) {
+		u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+		if (likely(part0))
+			memcpy(sgl->sge, buf, part0);
+		part1 = (u8 *)end - (u8 *)q->stat;
+		memcpy(q->desc, (u8 *)buf + part0, part1);
+		end = (void *)q->desc + part1;
+	}
+
+	/* 0-pad to multiple of 16 */
+	if ((uintptr_t)end & 8)
+		*end = 0;
+done:
+	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
+			      ULPTX_NSGE_V(nfrags));
+}
+EXPORT_SYMBOL(cxgb4_write_partial_sgl);
+
 /* This function copies 64 byte coalesced work request to
  * memory mapped BAR2 space. For coalesced WR SGE fetches
  * data from the FIFO instead of from Host.
@@ -1422,7 +1530,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif /* CHELSIO_IPSEC_INLINE */
 
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
-	if (skb->decrypted)
+	if (cxgb4_is_ktls_skb(skb) &&
+	    (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
 		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
 #endif /* CHELSIO_TLS_DEVICE */
......
@@ -14,6 +14,50 @@
 static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
+/* chcr_get_nfrags_to_send: get the remaining nfrags after start offset
+ * @skb: skb
+ * @start: start offset.
+ * @len: how much data to send after @start
+ */
+static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
+{
+	struct skb_shared_info *si = skb_shinfo(skb);
+	u32 frag_size, skb_linear_data_len = skb_headlen(skb);
+	u8 nfrags = 0, frag_idx = 0;
+	skb_frag_t *frag;
+
+	/* if its a linear skb then return 1 */
+	if (!skb_is_nonlinear(skb))
+		return 1;
+
+	if (unlikely(start < skb_linear_data_len)) {
+		frag_size = min(len, skb_linear_data_len - start);
+		start = 0;
+	} else {
+		start -= skb_linear_data_len;
+
+		frag = &si->frags[frag_idx];
+		frag_size = skb_frag_size(frag);
+
+		while (start >= frag_size) {
+			start -= frag_size;
+			frag_idx++;
+			frag = &si->frags[frag_idx];
+			frag_size = skb_frag_size(frag);
+		}
+		frag_size = min(len, skb_frag_size(frag) - start);
+	}
+	len -= frag_size;
+	nfrags++;
+
+	while (len) {
+		frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
+		len -= frag_size;
+		nfrags++;
+		frag_idx++;
+	}
+	return nfrags;
+}
+
 static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
 /*
  * chcr_ktls_save_keys: calculate and save crypto keys.
@@ -689,7 +733,8 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
 }
 
 static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
-					  u32 tid, void *pos, u16 word, u64 mask,
+					  u32 tid, void *pos, u16 word,
+					  struct sge_eth_txq *q, u64 mask,
 					  u64 val, u32 reply)
 {
 	struct cpl_set_tcb_field_core *cpl;
@@ -698,7 +743,10 @@ static void *__chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
 
 	/* ULP_TXPKT */
 	txpkt = pos;
-	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
+	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) |
+				ULP_TXPKT_CHANNELID_V(tx_info->port_id) |
+				ULP_TXPKT_FID_V(q->q.cntxt_id) |
+				ULP_TXPKT_RO_F);
 	txpkt->len = htonl(DIV_ROUND_UP(CHCR_SET_TCB_FIELD_LEN, 16));
 
 	/* ULPTX_IDATA sub-command */
@@ -753,7 +801,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
 		} else {
 			u8 buf[48] = {0};
 
-			__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word,
+			__chcr_write_cpl_set_tcb_ulp(tx_info, tid, buf, word, q,
 						     mask, val, reply);
 
 			return chcr_copy_to_txd(buf, &q->q, pos,
@@ -761,7 +809,7 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
 		}
 	}
 
-	pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word,
+	pos = __chcr_write_cpl_set_tcb_ulp(tx_info, tid, pos, word, q,
 					   mask, val, reply);
 
 	/* check again if we are at the end of the queue */
@@ -783,11 +831,11 @@ static void *chcr_write_cpl_set_tcb_ulp(struct chcr_ktls_info *tx_info,
  */
 static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 				   struct sge_eth_txq *q, u64 tcp_seq,
-				   u64 tcp_ack, u64 tcp_win)
+				   u64 tcp_ack, u64 tcp_win, bool offset)
 {
 	bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
 	struct ch_ktls_port_stats_debug *port_stats;
-	u32 len, cpl = 0, ndesc, wr_len;
+	u32 len, cpl = 0, ndesc, wr_len, wr_mid = 0;
 	struct fw_ulptx_wr *wr;
 	int credits;
 	void *pos;
@@ -803,6 +851,11 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 		return NETDEV_TX_BUSY;
 	}
 
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		chcr_eth_txq_stop(q);
+		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+	}
+
 	pos = &q->q.desc[q->q.pidx];
 	/* make space for WR, we'll fill it later when we know all the cpls
 	 * being sent out and have complete length.
@@ -818,7 +871,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 		cpl++;
 	}
 	/* reset snd una if it's a re-transmit pkt */
-	if (tcp_seq != tx_info->prev_seq) {
+	if (tcp_seq != tx_info->prev_seq || offset) {
 		/* reset snd_una */
 		port_stats =
 			&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -827,7 +880,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 					     TCB_SND_UNA_RAW_V
 					     (TCB_SND_UNA_RAW_M),
 					     TCB_SND_UNA_RAW_V(0), 0);
-		atomic64_inc(&port_stats->ktls_tx_ooo);
+		if (tcp_seq != tx_info->prev_seq)
+			atomic64_inc(&port_stats->ktls_tx_ooo);
 		cpl++;
 	}
 	/* update ack */
@@ -856,7 +910,8 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 		wr->op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
 		wr->cookie = 0;
 		/* fill len in wr field */
-		wr->flowid_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
+		wr->flowid_len16 = htonl(wr_mid |
+					 FW_WR_LEN16_V(DIV_ROUND_UP(len, 16)));
 
 		ndesc = DIV_ROUND_UP(len, 64);
 
 		chcr_txq_advance(&q->q, ndesc);
@@ -865,35 +920,15 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 	return 0;
 }
 
-/*
- * chcr_ktls_skb_copy
- * @nskb - new skb where the frags to be added.
- * @skb - old skb from which frags will be copied.
- */
-static void chcr_ktls_skb_copy(struct sk_buff *skb, struct sk_buff *nskb)
-{
-	int i;
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		skb_shinfo(nskb)->frags[i] = skb_shinfo(skb)->frags[i];
-		__skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
-	}
-
-	skb_shinfo(nskb)->nr_frags = skb_shinfo(skb)->nr_frags;
-	nskb->len += skb->data_len;
-	nskb->data_len = skb->data_len;
-	nskb->truesize += skb->data_len;
-}
-
 /*
  * chcr_ktls_get_tx_flits
  * returns number of flits to be sent out, it includes key context length, WR
  * size and skb fragments.
  */
 static unsigned int
-chcr_ktls_get_tx_flits(const struct sk_buff *skb, unsigned int key_ctx_len)
+chcr_ktls_get_tx_flits(u32 nr_frags, unsigned int key_ctx_len)
 {
-	return chcr_sgl_len(skb_shinfo(skb)->nr_frags) +
+	return chcr_sgl_len(nr_frags) +
 	       DIV_ROUND_UP(key_ctx_len + CHCR_KTLS_WR_SIZE, 8);
 }
@@ -957,8 +992,10 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 	struct tcphdr *tcp;
 	int len16, pktlen;
 	struct iphdr *ip;
+	u32 wr_mid = 0;
 	int credits;
 	u8 buf[150];
+	u64 cntrl1;
 	void *pos;
 
 	iplen = skb_network_header_len(skb);
@@ -967,7 +1004,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 	/* packet length = eth hdr len + ip hdr len + tcp hdr len
 	 * (including options).
 	 */
-	pktlen = skb->len - skb->data_len;
+	pktlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
 
 	ctrl = sizeof(*cpl) + pktlen;
 	len16 = DIV_ROUND_UP(sizeof(*wr) + ctrl, 16);
@@ -980,6 +1017,11 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		chcr_eth_txq_stop(q);
+		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+	}
+
 	pos = &q->q.desc[q->q.pidx];
 	wr = pos;
@@ -987,7 +1029,7 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 
 	wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
 			       FW_WR_IMMDLEN_V(ctrl));
-	wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(len16));
+	wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
 	wr->r3 = 0;
 
 	cpl = (void *)(wr + 1);
@@ -997,22 +1039,28 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 			   TXPKT_PF_V(tx_info->adap->pf));
 	cpl->pack = 0;
 	cpl->len = htons(pktlen);
-	/* checksum offload */
-	cpl->ctrl1 = 0;
-
-	pos = cpl + 1;
 
 	memcpy(buf, skb->data, pktlen);
 	if (tx_info->ip_family == AF_INET) {
 		/* we need to correct ip header len */
 		ip = (struct iphdr *)(buf + maclen);
 		ip->tot_len = htons(pktlen - maclen);
+		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		ip6 = (struct ipv6hdr *)(buf + maclen);
 		ip6->payload_len = htons(pktlen - maclen - iplen);
+		cntrl1 = TXPKT_CSUM_TYPE_V(TX_CSUM_TCPIP6);
 #endif
 	}
 
+	cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+		  TXPKT_IPHDR_LEN_V(iplen);
+	/* checksum offload */
+	cpl->ctrl1 = cpu_to_be64(cntrl1);
+	pos = cpl + 1;
+
 	/* now take care of the tcp header, if fin is not set then clear push
 	 * bit as well, and if fin is set, it will be sent at the last so we
 	 * need to update the tcp sequence number as per the last packet.
@@ -1031,71 +1079,6 @@ chcr_ktls_write_tcp_options(struct chcr_ktls_info *tx_info, struct sk_buff *skb,
 	return 0;
 }
 
-/* chcr_ktls_skb_shift - Shifts request length paged data from skb to another.
- * @tgt- buffer into which tail data gets added
- * @skb- buffer from which the paged data comes from
- * @shiftlen- shift up to this many bytes
- */
-static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
-			       int shiftlen)
-{
-	skb_frag_t *fragfrom, *fragto;
-	int from, to, todo;
-
-	WARN_ON(shiftlen > skb->data_len);
-
-	todo = shiftlen;
-	from = 0;
-	to = 0;
-	fragfrom = &skb_shinfo(skb)->frags[from];
-
-	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
-		fragfrom = &skb_shinfo(skb)->frags[from];
-		fragto = &skb_shinfo(tgt)->frags[to];
-
-		if (todo >= skb_frag_size(fragfrom)) {
-			*fragto = *fragfrom;
-			todo -= skb_frag_size(fragfrom);
-			from++;
-			to++;
-		} else {
-			__skb_frag_ref(fragfrom);
-			skb_frag_page_copy(fragto, fragfrom);
-			skb_frag_off_copy(fragto, fragfrom);
-			skb_frag_size_set(fragto, todo);
-
-			skb_frag_off_add(fragfrom, todo);
-			skb_frag_size_sub(fragfrom, todo);
-			todo = 0;
-			to++;
-			break;
-		}
-	}
-
-	/* Ready to "commit" this state change to tgt */
-	skb_shinfo(tgt)->nr_frags = to;
-
-	/* Reposition in the original skb */
-	to = 0;
-	while (from < skb_shinfo(skb)->nr_frags)
-		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
-
-	skb_shinfo(skb)->nr_frags = to;
-
-	WARN_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
-
-	skb->len -= shiftlen;
-	skb->data_len -= shiftlen;
-	skb->truesize -= shiftlen;
-	tgt->len += shiftlen;
-	tgt->data_len += shiftlen;
-	tgt->truesize += shiftlen;
-
-	return shiftlen;
-}
-
 /*
  * chcr_ktls_xmit_wr_complete: This sends out the complete record. If an skb
  * received has partial end part of the record, send out the complete record, so
@@ -1111,6 +1094,8 @@ static int chcr_ktls_skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
 static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
 				      struct chcr_ktls_info *tx_info,
 				      struct sge_eth_txq *q, u32 tcp_seq,
+				      bool is_last_wr, u32 data_len,
+				      u32 skb_offset, u32 nfrags,
 				      bool tcp_push, u32 mss)
 {
 	u32 len16, wr_mid = 0, flits = 0, ndesc, cipher_start;
@@ -1126,7 +1111,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
 	u64 *end;
 
 	/* get the number of flits required */
-	flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len);
+	flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len);
 	/* number of descriptors */
 	ndesc = chcr_flits_to_desc(flits);
 	/* check if enough credits available */
@@ -1155,6 +1140,9 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
+	if (!is_last_wr)
+		skb_get(skb);
+
 	pos = &q->q.desc[q->q.pidx];
 	end = (u64 *)pos + flits;
 	/* FW_ULPTX_WR */
@@ -1187,7 +1175,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
 		 CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
 		 CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
 		 CPL_TX_SEC_PDU_IVINSRTOFST_V(TLS_HEADER_SIZE + 1));
-	cpl->pldlen = htonl(skb->data_len);
+	cpl->pldlen = htonl(data_len);
 
 	/* encryption should start after tls header size + iv size */
 	cipher_start = TLS_HEADER_SIZE + tx_info->iv_size + 1;
/* CPL_TX_DATA */ /* CPL_TX_DATA */
tx_data = (void *)pos; tx_data = (void *)pos;
OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid)); OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(skb->data_len)); tx_data->len = htonl(TX_DATA_MSS_V(mss) | TX_LENGTH_V(data_len));
tx_data->rsvd = htonl(tcp_seq); tx_data->rsvd = htonl(tcp_seq);
@@ -1249,8 +1237,8 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
 	}
 
 	/* send the complete packet except the header */
-	cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
-			sgl_sdesc->addr);
+	cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+				skb_offset, data_len);
 	sgl_sdesc->skb = skb;
 
 	chcr_txq_advance(&q->q, ndesc);
@@ -1282,10 +1270,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
 				   struct sge_eth_txq *q,
 				   u32 tcp_seq, bool tcp_push, u32 mss,
 				   u32 tls_rec_offset, u8 *prior_data,
-				   u32 prior_data_len)
+				   u32 prior_data_len, u32 data_len,
+				   u32 skb_offset)
 {
+	u32 len16, wr_mid = 0, cipher_start, nfrags;
 	struct adapter *adap = tx_info->adap;
-	u32 len16, wr_mid = 0, cipher_start;
 	unsigned int flits = 0, ndesc;
 	int credits, left, last_desc;
 	struct tx_sw_desc *sgl_sdesc;
@@ -1298,10 +1287,11 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
 	void *pos;
 	u64 *end;
 
+	nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
 	/* get the number of flits required, it's a partial record so 2 flits
 	 * (AES_BLOCK_SIZE) will be added.
 	 */
-	flits = chcr_ktls_get_tx_flits(skb, tx_info->key_ctx_len) + 2;
+	flits = chcr_ktls_get_tx_flits(nfrags, tx_info->key_ctx_len) + 2;
 
 	/* get the correct 8 byte IV of this record */
 	iv_record = cpu_to_be64(tx_info->iv + tx_info->record_no);
/* If it's a middle record and not 16 byte aligned to run AES CTR, need /* If it's a middle record and not 16 byte aligned to run AES CTR, need
...@@ -1373,7 +1363,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb, ...@@ -1373,7 +1363,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | htonl(CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) | CPL_TX_SEC_PDU_CPLLEN_V(CHCR_CPL_TX_SEC_PDU_LEN_64BIT) |
CPL_TX_SEC_PDU_IVINSRTOFST_V(1)); CPL_TX_SEC_PDU_IVINSRTOFST_V(1));
cpl->pldlen = htonl(skb->data_len + AES_BLOCK_LEN + prior_data_len); cpl->pldlen = htonl(data_len + AES_BLOCK_LEN + prior_data_len);
cpl->aadstart_cipherstop_hi = cpl->aadstart_cipherstop_hi =
htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start)); htonl(CPL_TX_SEC_PDU_CIPHERSTART_V(cipher_start));
cpl->cipherstop_lo_authinsert = 0; cpl->cipherstop_lo_authinsert = 0;
@@ -1404,7 +1394,7 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
 	tx_data = (void *)pos;
 	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
 	tx_data->len = htonl(TX_DATA_MSS_V(mss) |
-			     TX_LENGTH_V(skb->data_len + prior_data_len));
+			     TX_LENGTH_V(data_len + prior_data_len));
 	tx_data->rsvd = htonl(tcp_seq);
 	tx_data->flags = htonl(TX_BYPASS_F);
 	if (tcp_push)
@@ -1437,8 +1427,8 @@ static int chcr_ktls_xmit_wr_short(struct sk_buff *skb,
 	if (prior_data_len)
 		pos = chcr_copy_to_txd(prior_data, &q->q, pos, 16);
 	/* send the complete packet except the header */
-	cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
-			sgl_sdesc->addr);
+	cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+				skb_offset, data_len);
 	sgl_sdesc->skb = skb;
 
 	chcr_txq_advance(&q->q, ndesc);
@@ -1466,6 +1456,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
 				 struct sk_buff *skb, u32 tcp_seq, u32 mss,
 				 bool tcp_push, struct sge_eth_txq *q,
 				 u32 port_id, u8 *prior_data,
+				 u32 data_len, u32 skb_offset,
 				 u32 prior_data_len)
 {
 	int credits, left, len16, last_desc;
@@ -1475,14 +1466,16 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
 	struct ulptx_idata *idata;
 	struct ulp_txpkt *ulptx;
 	struct fw_ulptx_wr *wr;
-	u32 wr_mid = 0;
+	u32 wr_mid = 0, nfrags;
 	void *pos;
 	u64 *end;
 
 	flits = DIV_ROUND_UP(CHCR_PLAIN_TX_DATA_LEN, 8);
-	flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags);
+	nfrags = chcr_get_nfrags_to_send(skb, skb_offset, data_len);
+	flits += chcr_sgl_len(nfrags);
 	if (prior_data_len)
 		flits += 2;
+
 	/* WR will need len16 */
 	len16 = DIV_ROUND_UP(flits, 2);
 	/* check how many descriptors needed */
@@ -1535,7 +1528,7 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
 	tx_data = (struct cpl_tx_data *)(idata + 1);
 	OPCODE_TID(tx_data) = htonl(MK_OPCODE_TID(CPL_TX_DATA, tx_info->tid));
 	tx_data->len = htonl(TX_DATA_MSS_V(mss) |
-			     TX_LENGTH_V(skb->data_len + prior_data_len));
+			     TX_LENGTH_V(data_len + prior_data_len));
 	/* set tcp seq number */
 	tx_data->rsvd = htonl(tcp_seq);
 	tx_data->flags = htonl(TX_BYPASS_F);
@@ -1559,8 +1552,8 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
 		end = pos + left;
 	}
 	/* send the complete packet including the header */
-	cxgb4_write_sgl(skb, &q->q, pos, end, skb->len - skb->data_len,
-			sgl_sdesc->addr);
+	cxgb4_write_partial_sgl(skb, &q->q, pos, end, sgl_sdesc->addr,
+				skb_offset, data_len);
 	sgl_sdesc->skb = skb;
 
 	chcr_txq_advance(&q->q, ndesc);
@@ -1568,12 +1561,96 @@ static int chcr_ktls_tx_plaintxt(struct chcr_ktls_info *tx_info,
 	return 0;
 }
 
+static int chcr_ktls_tunnel_pkt(struct chcr_ktls_info *tx_info,
+				struct sk_buff *skb,
+				struct sge_eth_txq *q)
+{
+	u32 ctrl, iplen, maclen, wr_mid = 0, len16;
+	struct tx_sw_desc *sgl_sdesc;
+	struct fw_eth_tx_pkt_wr *wr;
+	struct cpl_tx_pkt_core *cpl;
+	unsigned int flits, ndesc;
+	int credits, last_desc;
+	u64 cntrl1, *end;
+	void *pos;
+
+	ctrl = sizeof(*cpl);
+	flits = DIV_ROUND_UP(sizeof(*wr) + ctrl, 8);
+
+	flits += chcr_sgl_len(skb_shinfo(skb)->nr_frags + 1);
+	len16 = DIV_ROUND_UP(flits, 2);
+	/* check how many descriptors needed */
+	ndesc = DIV_ROUND_UP(flits, 8);
+
+	credits = chcr_txq_avail(&q->q) - ndesc;
+	if (unlikely(credits < 0)) {
+		chcr_eth_txq_stop(q);
+		return -ENOMEM;
+	}
+
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		chcr_eth_txq_stop(q);
+		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+	}
+
+	last_desc = q->q.pidx + ndesc - 1;
+	if (last_desc >= q->q.size)
+		last_desc -= q->q.size;
+	sgl_sdesc = &q->q.sdesc[last_desc];
+
+	if (unlikely(cxgb4_map_skb(tx_info->adap->pdev_dev, skb,
+				   sgl_sdesc->addr) < 0)) {
+		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
+		q->mapping_err++;
+		return -ENOMEM;
+	}
+
+	iplen = skb_network_header_len(skb);
+	maclen = skb_mac_header_len(skb);
+
+	pos = &q->q.desc[q->q.pidx];
+	end = (u64 *)pos + flits;
+	wr = pos;
+
+	/* Firmware work request header */
+	wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+			       FW_WR_IMMDLEN_V(ctrl));
+
+	wr->equiq_to_len16 = htonl(wr_mid | FW_WR_LEN16_V(len16));
+	wr->r3 = 0;
+
+	cpl = (void *)(wr + 1);
+
+	/* CPL header */
+	cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT) |
+			   TXPKT_INTF_V(tx_info->tx_chan) |
+			   TXPKT_PF_V(tx_info->adap->pf));
+
+	cpl->pack = 0;
+	cntrl1 = TXPKT_CSUM_TYPE_V(tx_info->ip_family == AF_INET ?
+				   TX_CSUM_TCPIP : TX_CSUM_TCPIP6);
+	cntrl1 |= T6_TXPKT_ETHHDR_LEN_V(maclen - ETH_HLEN) |
+		  TXPKT_IPHDR_LEN_V(iplen);
+	/* checksum offload */
+	cpl->ctrl1 = cpu_to_be64(cntrl1);
+	cpl->len = htons(skb->len);
+
+	pos = cpl + 1;
+
+	cxgb4_write_sgl(skb, &q->q, pos, end, 0, sgl_sdesc->addr);
+	sgl_sdesc->skb = skb;
+	chcr_txq_advance(&q->q, ndesc);
+	cxgb4_ring_tx_db(tx_info->adap, &q->q, ndesc);
+	return 0;
+}
+
 /*
  * chcr_ktls_copy_record_in_skb
  * @nskb - new skb where the frags to be added.
+ * @skb - old skb, to copy socket and destructor details.
  * @record - specific record which has complete 16k record in frags.
  */
 static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
+					 struct sk_buff *skb,
 					 struct tls_record_info *record)
 {
 	int i = 0;
@@ -1588,6 +1665,9 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
 	nskb->data_len = record->len;
 	nskb->len += record->len;
 	nskb->truesize += record->len;
+	nskb->sk = skb->sk;
+	nskb->destructor = skb->destructor;
+	refcount_add(nskb->truesize, &nskb->sk->sk_wmem_alloc);
 }
 
 /*
@@ -1659,7 +1739,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 				 struct sk_buff *skb,
 				 struct tls_record_info *record,
 				 u32 tcp_seq, int mss, bool tcp_push_no_fin,
-				 struct sge_eth_txq *q,
+				 struct sge_eth_txq *q, u32 skb_offset,
 				 u32 tls_end_offset, bool last_wr)
 {
 	struct sk_buff *nskb = NULL;
@@ -1668,30 +1748,37 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
 		nskb = skb;
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
 	} else {
-		dev_kfree_skb_any(skb);
-
-		nskb = alloc_skb(0, GFP_KERNEL);
-		if (!nskb)
+		nskb = alloc_skb(0, GFP_ATOMIC);
+		if (!nskb) {
+			dev_kfree_skb_any(skb);
 			return NETDEV_TX_BUSY;
+		}
 
 		/* copy complete record in skb */
-		chcr_ktls_copy_record_in_skb(nskb, record);
+		chcr_ktls_copy_record_in_skb(nskb, skb, record);
 		/* packet is being sent from the beginning, update the tcp_seq
 		 * accordingly.
 		 */
 		tcp_seq = tls_record_start_seq(record);
-		/* reset snd una, so the middle record won't send the already
-		 * sent part.
-		 */
-		if (chcr_ktls_update_snd_una(tx_info, q))
-			goto out;
+		/* reset skb offset */
+		skb_offset = 0;
+
+		if (last_wr)
+			dev_kfree_skb_any(skb);
+
+		last_wr = true;
+
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
 	}
 
 	if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
+				       last_wr, record->len, skb_offset,
+				       record->num_frags,
 				       (last_wr && tcp_push_no_fin),
 				       mss)) {
 		goto out;
 	}
+	tx_info->prev_seq = record->end_seq;
 	return 0;
 out:
 	dev_kfree_skb_any(nskb);
@@ -1723,41 +1810,47 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
 				     struct sk_buff *skb,
 				     struct tls_record_info *record,
 				     u32 tcp_seq, int mss, bool tcp_push_no_fin,
+				     u32 data_len, u32 skb_offset,
 				     struct sge_eth_txq *q, u32 tls_end_offset)
 {
 	u32 tls_rec_offset = tcp_seq - tls_record_start_seq(record);
 	u8 prior_data[16] = {0};
 	u32 prior_data_len = 0;
-	u32 data_len;
 
 	/* check if the skb is ending in middle of tag/HASH, its a big
 	 * trouble, send the packet before the HASH.
 	 */
-	int remaining_record = tls_end_offset - skb->data_len;
+	int remaining_record = tls_end_offset - data_len;
 
 	if (remaining_record > 0 &&
 	    remaining_record < TLS_CIPHER_AES_GCM_128_TAG_SIZE) {
-		int trimmed_len = skb->data_len -
-			(TLS_CIPHER_AES_GCM_128_TAG_SIZE - remaining_record);
-		struct sk_buff *tmp_skb = NULL;
-		/* don't process the pkt if it is only a partial tag */
-		if (skb->data_len < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
-			goto out;
+		int trimmed_len = 0;
 
-		WARN_ON(trimmed_len > skb->data_len);
+		if (tls_end_offset > TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+			trimmed_len = data_len -
+				      (TLS_CIPHER_AES_GCM_128_TAG_SIZE -
+				       remaining_record);
+		if (!trimmed_len)
+			return FALLBACK;
 
-		/* shift to those many bytes */
-		tmp_skb = alloc_skb(0, GFP_KERNEL);
-		if (unlikely(!tmp_skb))
-			goto out;
+		WARN_ON(trimmed_len > data_len);
 
-		chcr_ktls_skb_shift(tmp_skb, skb, trimmed_len);
+		data_len = trimmed_len;
 
-		/* free the last trimmed portion */
-		dev_kfree_skb_any(skb);
-		skb = tmp_skb;
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
 	}
-	data_len = skb->data_len;
+
+	/* check if it is only the header part. */
+	if (tls_rec_offset + data_len <= (TLS_HEADER_SIZE + tx_info->iv_size)) {
+		if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+					  tcp_push_no_fin, q,
+					  tx_info->port_id, prior_data,
+					  data_len, skb_offset, prior_data_len))
+			goto out;
+
+		tx_info->prev_seq = tcp_seq + data_len;
+		return 0;
+	}
+
 	/* check if the middle record's start point is 16 byte aligned. CTR
 	 * needs 16 byte aligned start point to start encryption.
 	 */
@@ -1818,9 +1911,6 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
 		}
 		/* reset tcp_seq as per the prior_data_required len */
 		tcp_seq -= prior_data_len;
-		/* include prio_data_len for further calculation.
-		 */
-		data_len += prior_data_len;
 	}
 	/* reset snd una, so the middle record won't send the already
 	 * sent part.
@@ -1829,37 +1919,54 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
 			goto out;
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
 	} else {
-		/* Else means, its a partial first part of the record. Check if
-		 * its only the header, don't need to send for encryption then.
-		 */
-		if (data_len <= TLS_HEADER_SIZE + tx_info->iv_size) {
-			if (chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
-						  tcp_push_no_fin, q,
-						  tx_info->port_id,
-						  prior_data,
-						  prior_data_len)) {
-				goto out;
-			}
-			return 0;
-		}
 		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
 	}
 
 	if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
 				    mss, tls_rec_offset, prior_data,
-				    prior_data_len)) {
+				    prior_data_len, data_len, skb_offset)) {
 		goto out;
 	}
 
+	tx_info->prev_seq = tcp_seq + data_len + prior_data_len;
 	return 0;
 out:
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_BUSY;
 }
 
+static int chcr_ktls_sw_fallback(struct sk_buff *skb,
+				 struct chcr_ktls_info *tx_info,
+				 struct sge_eth_txq *q)
+{
+	u32 data_len, skb_offset;
+	struct sk_buff *nskb;
+	struct tcphdr *th;
+
+	nskb = tls_encrypt_skb(skb);
+	if (!nskb)
+		return 0;
+
+	th = tcp_hdr(nskb);
+	skb_offset = skb_transport_offset(nskb) + tcp_hdrlen(nskb);
+	data_len = nskb->len - skb_offset;
+	skb_tx_timestamp(nskb);
+
+	if (chcr_ktls_tunnel_pkt(tx_info, nskb, q))
+		goto out;
+
+	tx_info->prev_seq = ntohl(th->seq) + data_len;
+	atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_fallback);
+	return 0;
+out:
+	dev_kfree_skb_any(nskb);
+	return 0;
+}
+
 /* nic tls TX handler */
 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
 	struct ch_ktls_port_stats_debug *port_stats;
 	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
 	struct ch_ktls_stats_debug *stats;
@@ -1867,20 +1974,17 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	int data_len, qidx, ret = 0, mss;
 	struct tls_record_info *record;
 	struct chcr_ktls_info *tx_info;
-	u32 tls_end_offset, tcp_seq;
 	struct tls_context *tls_ctx;
-	struct sk_buff *local_skb;
 	struct sge_eth_txq *q;
 	struct adapter *adap;
 	unsigned long flags;
 
 	tcp_seq = ntohl(th->seq);
+	skb_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
+	skb_data_len = skb->len - skb_offset;
+	data_len = skb_data_len;
 
-	mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : skb->data_len;
-
-	/* check if we haven't set it for ktls offload */
-	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-		goto out;
+	mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
 
 	tls_ctx = tls_get_ctx(skb->sk);
 	if (unlikely(tls_ctx->netdev != dev))
@@ -1892,14 +1996,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(!tx_info))
 		goto out;
 
-	/* don't touch the original skb, make a new skb to extract each records
-	 * and send them separately.
-	 */
-	local_skb = alloc_skb(0, GFP_KERNEL);
-	if (unlikely(!local_skb))
-		return NETDEV_TX_BUSY;
-
 	adap = tx_info->adap;
 	stats = &adap->ch_ktls_stats;
 	port_stats = &stats->ktls_port[tx_info->port_id];
@@ -1914,20 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (ret)
 			return NETDEV_TX_BUSY;
 	}
-	/* update tcb */
-	ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, ntohl(th->seq),
-				      ntohl(th->ack_seq),
-				      ntohs(th->window));
-	if (ret) {
-		dev_kfree_skb_any(local_skb);
-		return NETDEV_TX_BUSY;
-	}
-
-	/* copy skb contents into local skb */
-	chcr_ktls_skb_copy(skb, local_skb);
-
-	/* go through the skb and send only one record at a time. */
-	data_len = skb->data_len;
+
 	/* TCP segments can be in received either complete or partial.
 	 * chcr_end_part_handler will handle cases if complete record or end
 	 * part of the record is received. Incase of partial end part of record,
@@ -1952,10 +2035,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out;
 		}
 
+		tls_end_offset = record->end_seq - tcp_seq;
+		pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
+			 tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
+		/* update tcb for the skb */
+		if (skb_data_len == data_len) {
+			u32 tx_max = tcp_seq;
+
+			if (!tls_record_is_start_marker(record) &&
+			    tls_end_offset < TLS_CIPHER_AES_GCM_128_TAG_SIZE)
+				tx_max = record->end_seq -
+					 TLS_CIPHER_AES_GCM_128_TAG_SIZE;
+
+			ret = chcr_ktls_xmit_tcb_cpls(tx_info, q, tx_max,
+						      ntohl(th->ack_seq),
+						      ntohs(th->window),
+						      tls_end_offset !=
+						      record->len);
+			if (ret) {
+				spin_unlock_irqrestore(&tx_ctx->base.lock,
+						       flags);
+				goto out;
+			}
+
+			if (th->fin)
+				skb_get(skb);
+		}
+
 		if (unlikely(tls_record_is_start_marker(record))) {
-			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 			atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
-			goto out;
+			/* If tls_end_offset < data_len, means there is some
+			 * data after start marker, which needs encryption, send
+			 * plaintext first and take skb refcount. else send out
+			 * complete pkt as plaintext.
+			 */
+			if (tls_end_offset < data_len)
+				skb_get(skb);
+			else
+				tls_end_offset = data_len;
+
+			ret = chcr_ktls_tx_plaintxt(tx_info, skb, tcp_seq, mss,
+						    (!th->fin && th->psh), q,
+						    tx_info->port_id, NULL,
+						    tls_end_offset, skb_offset,
+						    0);
+
+			spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+			if (ret) {
+				/* free the refcount taken earlier */
+				if (tls_end_offset < data_len)
+					dev_kfree_skb_any(skb);
+				goto out;
+			}
+
+			data_len -= tls_end_offset;
+			tcp_seq = record->end_seq;
+			skb_offset += tls_end_offset;
+			continue;
 		}
 
 		/* increase page reference count of the record, so that there
@@ -1967,73 +2104,64 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* lock cleared */
 		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
 
-		tls_end_offset = record->end_seq - tcp_seq;
-		pr_debug("seq 0x%x, end_seq 0x%x prev_seq 0x%x, datalen 0x%x\n",
-			 tcp_seq, record->end_seq, tx_info->prev_seq, data_len);
-
 		/* if a tls record is finishing in this SKB */
 		if (tls_end_offset <= data_len) {
-			struct sk_buff *nskb = NULL;
-
-			if (tls_end_offset < data_len) {
-				nskb = alloc_skb(0, GFP_KERNEL);
-				if (unlikely(!nskb)) {
-					ret = -ENOMEM;
-					goto clear_ref;
-				}
-
-				chcr_ktls_skb_shift(nskb, local_skb,
-						    tls_end_offset);
-			} else {
-				/* its the only record in this skb, directly
-				 * point it.
-				 */
-				nskb = local_skb;
-			}
-			ret = chcr_end_part_handler(tx_info, nskb, record,
+			ret = chcr_end_part_handler(tx_info, skb, record,
 						    tcp_seq, mss,
 						    (!th->fin && th->psh), q,
+						    skb_offset,
 						    tls_end_offset,
-						    (nskb == local_skb));
-
-			if (ret && nskb != local_skb)
-				dev_kfree_skb_any(local_skb);
+						    skb_offset +
+						    tls_end_offset == skb->len);
 
 			data_len -= tls_end_offset;
 			/* tcp_seq increment is required to handle next record.
 			 */
 			tcp_seq += tls_end_offset;
+			skb_offset += tls_end_offset;
 		} else {
-			ret = chcr_short_record_handler(tx_info, local_skb,
+			ret = chcr_short_record_handler(tx_info, skb,
 							record, tcp_seq, mss,
 							(!th->fin && th->psh),
+							data_len, skb_offset,
 							q, tls_end_offset);
 			data_len = 0;
 		}
-clear_ref:
+
 		/* clear the frag ref count which increased locally before */
 		for (i = 0; i < record->num_frags; i++) {
 			/* clear the frag ref count */
 			__skb_frag_unref(&record->frags[i]);
 		}
 		/* if any failure, come out from the loop. */
-		if (ret)
-			goto out;
+		if (ret) {
+			if (th->fin)
+				dev_kfree_skb_any(skb);
+
+			if (ret == FALLBACK)
+				return chcr_ktls_sw_fallback(skb, tx_info, q);
+
+			return NETDEV_TX_OK;
+		}
+
 		/* length should never be less than 0 */
 		WARN_ON(data_len < 0);
 
 	} while (data_len > 0);
 
-	tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
 	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
-	atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
+	atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
 
 	/* tcp finish is set, send a separate tcp msg including all the options
 	 * as well.
 	 */
-	if (th->fin)
+	if (th->fin) {
 		chcr_ktls_write_tcp_options(tx_info, skb, q, tx_info->tx_chan);
+		dev_kfree_skb_any(skb);
+	}
 
+	return NETDEV_TX_OK;
 out:
 	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
......
@@ -26,6 +26,7 @@
 #define CHCR_KTLS_WR_SIZE	(CHCR_PLAIN_TX_DATA_LEN +\
 				 sizeof(struct cpl_tx_sec_pdu))
+#define FALLBACK		35
 
 enum ch_ktls_open_state {
 	CH_KTLS_OPEN_SUCCESS = 0,
......