Commit 2355a677 authored by Ayush Sawal, committed by David S. Miller

cxgb4/chtls/cxgbit: Keeping the max ofld immediate data size same in cxgb4 and ulds

The max immediate data size in cxgb4 is not the same as the max
immediate data size in chtls, which caused a mismatch in the output of
is_ofld_imm() between cxgb4 and chtls. Fix this by using the same
maximum work request size for immediate data,
MAX_IMM_OFLD_TX_DATA_WR_LEN, in both chtls and cxgb4.

As cxgb4's max immediate data size for offload packets is changed to
MAX_IMM_OFLD_TX_DATA_WR_LEN, use the same limit in cxgbit as well.

Fixes: 36bedb3f ("crypto: chtls - Inline TLS record Tx")
Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d0a0bbe7
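
For illustration only, a minimal user-space sketch of the mismatch described in the commit message above. The constants are assumptions made for the example (the old cxgb4 limit MAX_IMM_TX_PKT_LEN taken as 256 bytes, a 16-byte FW_OFLD_TX_DATA_WR header, and the 255-byte payload cap from its 8-bit length field); they are not read from the driver headers, and the helpers are hypothetical stand-ins for the two drivers' is_ofld_imm() checks.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two pre-fix limits; not driver constants. */
#define OLD_CXGB4_IMM_LIMIT	256	/* old MAX_IMM_TX_PKT_LEN used in cxgb4's sge.c */
#define OFLD_TX_DATA_WR_HDR	16	/* assumed sizeof(struct fw_ofld_tx_data_wr) */
#define CHTLS_IMM_LIMIT		(255 + OFLD_TX_DATA_WR_HDR)	/* 8-bit payload field + header */

/* Mirrors the old cxgb4 check: the whole skb must fit the Ethernet immediate limit. */
static bool old_cxgb4_is_ofld_imm(unsigned int skb_len)
{
	return skb_len <= OLD_CXGB4_IMM_LIMIT;
}

/* Mirrors a chtls-style check: 255 bytes of payload plus the WR header. */
static bool chtls_is_ofld_imm(unsigned int skb_len)
{
	return skb_len <= CHTLS_IMM_LIMIT;
}

int main(void)
{
	unsigned int skb_len = 260;	/* WR header + payload queued in one skb */

	/* With these assumed limits, chtls treats the WR as immediate but cxgb4 does not. */
	printf("len=%u cxgb4_imm=%d chtls_imm=%d\n", skb_len,
	       old_cxgb4_is_ofld_imm(skb_len), chtls_is_ofld_imm(skb_len));
	return 0;
}

After the fix, both sides compare against the single MAX_IMM_OFLD_TX_DATA_WR_LEN limit, so the two checks can no longer disagree.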
@@ -46,6 +46,9 @@
 #define MAX_ULD_QSETS 16
 #define MAX_ULD_NPORTS 4
 
+/* ulp_mem_io + ulptx_idata + payload + padding */
+#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+
 /* CPL message priority levels */
 enum {
 	CPL_PRIORITY_DATA = 0, /* data messages */
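
A quick arithmetic cross-check of the budget behind the new define, not part of the patch: the comment splits MAX_IMM_ULPTX_WR_LEN into a ulp_mem_io header, a ulptx_idata header, the 256-byte FW_ULPTX_WR immediate payload limit noted in the sge.c comment below, and trailing padding. The 32- and 8-byte header sizes are assumptions taken from the operands of the define, not from the structure definitions.

#include <assert.h>

/* Named stand-ins for the operands of MAX_IMM_ULPTX_WR_LEN above (sizes assumed). */
#define ULP_MEM_IO_LEN		32	/* assumed sizeof(struct ulp_mem_io) */
#define ULPTX_IDATA_LEN		8	/* assumed sizeof(struct ulptx_idata) */
#define ULPTX_MAX_IMM_PAYLOAD	256	/* FW_ULPTX_WR immediate-only payload limit */
#define ULPTX_PAD		8	/* trailing padding */

int main(void)
{
	/* 32 + 8 + 256 + 8 == 304 bytes: the largest FW_ULPTX_WR skb sent as immediate data. */
	assert(ULP_MEM_IO_LEN + ULPTX_IDATA_LEN +
	       ULPTX_MAX_IMM_PAYLOAD + ULPTX_PAD == 304);
	return 0;
}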
@@ -2842,17 +2842,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
  *	@skb: the packet
  *
  *	Returns true if a packet can be sent as an offload WR with immediate
- *	data.  We currently use the same limit as for Ethernet packets.
+ *	data.
+ *	FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
+ *	However, FW_ULPTX_WR commands have a 256 byte immediate only
+ *	payload limit.
  */
 static inline int is_ofld_imm(const struct sk_buff *skb)
 {
 	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
 	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
 
-	if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+	if (unlikely(opcode == FW_ULPTX_WR))
+		return skb->len <= MAX_IMM_ULPTX_WR_LEN;
+	else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
 		return skb->len <= SGE_MAX_WR_LEN;
 	else
-		return skb->len <= MAX_IMM_TX_PKT_LEN;
+		return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
 }
 
 /**
@@ -50,9 +50,6 @@
 #define MIN_RCV_WND (24 * 1024U)
 #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
 
-/* ulp_mem_io + ulptx_idata + payload + padding */
-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
-
 /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
 #define TX_HEADER_LEN \
 	(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
@@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
 	if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
 		length += sizeof(struct cpl_tx_data_iso);
 
-#define MAX_IMM_TX_PKT_LEN 256
-	return length <= MAX_IMM_TX_PKT_LEN;
+	return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
 }
 
 /*