Commit 820a38d8 authored by Jakub Kicinski

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-08-16 (iavf, i40e)

This series contains updates to iavf and i40e drivers.

Piotr adds checks for unsupported Flow Director rules on iavf.

Andrii replaces incorrect 'write' messaging on read operations for i40e.

* '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  i40e: fix misleading debug logs
  iavf: fix FDIR rule fields masks validation
====================

Link: https://lore.kernel.org/r/20230816193308.1307535-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents e9bbd601 2f2beb88
@@ -210,11 +210,11 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
  * @hw: pointer to the HW structure.
  * @module_pointer: module pointer location in words from the NVM beginning
  * @offset: offset in words from module start
- * @words: number of words to write
- * @data: buffer with words to write to the Shadow RAM
+ * @words: number of words to read
+ * @data: buffer with words to read to the Shadow RAM
  * @last_command: tells the AdminQ that this is the last command
  *
- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
+ * Reads a 16 bit words buffer to the Shadow RAM using the admin command.
  **/
 static int i40e_read_nvm_aq(struct i40e_hw *hw,
 			    u8 module_pointer, u32 offset,
@@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
 	 */
 	if ((offset + words) > hw->nvm.sr_size)
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
+			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
 			   (offset + words), hw->nvm.sr_size);
 	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
-		/* We can write only up to 4KB (one sector), in one AQ write */
+		/* We can read only up to 4KB (one sector), in one AQ write */
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write fail error: tried to write %d words, limit is %d.\n",
+			   "NVM read fail error: tried to read %d words, limit is %d.\n",
 			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
 	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
 		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
-		/* A single write cannot spread over two sectors */
+		/* A single read cannot spread over two sectors */
 		i40e_debug(hw, I40E_DEBUG_NVM,
-			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
+			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
 			   offset, words);
 	else
 		ret_code = i40e_aq_read_nvm(hw, module_pointer,
......
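For context, the hunk above only rewords the debug strings and comments; the checks themselves are unchanged. The least obvious of them is the sector-crossing test. Below is a minimal, stand-alone C sketch of that test; the 2048-word sector size is an assumption derived from the "4KB (one sector)" comment (4096 bytes of 16-bit words), and the helper name is made up for illustration, not taken from the driver.

/* Stand-alone sketch (not driver code) of the sector-crossing check above. */
#include <stdio.h>
#include <stdbool.h>

#define SR_SECTOR_SIZE_IN_WORDS 2048	/* assumed: 4KB sector = 2048 x 16-bit words */

/* Return true when a read of 'words' words at 'offset' stays inside one sector,
 * i.e. the first and last word of the range fall into the same sector.
 */
static bool read_fits_one_sector(unsigned int offset, unsigned int words)
{
	return (offset + (words - 1)) / SR_SECTOR_SIZE_IN_WORDS ==
	       offset / SR_SECTOR_SIZE_IN_WORDS;
}

int main(void)
{
	/* 2040 + 16 - 1 = 2055 lands in sector 1 while the read starts in sector 0 */
	printf("offset 2040, 16 words: %s\n",
	       read_fits_one_sector(2040, 16) ? "ok" : "crosses a sector");
	/* 1024 + 512 - 1 = 1535 stays in sector 0 */
	printf("offset 1024, 512 words: %s\n",
	       read_fits_one_sector(1024, 512) ? "ok" : "crosses a sector");
	return 0;
}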
@@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
 		fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
+		fltr->ip_ver = 4;
 		break;
 	case AH_V4_FLOW:
 	case ESP_V4_FLOW:
@@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
 		fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
 		fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
+		fltr->ip_ver = 4;
 		break;
 	case IPV4_USER_FLOW:
 		fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
@@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
 		fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
 		fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
+		fltr->ip_ver = 4;
 		break;
 	case TCP_V6_FLOW:
 	case UDP_V6_FLOW:
@@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
 		fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
 		fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
+		fltr->ip_ver = 6;
 		break;
 	case AH_V6_FLOW:
 	case ESP_V6_FLOW:
@@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		       sizeof(struct in6_addr));
 		fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
 		fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
+		fltr->ip_ver = 6;
 		break;
 	case IPV6_USER_FLOW:
 		memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
@@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
 		fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
 		fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
+		fltr->ip_ver = 6;
 		break;
 	case ETHER_FLOW:
 		fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
@@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
 		return -EINVAL;
 	}
 
+	err = iavf_validate_fdir_fltr_masks(adapter, fltr);
+	if (err)
+		return err;
+
 	if (iavf_fdir_is_dup_fltr(adapter, fltr))
 		return -EEXIST;
......
@@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = {
 	}
 };
 
+static const struct in6_addr ipv6_addr_zero_mask = {
+	.in6_u = {
+		.u6_addr8 = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		}
+	}
+};
+
+/**
+ * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
+ * @adapter: pointer to the VF adapter structure
+ * @fltr: Flow Director filter data structure
+ *
+ * Returns 0 if all masks of packet fields are either full or empty. Returns
+ * error on at least one partial mask.
+ */
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
+				  struct iavf_fdir_fltr *fltr)
+{
+	if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_ver == 4) {
+		if (fltr->ip_mask.v4_addrs.src_ip &&
+		    fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
+			goto partial_mask;
+
+		if (fltr->ip_mask.v4_addrs.dst_ip &&
+		    fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
+			goto partial_mask;
+
+		if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
+			goto partial_mask;
+	} else if (fltr->ip_ver == 6) {
+		if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
+			   sizeof(struct in6_addr)) &&
+		    memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
+			   sizeof(struct in6_addr)))
+			goto partial_mask;
+
+		if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
+			   sizeof(struct in6_addr)) &&
+		    memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
+			   sizeof(struct in6_addr)))
+			goto partial_mask;
+
+		if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
+			goto partial_mask;
+	}
+
+	if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
+		goto partial_mask;
+
+	if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
+		goto partial_mask;
+
+	if (fltr->ip_mask.l4_header &&
+	    fltr->ip_mask.l4_header != htonl(U32_MAX))
+		goto partial_mask;
+
+	return 0;
+
+partial_mask:
+	dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
+	return -EOPNOTSUPP;
+}
+
 /**
  * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
  * @fltr: Flow Director filter data structure
@@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
 		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
 	}
 
-	fltr->ip_ver = 4;
-
 	return 0;
 }
@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
 		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
 	}
 
-	fltr->ip_ver = 6;
-
 	return 0;
 }
......
@@ -110,6 +110,8 @@ struct iavf_fdir_fltr {
 	struct virtchnl_fdir_add vc_add_msg;
 };
 
+int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
+				  struct iavf_fdir_fltr *fltr);
 int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
 void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
 bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
......
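To make the new iavf rule concrete: a Flow Director field mask is accepted only when it is all-zeros (field ignored) or all-ones (exact match); anything in between now fails with -EOPNOTSUPP instead of being passed to the PF. Below is a minimal user-space sketch of that full-or-empty rule for a 32-bit IPv4 address mask, mirroring the driver's "mask && mask != htonl(U32_MAX)" test; the helper name and the standalone setting are illustrative, not driver code.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl() */

/* Full-or-empty rule for a 32-bit field mask (network byte order):
 * a partial mask (some bits set, some clear) is not supported.
 */
static bool ipv4_mask_supported(uint32_t mask_be)
{
	return mask_be == 0 || mask_be == htonl(UINT32_MAX);
}

int main(void)
{
	uint32_t full = htonl(0xFFFFFFFF);	/* exact-match mask: accepted */
	uint32_t none = htonl(0x00000000);	/* field ignored: accepted */
	uint32_t part = htonl(0xFFFFFF00);	/* /24 prefix mask: rejected */

	printf("full mask: %s\n", ipv4_mask_supported(full) ? "supported" : "-EOPNOTSUPP");
	printf("zero mask: %s\n", ipv4_mask_supported(none) ? "supported" : "-EOPNOTSUPP");
	printf("/24 mask : %s\n", ipv4_mask_supported(part) ? "supported" : "-EOPNOTSUPP");
	return 0;
}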