Commit 853b0df9 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-03-22

This series contains updates to ice and iavf drivers.

Haiyue Wang says:

The Intel E810 Series supports a programmable pipeline for classifying
domain-specific protocols, for example GTP, through Dynamic Device
Personalization (DDP) profiles.

The E810 PF introduced flex-bytes support through the ethtool user-def
option, allowing deeper packet matching based on an offset and value for
DDP usage.

To let the VF benefit from this flexible protocol classification as
well, new virtchnl messages are defined and handled by the PF, so the VF
can query this new flow director capability and use ethtool, with an
extended user-def option, to configure Rx flow classification.

The new user-def value has the layout 0xAAAABBBBCCCCDDDD: BBBB is a
2-byte pattern and AAAA is its offset in the packet. Similarly, DDDD is
a 2-byte pattern with CCCC as the corresponding offset. Each offset is
measured from the beginning of the packet and ranges from 0x0 to 0x1F8
(up to 504 bytes into the packet).

This feature allows customers to set flow director rules for protocol
headers beyond the standard ones supported by ethtool (e.g. PFCP or
GTP-U).

For example, to match GTP-U's TEID value 0x10203040:
ethtool -N ens787f0v0 flow-type udp4 dst-port 2152 \
    user-def 0x002e102000303040 action 13
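
Decoding that example (assuming an untagged IPv4 packet): the GTP-U
header starts 42 bytes into the packet (14 Ethernet + 20 IPv4 + 8 UDP),
and the 4-byte TEID occupies bytes 4-7 of that header. So:

    offset 0x002e (46) -> GTP-U header bytes 4-5 -> pattern 0x1020
    offset 0x0030 (48) -> GTP-U header bytes 6-7 -> pattern 0x3040

Together the two flex words match TEID 0x10203040 (the VF-side parsing
lives in iavf_parse_rx_flow_user_data()).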
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ec8136cd a6379db8
......@@ -11,5 +11,5 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
......@@ -37,6 +37,7 @@
#include "iavf_type.h"
#include <linux/avf/virtchnl.h>
#include "iavf_txrx.h"
#include "iavf_fdir.h"
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "iavf: "
......@@ -300,6 +301,8 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
#define IAVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
#define IAVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
#define IAVF_FLAG_AQ_ADD_FDIR_FILTER BIT(25)
#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
/* OS defined structs */
struct net_device *netdev;
......@@ -340,6 +343,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN)
#define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
#define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_FDIR_PF)
struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */
struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */
struct virtchnl_version_info pf_version;
......@@ -362,6 +367,11 @@ struct iavf_adapter {
/* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters;
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
};
......@@ -432,6 +442,8 @@ void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
void iavf_del_cloud_filter(struct iavf_adapter *adapter);
void iavf_add_fdir_filter(struct iavf_adapter *adapter);
void iavf_del_fdir_filter(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
#endif /* _IAVF_H_ */
......@@ -827,6 +827,623 @@ static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
return __iavf_set_coalesce(netdev, ec, queue);
}
/**
* iavf_fltr_to_ethtool_flow - convert filter type values to ethtool
* flow type values
* @flow: filter type to be converted
*
* Returns the corresponding ethtool flow type.
*/
static int iavf_fltr_to_ethtool_flow(enum iavf_fdir_flow_type flow)
{
switch (flow) {
case IAVF_FDIR_FLOW_IPV4_TCP:
return TCP_V4_FLOW;
case IAVF_FDIR_FLOW_IPV4_UDP:
return UDP_V4_FLOW;
case IAVF_FDIR_FLOW_IPV4_SCTP:
return SCTP_V4_FLOW;
case IAVF_FDIR_FLOW_IPV4_AH:
return AH_V4_FLOW;
case IAVF_FDIR_FLOW_IPV4_ESP:
return ESP_V4_FLOW;
case IAVF_FDIR_FLOW_IPV4_OTHER:
return IPV4_USER_FLOW;
case IAVF_FDIR_FLOW_IPV6_TCP:
return TCP_V6_FLOW;
case IAVF_FDIR_FLOW_IPV6_UDP:
return UDP_V6_FLOW;
case IAVF_FDIR_FLOW_IPV6_SCTP:
return SCTP_V6_FLOW;
case IAVF_FDIR_FLOW_IPV6_AH:
return AH_V6_FLOW;
case IAVF_FDIR_FLOW_IPV6_ESP:
return ESP_V6_FLOW;
case IAVF_FDIR_FLOW_IPV6_OTHER:
return IPV6_USER_FLOW;
case IAVF_FDIR_FLOW_NON_IP_L2:
return ETHER_FLOW;
default:
/* 0 is undefined ethtool flow */
return 0;
}
}
/**
* iavf_ethtool_flow_to_fltr - convert ethtool flow type to filter enum
* @eth: Ethtool flow type to be converted
*
* Returns flow enum
*/
static enum iavf_fdir_flow_type iavf_ethtool_flow_to_fltr(int eth)
{
switch (eth) {
case TCP_V4_FLOW:
return IAVF_FDIR_FLOW_IPV4_TCP;
case UDP_V4_FLOW:
return IAVF_FDIR_FLOW_IPV4_UDP;
case SCTP_V4_FLOW:
return IAVF_FDIR_FLOW_IPV4_SCTP;
case AH_V4_FLOW:
return IAVF_FDIR_FLOW_IPV4_AH;
case ESP_V4_FLOW:
return IAVF_FDIR_FLOW_IPV4_ESP;
case IPV4_USER_FLOW:
return IAVF_FDIR_FLOW_IPV4_OTHER;
case TCP_V6_FLOW:
return IAVF_FDIR_FLOW_IPV6_TCP;
case UDP_V6_FLOW:
return IAVF_FDIR_FLOW_IPV6_UDP;
case SCTP_V6_FLOW:
return IAVF_FDIR_FLOW_IPV6_SCTP;
case AH_V6_FLOW:
return IAVF_FDIR_FLOW_IPV6_AH;
case ESP_V6_FLOW:
return IAVF_FDIR_FLOW_IPV6_ESP;
case IPV6_USER_FLOW:
return IAVF_FDIR_FLOW_IPV6_OTHER;
case ETHER_FLOW:
return IAVF_FDIR_FLOW_NON_IP_L2;
default:
return IAVF_FDIR_FLOW_NONE;
}
}
/**
* iavf_is_mask_valid - check mask field set
* @mask: full mask to check
* @field: field for which mask should be valid
*
* If the mask is fully set return true. If it is not valid for field return
* false.
*/
static bool iavf_is_mask_valid(u64 mask, u64 field)
{
return (mask & field) == field;
}
/**
* iavf_parse_rx_flow_user_data - deconstruct user-defined data
* @fsp: pointer to ethtool Rx flow specification
* @fltr: pointer to Flow Director filter for userdef data storage
*
* Returns 0 on success, negative error value on failure
*/
static int
iavf_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
struct iavf_fdir_fltr *fltr)
{
struct iavf_flex_word *flex;
int i, cnt = 0;
if (!(fsp->flow_type & FLOW_EXT))
return 0;
for (i = 0; i < 2; i++) {
#define IAVF_USERDEF_FLEX_WORD_M GENMASK(15, 0)
#define IAVF_USERDEF_FLEX_OFFS_S 16
#define IAVF_USERDEF_FLEX_OFFS_M GENMASK(31, IAVF_USERDEF_FLEX_OFFS_S)
#define IAVF_USERDEF_FLEX_FLTR_M GENMASK(31, 0)
u32 value = be32_to_cpu(fsp->h_ext.data[i]);
u32 mask = be32_to_cpu(fsp->m_ext.data[i]);
if (!value || !mask)
continue;
if (!iavf_is_mask_valid(mask, IAVF_USERDEF_FLEX_FLTR_M))
return -EINVAL;
/* 504 is the maximum value for offsets, and offset is measured
* from the start of the MAC address.
*/
#define IAVF_USERDEF_FLEX_MAX_OFFS_VAL 504
flex = &fltr->flex_words[cnt++];
flex->word = value & IAVF_USERDEF_FLEX_WORD_M;
flex->offset = (value & IAVF_USERDEF_FLEX_OFFS_M) >>
IAVF_USERDEF_FLEX_OFFS_S;
if (flex->offset > IAVF_USERDEF_FLEX_MAX_OFFS_VAL)
return -EINVAL;
}
fltr->flex_cnt = cnt;
return 0;
}
/**
* iavf_fill_rx_flow_ext_data - fill the additional data
* @fsp: pointer to ethtool Rx flow specification
* @fltr: pointer to Flow Director filter to get additional data
*/
static void
iavf_fill_rx_flow_ext_data(struct ethtool_rx_flow_spec *fsp,
struct iavf_fdir_fltr *fltr)
{
if (!fltr->ext_mask.usr_def[0] && !fltr->ext_mask.usr_def[1])
return;
fsp->flow_type |= FLOW_EXT;
memcpy(fsp->h_ext.data, fltr->ext_data.usr_def, sizeof(fsp->h_ext.data));
memcpy(fsp->m_ext.data, fltr->ext_mask.usr_def, sizeof(fsp->m_ext.data));
}
/**
* iavf_get_ethtool_fdir_entry - fill ethtool structure with Flow Director filter data
* @adapter: the VF adapter structure that contains filter list
* @cmd: ethtool command data structure to receive the filter data
*
* Returns 0 on success, as expected by ethtool
*/
static int
iavf_get_ethtool_fdir_entry(struct iavf_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
struct iavf_fdir_fltr *rule = NULL;
int ret = 0;
if (!FDIR_FLTR_SUPPORT(adapter))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
rule = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
if (!rule) {
ret = -EINVAL;
goto release_lock;
}
fsp->flow_type = iavf_fltr_to_ethtool_flow(rule->flow_type);
memset(&fsp->m_u, 0, sizeof(fsp->m_u));
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
fsp->h_u.tcp_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
fsp->h_u.tcp_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
fsp->h_u.tcp_ip4_spec.psrc = rule->ip_data.src_port;
fsp->h_u.tcp_ip4_spec.pdst = rule->ip_data.dst_port;
fsp->h_u.tcp_ip4_spec.tos = rule->ip_data.tos;
fsp->m_u.tcp_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
fsp->m_u.tcp_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
fsp->m_u.tcp_ip4_spec.psrc = rule->ip_mask.src_port;
fsp->m_u.tcp_ip4_spec.pdst = rule->ip_mask.dst_port;
fsp->m_u.tcp_ip4_spec.tos = rule->ip_mask.tos;
break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
fsp->h_u.ah_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
fsp->h_u.ah_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
fsp->h_u.ah_ip4_spec.spi = rule->ip_data.spi;
fsp->h_u.ah_ip4_spec.tos = rule->ip_data.tos;
fsp->m_u.ah_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
fsp->m_u.ah_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
fsp->m_u.ah_ip4_spec.spi = rule->ip_mask.spi;
fsp->m_u.ah_ip4_spec.tos = rule->ip_mask.tos;
break;
case IPV4_USER_FLOW:
fsp->h_u.usr_ip4_spec.ip4src = rule->ip_data.v4_addrs.src_ip;
fsp->h_u.usr_ip4_spec.ip4dst = rule->ip_data.v4_addrs.dst_ip;
fsp->h_u.usr_ip4_spec.l4_4_bytes = rule->ip_data.l4_header;
fsp->h_u.usr_ip4_spec.tos = rule->ip_data.tos;
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = rule->ip_data.proto;
fsp->m_u.usr_ip4_spec.ip4src = rule->ip_mask.v4_addrs.src_ip;
fsp->m_u.usr_ip4_spec.ip4dst = rule->ip_mask.v4_addrs.dst_ip;
fsp->m_u.usr_ip4_spec.l4_4_bytes = rule->ip_mask.l4_header;
fsp->m_u.usr_ip4_spec.tos = rule->ip_mask.tos;
fsp->m_u.usr_ip4_spec.ip_ver = 0xFF;
fsp->m_u.usr_ip4_spec.proto = rule->ip_mask.proto;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
sizeof(struct in6_addr));
fsp->h_u.tcp_ip6_spec.psrc = rule->ip_data.src_port;
fsp->h_u.tcp_ip6_spec.pdst = rule->ip_data.dst_port;
fsp->h_u.tcp_ip6_spec.tclass = rule->ip_data.tclass;
memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
sizeof(struct in6_addr));
fsp->m_u.tcp_ip6_spec.psrc = rule->ip_mask.src_port;
fsp->m_u.tcp_ip6_spec.pdst = rule->ip_mask.dst_port;
fsp->m_u.tcp_ip6_spec.tclass = rule->ip_mask.tclass;
break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
memcpy(fsp->h_u.ah_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
sizeof(struct in6_addr));
fsp->h_u.ah_ip6_spec.spi = rule->ip_data.spi;
fsp->h_u.ah_ip6_spec.tclass = rule->ip_data.tclass;
memcpy(fsp->m_u.ah_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
sizeof(struct in6_addr));
fsp->m_u.ah_ip6_spec.spi = rule->ip_mask.spi;
fsp->m_u.ah_ip6_spec.tclass = rule->ip_mask.tclass;
break;
case IPV6_USER_FLOW:
memcpy(fsp->h_u.usr_ip6_spec.ip6src, &rule->ip_data.v6_addrs.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->h_u.usr_ip6_spec.ip6dst, &rule->ip_data.v6_addrs.dst_ip,
sizeof(struct in6_addr));
fsp->h_u.usr_ip6_spec.l4_4_bytes = rule->ip_data.l4_header;
fsp->h_u.usr_ip6_spec.tclass = rule->ip_data.tclass;
fsp->h_u.usr_ip6_spec.l4_proto = rule->ip_data.proto;
memcpy(fsp->m_u.usr_ip6_spec.ip6src, &rule->ip_mask.v6_addrs.src_ip,
sizeof(struct in6_addr));
memcpy(fsp->m_u.usr_ip6_spec.ip6dst, &rule->ip_mask.v6_addrs.dst_ip,
sizeof(struct in6_addr));
fsp->m_u.usr_ip6_spec.l4_4_bytes = rule->ip_mask.l4_header;
fsp->m_u.usr_ip6_spec.tclass = rule->ip_mask.tclass;
fsp->m_u.usr_ip6_spec.l4_proto = rule->ip_mask.proto;
break;
case ETHER_FLOW:
fsp->h_u.ether_spec.h_proto = rule->eth_data.etype;
fsp->m_u.ether_spec.h_proto = rule->eth_mask.etype;
break;
default:
ret = -EINVAL;
break;
}
iavf_fill_rx_flow_ext_data(fsp, rule);
if (rule->action == VIRTCHNL_ACTION_DROP)
fsp->ring_cookie = RX_CLS_FLOW_DISC;
else
fsp->ring_cookie = rule->q_index;
release_lock:
spin_unlock_bh(&adapter->fdir_fltr_lock);
return ret;
}
/**
* iavf_get_fdir_fltr_ids - fill buffer with filter IDs of active filters
* @adapter: the VF adapter structure containing the filter list
* @cmd: ethtool command data structure
* @rule_locs: ethtool array passed in from OS to receive filter IDs
*
* Returns 0 on success, as expected by ethtool
*/
static int
iavf_get_fdir_fltr_ids(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct iavf_fdir_fltr *fltr;
unsigned int cnt = 0;
int val = 0;
if (!FDIR_FLTR_SUPPORT(adapter))
return -EOPNOTSUPP;
cmd->data = IAVF_MAX_FDIR_FILTERS;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fltr, &adapter->fdir_list_head, list) {
if (cnt == cmd->rule_cnt) {
val = -EMSGSIZE;
goto release_lock;
}
rule_locs[cnt] = fltr->loc;
cnt++;
}
release_lock:
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (!val)
cmd->rule_cnt = cnt;
return val;
}
/**
* iavf_add_fdir_fltr_info - Set the input set for Flow Director filter
* @adapter: pointer to the VF adapter structure
* @fsp: pointer to ethtool Rx flow specification
* @fltr: filter structure
*/
static int
iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spec *fsp,
struct iavf_fdir_fltr *fltr)
{
u32 flow_type, q_index = 0;
enum virtchnl_action act;
int err;
if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
act = VIRTCHNL_ACTION_DROP;
} else {
q_index = fsp->ring_cookie;
if (q_index >= adapter->num_active_queues)
return -EINVAL;
act = VIRTCHNL_ACTION_QUEUE;
}
fltr->action = act;
fltr->loc = fsp->location;
fltr->q_index = q_index;
if (fsp->flow_type & FLOW_EXT) {
memcpy(fltr->ext_data.usr_def, fsp->h_ext.data,
sizeof(fltr->ext_data.usr_def));
memcpy(fltr->ext_mask.usr_def, fsp->m_ext.data,
sizeof(fltr->ext_mask.usr_def));
}
flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
fltr->flow_type = iavf_ethtool_flow_to_fltr(flow_type);
switch (flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
fltr->ip_data.v4_addrs.src_ip = fsp->h_u.tcp_ip4_spec.ip4src;
fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
fltr->ip_data.src_port = fsp->h_u.tcp_ip4_spec.psrc;
fltr->ip_data.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
fltr->ip_data.tos = fsp->h_u.tcp_ip4_spec.tos;
fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.tcp_ip4_spec.ip4src;
fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.tcp_ip4_spec.ip4dst;
fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
fltr->ip_data.v4_addrs.src_ip = fsp->h_u.ah_ip4_spec.ip4src;
fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.ah_ip4_spec.ip4dst;
fltr->ip_data.spi = fsp->h_u.ah_ip4_spec.spi;
fltr->ip_data.tos = fsp->h_u.ah_ip4_spec.tos;
fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.ah_ip4_spec.ip4src;
fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
break;
case IPV4_USER_FLOW:
fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
fltr->ip_data.v4_addrs.dst_ip = fsp->h_u.usr_ip4_spec.ip4dst;
fltr->ip_data.l4_header = fsp->h_u.usr_ip4_spec.l4_4_bytes;
fltr->ip_data.tos = fsp->h_u.usr_ip4_spec.tos;
fltr->ip_data.proto = fsp->h_u.usr_ip4_spec.proto;
fltr->ip_mask.v4_addrs.src_ip = fsp->m_u.usr_ip4_spec.ip4src;
fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.usr_ip4_spec.ip4dst;
fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
fltr->ip_data.src_port = fsp->h_u.tcp_ip6_spec.psrc;
fltr->ip_data.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
fltr->ip_data.tclass = fsp->h_u.tcp_ip6_spec.tclass;
memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.ah_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.ah_ip6_spec.ip6dst,
sizeof(struct in6_addr));
fltr->ip_data.spi = fsp->h_u.ah_ip6_spec.spi;
fltr->ip_data.tclass = fsp->h_u.ah_ip6_spec.tclass;
memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.ah_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.ah_ip6_spec.ip6dst,
sizeof(struct in6_addr));
fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
break;
case IPV6_USER_FLOW:
memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&fltr->ip_data.v6_addrs.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
fltr->ip_data.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes;
fltr->ip_data.tclass = fsp->h_u.usr_ip6_spec.tclass;
fltr->ip_data.proto = fsp->h_u.usr_ip6_spec.l4_proto;
memcpy(&fltr->ip_mask.v6_addrs.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
sizeof(struct in6_addr));
memcpy(&fltr->ip_mask.v6_addrs.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
sizeof(struct in6_addr));
fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
break;
case ETHER_FLOW:
fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
fltr->eth_mask.etype = fsp->m_u.ether_spec.h_proto;
break;
default:
/* reject flow types that are not parsed above */
return -EINVAL;
}
if (iavf_fdir_is_dup_fltr(adapter, fltr))
return -EEXIST;
err = iavf_parse_rx_flow_user_data(fsp, fltr);
if (err)
return err;
return iavf_fill_fdir_add_msg(adapter, fltr);
}
/**
* iavf_add_fdir_ethtool - add Flow Director filter
* @adapter: pointer to the VF adapter structure
* @cmd: command to add Flow Director filter
*
* Returns 0 on success and negative values for failure
*/
static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = &cmd->fs;
struct iavf_fdir_fltr *fltr;
int count = 50;
int err;
if (!FDIR_FLTR_SUPPORT(adapter))
return -EOPNOTSUPP;
if (fsp->flow_type & FLOW_MAC_EXT)
return -EINVAL;
if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
dev_err(&adapter->pdev->dev,
"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
IAVF_MAX_FDIR_FILTERS);
return -ENOSPC;
}
spin_lock_bh(&adapter->fdir_fltr_lock);
if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
return -EEXIST;
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
if (!fltr)
return -ENOMEM;
while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
&adapter->crit_section)) {
if (--count == 0) {
kfree(fltr);
return -EINVAL;
}
udelay(1);
}
err = iavf_add_fdir_fltr_info(adapter, fsp, fltr);
if (err)
goto ret;
spin_lock_bh(&adapter->fdir_fltr_lock);
iavf_fdir_list_add_fltr(adapter, fltr);
adapter->fdir_active_fltr++;
fltr->state = IAVF_FDIR_FLTR_ADD_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER;
spin_unlock_bh(&adapter->fdir_fltr_lock);
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
ret:
if (err && fltr)
kfree(fltr);
clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
return err;
}
/**
* iavf_del_fdir_ethtool - delete Flow Director filter
* @adapter: pointer to the VF adapter structure
* @cmd: command to delete Flow Director filter
*
* Returns 0 on success and negative values for failure
*/
static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rxnfc *cmd)
{
struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
struct iavf_fdir_fltr *fltr = NULL;
int err = 0;
if (!FDIR_FLTR_SUPPORT(adapter))
return -EOPNOTSUPP;
spin_lock_bh(&adapter->fdir_fltr_lock);
fltr = iavf_find_fdir_fltr_by_loc(adapter, fsp->location);
if (fltr) {
if (fltr->state == IAVF_FDIR_FLTR_ACTIVE) {
fltr->state = IAVF_FDIR_FLTR_DEL_REQUEST;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
} else {
err = -EBUSY;
}
} else if (adapter->fdir_active_fltr) {
err = -EINVAL;
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST)
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
return err;
}
/**
* iavf_set_rxnfc - command to set Rx flow rules.
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
*
* Returns 0 for success and negative values for errors
*/
static int iavf_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
int ret = -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
ret = iavf_add_fdir_ethtool(adapter, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
ret = iavf_del_fdir_ethtool(adapter, cmd);
break;
default:
break;
}
return ret;
}
/**
* iavf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
......@@ -846,6 +1463,19 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
cmd->data = adapter->num_active_queues;
ret = 0;
break;
case ETHTOOL_GRXCLSRLCNT:
if (!FDIR_FLTR_SUPPORT(adapter))
break;
cmd->rule_cnt = adapter->fdir_active_fltr;
cmd->data = IAVF_MAX_FDIR_FILTERS;
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
ret = iavf_get_ethtool_fdir_entry(adapter, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
ret = iavf_get_fdir_fltr_ids(adapter, cmd, (u32 *)rule_locs);
break;
case ETHTOOL_GRXFH:
netdev_info(netdev,
"RSS hash info is not available to vf, use pf.\n");
......@@ -1025,6 +1655,7 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.set_coalesce = iavf_set_coalesce,
.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
.set_rxnfc = iavf_set_rxnfc,
.get_rxnfc = iavf_get_rxnfc,
.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
.get_rxfh = iavf_get_rxfh,
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
/* flow director ethtool support for iavf */
#include "iavf.h"
#define GTPU_PORT 2152
#define NAT_T_ESP_PORT 4500
#define PFCP_PORT 8805
static const struct in6_addr ipv6_addr_full_mask = {
.in6_u = {
.u6_addr8 = {
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
}
};
/**
* iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
* @fltr: Flow Director filter data structure
*/
static u16 iavf_pkt_udp_no_pay_len(struct iavf_fdir_fltr *fltr)
{
return sizeof(struct ethhdr) +
(fltr->ip_ver == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
sizeof(struct udphdr);
}
/**
* iavf_fill_fdir_gtpu_hdr - fill the GTP-U protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the GTP-U protocol header is set successfully
*/
static int
iavf_fill_fdir_gtpu_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
struct virtchnl_proto_hdr *ghdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct virtchnl_proto_hdr *ehdr = NULL; /* Extension Header if it exists */
u16 adj_offs, hdr_offs;
int i;
VIRTCHNL_SET_PROTO_HDR_TYPE(ghdr, GTPU_IP);
adj_offs = iavf_pkt_udp_no_pay_len(fltr);
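/* user-def flex offsets are measured from the start of the packet;
 * subtracting the Ethernet + IP + UDP header length converts them to
 * offsets within the GTP-U header
 */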
for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_GTPU_HDR_TEID_OFFS0 4
#define IAVF_GTPU_HDR_TEID_OFFS1 6
#define IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS 10
#define IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS 13
#define IAVF_GTPU_PSC_EXTHDR_TYPE 0x85 /* PDU Session Container Extension Header */
if (fltr->flex_words[i].offset < adj_offs)
return -EINVAL;
hdr_offs = fltr->flex_words[i].offset - adj_offs;
switch (hdr_offs) {
case IAVF_GTPU_HDR_TEID_OFFS0:
case IAVF_GTPU_HDR_TEID_OFFS1: {
__be16 *pay_word = (__be16 *)ghdr->buffer;
pay_word[hdr_offs >> 1] = htons(fltr->flex_words[i].word);
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ghdr, GTPU_IP, TEID);
}
break;
case IAVF_GTPU_HDR_N_PDU_AND_NEXT_EXTHDR_OFFS:
if ((fltr->flex_words[i].word & 0xff) != IAVF_GTPU_PSC_EXTHDR_TYPE)
return -EOPNOTSUPP;
if (!ehdr)
ehdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
VIRTCHNL_SET_PROTO_HDR_TYPE(ehdr, GTPU_EH);
break;
case IAVF_GTPU_HDR_PSC_PDU_TYPE_AND_QFI_OFFS:
if (!ehdr)
return -EINVAL;
ehdr->buffer[1] = fltr->flex_words[i].word & 0x3F;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(ehdr, GTPU_EH, QFI);
break;
default:
return -EINVAL;
}
}
uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
return 0;
}
/**
* iavf_fill_fdir_pfcp_hdr - fill the PFCP protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the PFCP protocol header is set successfully
*/
static int
iavf_fill_fdir_pfcp_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
u16 adj_offs, hdr_offs;
int i;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, PFCP);
adj_offs = iavf_pkt_udp_no_pay_len(fltr);
for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS 0
if (fltr->flex_words[i].offset < adj_offs)
return -EINVAL;
hdr_offs = fltr->flex_words[i].offset - adj_offs;
switch (hdr_offs) {
case IAVF_PFCP_HDR_SFIELD_AND_MSG_TYPE_OFFS:
hdr->buffer[0] = (fltr->flex_words[i].word >> 8) & 0xff;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, PFCP, S_FIELD);
break;
default:
return -EINVAL;
}
}
uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
return 0;
}
/**
* iavf_fill_fdir_nat_t_esp_hdr - fill the NAT-T-ESP protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the NAT-T-ESP protocol header is set successfully
*/
static int
iavf_fill_fdir_nat_t_esp_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *uhdr = &proto_hdrs->proto_hdr[proto_hdrs->count - 1];
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
u16 adj_offs, hdr_offs;
u32 spi = 0;
int i;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
adj_offs = iavf_pkt_udp_no_pay_len(fltr);
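/* the 32-bit SPI is split across two flex words: the upper 16 bits
 * at UDP-payload offset 0 and the lower 16 bits at offset 2
 */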
for (i = 0; i < fltr->flex_cnt; i++) {
#define IAVF_NAT_T_ESP_SPI_OFFS0 0
#define IAVF_NAT_T_ESP_SPI_OFFS1 2
if (fltr->flex_words[i].offset < adj_offs)
return -EINVAL;
hdr_offs = fltr->flex_words[i].offset - adj_offs;
switch (hdr_offs) {
case IAVF_NAT_T_ESP_SPI_OFFS0:
spi |= fltr->flex_words[i].word << 16;
break;
case IAVF_NAT_T_ESP_SPI_OFFS1:
spi |= fltr->flex_words[i].word;
break;
default:
return -EINVAL;
}
}
if (!spi)
return -EOPNOTSUPP; /* the IKE header format with SPI 0 is not supported */
*(__be32 *)hdr->buffer = htonl(spi);
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
uhdr->field_selector = 0; /* The PF ignores the UDP header fields */
return 0;
}
/**
* iavf_fill_fdir_udp_flex_pay_hdr - fill the UDP payload header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the UDP payload defined protocol header is set successfully
*/
static int
iavf_fill_fdir_udp_flex_pay_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
int err;
switch (ntohs(fltr->ip_data.dst_port)) {
case GTPU_PORT:
err = iavf_fill_fdir_gtpu_hdr(fltr, proto_hdrs);
break;
case NAT_T_ESP_PORT:
err = iavf_fill_fdir_nat_t_esp_hdr(fltr, proto_hdrs);
break;
case PFCP_PORT:
err = iavf_fill_fdir_pfcp_hdr(fltr, proto_hdrs);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
/**
* iavf_fill_fdir_ip4_hdr - fill the IPv4 protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the IPv4 protocol header is set successfully
*/
static int
iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct iphdr *iph = (struct iphdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
if (fltr->ip_mask.tos == U8_MAX) {
iph->tos = fltr->ip_data.tos;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
}
if (fltr->ip_mask.proto == U8_MAX) {
iph->protocol = fltr->ip_data.proto;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
}
if (fltr->ip_mask.v4_addrs.src_ip == htonl(U32_MAX)) {
iph->saddr = fltr->ip_data.v4_addrs.src_ip;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
}
if (fltr->ip_mask.v4_addrs.dst_ip == htonl(U32_MAX)) {
iph->daddr = fltr->ip_data.v4_addrs.dst_ip;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
}
fltr->ip_ver = 4;
return 0;
}
/**
* iavf_fill_fdir_ip6_hdr - fill the IPv6 protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the IPv6 protocol header is set successfully
*/
static int
iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct ipv6hdr *iph = (struct ipv6hdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
if (fltr->ip_mask.tclass == U8_MAX) {
iph->priority = (fltr->ip_data.tclass >> 4) & 0xF;
iph->flow_lbl[0] = (fltr->ip_data.tclass << 4) & 0xF0;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
}
if (fltr->ip_mask.proto == U8_MAX) {
iph->nexthdr = fltr->ip_data.proto;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
}
if (!memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
sizeof(struct in6_addr))) {
memcpy(&iph->saddr, &fltr->ip_data.v6_addrs.src_ip,
sizeof(struct in6_addr));
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
}
if (!memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
sizeof(struct in6_addr))) {
memcpy(&iph->daddr, &fltr->ip_data.v6_addrs.dst_ip,
sizeof(struct in6_addr));
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
}
fltr->ip_ver = 6;
return 0;
}
/**
* iavf_fill_fdir_tcp_hdr - fill the TCP protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the TCP protocol header is set successfully
*/
static int
iavf_fill_fdir_tcp_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct tcphdr *tcph = (struct tcphdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
if (fltr->ip_mask.src_port == htons(U16_MAX)) {
tcph->source = fltr->ip_data.src_port;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
}
if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
tcph->dest = fltr->ip_data.dst_port;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
}
return 0;
}
/**
* iavf_fill_fdir_udp_hdr - fill the UDP protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the UDP protocol header is set successfully
*/
static int
iavf_fill_fdir_udp_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct udphdr *udph = (struct udphdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
if (fltr->ip_mask.src_port == htons(U16_MAX)) {
udph->source = fltr->ip_data.src_port;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
}
if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
udph->dest = fltr->ip_data.dst_port;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
}
if (!fltr->flex_cnt)
return 0;
return iavf_fill_fdir_udp_flex_pay_hdr(fltr, proto_hdrs);
}
/**
* iavf_fill_fdir_sctp_hdr - fill the SCTP protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the SCTP protocol header is set successfully
*/
static int
iavf_fill_fdir_sctp_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct sctphdr *sctph = (struct sctphdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
if (fltr->ip_mask.src_port == htons(U16_MAX)) {
sctph->source = fltr->ip_data.src_port;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
}
if (fltr->ip_mask.dst_port == htons(U16_MAX)) {
sctph->dest = fltr->ip_data.dst_port;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
}
return 0;
}
/**
* iavf_fill_fdir_ah_hdr - fill the AH protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the AH protocol header is set successfully
*/
static int
iavf_fill_fdir_ah_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct ip_auth_hdr *ah = (struct ip_auth_hdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
if (fltr->ip_mask.spi == htonl(U32_MAX)) {
ah->spi = fltr->ip_data.spi;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
}
return 0;
}
/**
* iavf_fill_fdir_esp_hdr - fill the ESP protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the ESP protocol header is set successfully
*/
static int
iavf_fill_fdir_esp_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct ip_esp_hdr *esph = (struct ip_esp_hdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
if (fltr->ip_mask.spi == htonl(U32_MAX)) {
esph->spi = fltr->ip_data.spi;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
}
return 0;
}
/**
* iavf_fill_fdir_l4_hdr - fill the L4 protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the L4 protocol header is set successfully
*/
static int
iavf_fill_fdir_l4_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr;
__be32 *l4_4_data;
if (!fltr->ip_mask.proto) /* IPv4/IPv6 header only */
return 0;
hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
l4_4_data = (__be32 *)hdr->buffer;
/* L2TPv3 over IP with 'Session ID' */
if (fltr->ip_data.proto == 115 && fltr->ip_mask.l4_header == htonl(U32_MAX)) {
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
*l4_4_data = fltr->ip_data.l4_header;
} else {
return -EOPNOTSUPP;
}
return 0;
}
/**
* iavf_fill_fdir_eth_hdr - fill the Ethernet protocol header
* @fltr: Flow Director filter data structure
* @proto_hdrs: Flow Director protocol headers data structure
*
* Returns 0 if the Ethernet protocol header is set successfully
*/
static int
iavf_fill_fdir_eth_hdr(struct iavf_fdir_fltr *fltr,
struct virtchnl_proto_hdrs *proto_hdrs)
{
struct virtchnl_proto_hdr *hdr = &proto_hdrs->proto_hdr[proto_hdrs->count++];
struct ethhdr *ehdr = (struct ethhdr *)hdr->buffer;
VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
if (fltr->eth_mask.etype == htons(U16_MAX)) {
if (fltr->eth_data.etype == htons(ETH_P_IP) ||
fltr->eth_data.etype == htons(ETH_P_IPV6))
return -EOPNOTSUPP;
ehdr->h_proto = fltr->eth_data.etype;
VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
}
return 0;
}
/**
* iavf_fill_fdir_add_msg - fill the Flow Director filter into virtchnl message
* @adapter: pointer to the VF adapter structure
* @fltr: Flow Director filter data structure
*
* Returns 0 if the add Flow Director virtchnl message is filled successfully
*/
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
struct virtchnl_fdir_add *vc_msg = &fltr->vc_add_msg;
struct virtchnl_proto_hdrs *proto_hdrs;
int err;
proto_hdrs = &vc_msg->rule_cfg.proto_hdrs;
err = iavf_fill_fdir_eth_hdr(fltr, proto_hdrs); /* L2 always exists */
if (err)
return err;
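/* each fill helper returns 0 or a negative error code; OR-ing the two
 * results is nonzero (an error) if either helper failed
 */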
switch (fltr->flow_type) {
case IAVF_FDIR_FLOW_IPV4_TCP:
err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV4_UDP:
err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV4_SCTP:
err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV4_AH:
err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV4_ESP:
err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV4_OTHER:
err = iavf_fill_fdir_ip4_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV6_TCP:
err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_tcp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV6_UDP:
err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_udp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV6_SCTP:
err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_sctp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV6_AH:
err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_ah_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV6_ESP:
err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_esp_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_IPV6_OTHER:
err = iavf_fill_fdir_ip6_hdr(fltr, proto_hdrs) |
iavf_fill_fdir_l4_hdr(fltr, proto_hdrs);
break;
case IAVF_FDIR_FLOW_NON_IP_L2:
break;
default:
err = -EINVAL;
break;
}
if (err)
return err;
vc_msg->vsi_id = adapter->vsi.id;
vc_msg->rule_cfg.action_set.count = 1;
vc_msg->rule_cfg.action_set.actions[0].type = fltr->action;
vc_msg->rule_cfg.action_set.actions[0].act_conf.queue.index = fltr->q_index;
return 0;
}
/**
* iavf_fdir_flow_proto_name - get the flow protocol name
* @flow_type: Flow Director filter flow type
**/
static const char *iavf_fdir_flow_proto_name(enum iavf_fdir_flow_type flow_type)
{
switch (flow_type) {
case IAVF_FDIR_FLOW_IPV4_TCP:
case IAVF_FDIR_FLOW_IPV6_TCP:
return "TCP";
case IAVF_FDIR_FLOW_IPV4_UDP:
case IAVF_FDIR_FLOW_IPV6_UDP:
return "UDP";
case IAVF_FDIR_FLOW_IPV4_SCTP:
case IAVF_FDIR_FLOW_IPV6_SCTP:
return "SCTP";
case IAVF_FDIR_FLOW_IPV4_AH:
case IAVF_FDIR_FLOW_IPV6_AH:
return "AH";
case IAVF_FDIR_FLOW_IPV4_ESP:
case IAVF_FDIR_FLOW_IPV6_ESP:
return "ESP";
case IAVF_FDIR_FLOW_IPV4_OTHER:
case IAVF_FDIR_FLOW_IPV6_OTHER:
return "Other";
case IAVF_FDIR_FLOW_NON_IP_L2:
return "Ethernet";
default:
return NULL;
}
}
/**
* iavf_print_fdir_fltr
* @adapter: adapter structure
* @fltr: Flow Director filter to print
*
* Print the Flow Director filter
**/
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
const char *proto = iavf_fdir_flow_proto_name(fltr->flow_type);
if (!proto)
return;
switch (fltr->flow_type) {
case IAVF_FDIR_FLOW_IPV4_TCP:
case IAVF_FDIR_FLOW_IPV4_UDP:
case IAVF_FDIR_FLOW_IPV4_SCTP:
dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: dst_port %hu src_port %hu\n",
fltr->loc,
&fltr->ip_data.v4_addrs.dst_ip,
&fltr->ip_data.v4_addrs.src_ip,
proto,
ntohs(fltr->ip_data.dst_port),
ntohs(fltr->ip_data.src_port));
break;
case IAVF_FDIR_FLOW_IPV4_AH:
case IAVF_FDIR_FLOW_IPV4_ESP:
dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 %s: SPI %u\n",
fltr->loc,
&fltr->ip_data.v4_addrs.dst_ip,
&fltr->ip_data.v4_addrs.src_ip,
proto,
ntohl(fltr->ip_data.spi));
break;
case IAVF_FDIR_FLOW_IPV4_OTHER:
dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI4 src_ip %pI4 proto: %u L4_bytes: 0x%x\n",
fltr->loc,
&fltr->ip_data.v4_addrs.dst_ip,
&fltr->ip_data.v4_addrs.src_ip,
fltr->ip_data.proto,
ntohl(fltr->ip_data.l4_header));
break;
case IAVF_FDIR_FLOW_IPV6_TCP:
case IAVF_FDIR_FLOW_IPV6_UDP:
case IAVF_FDIR_FLOW_IPV6_SCTP:
dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: dst_port %hu src_port %hu\n",
fltr->loc,
&fltr->ip_data.v6_addrs.dst_ip,
&fltr->ip_data.v6_addrs.src_ip,
proto,
ntohs(fltr->ip_data.dst_port),
ntohs(fltr->ip_data.src_port));
break;
case IAVF_FDIR_FLOW_IPV6_AH:
case IAVF_FDIR_FLOW_IPV6_ESP:
dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 %s: SPI %u\n",
fltr->loc,
&fltr->ip_data.v6_addrs.dst_ip,
&fltr->ip_data.v6_addrs.src_ip,
proto,
ntohl(fltr->ip_data.spi));
break;
case IAVF_FDIR_FLOW_IPV6_OTHER:
dev_info(&adapter->pdev->dev, "Rule ID: %u dst_ip: %pI6 src_ip %pI6 proto: %u L4_bytes: 0x%x\n",
fltr->loc,
&fltr->ip_data.v6_addrs.dst_ip,
&fltr->ip_data.v6_addrs.src_ip,
fltr->ip_data.proto,
ntohl(fltr->ip_data.l4_header));
break;
case IAVF_FDIR_FLOW_NON_IP_L2:
dev_info(&adapter->pdev->dev, "Rule ID: %u eth_type: 0x%x\n",
fltr->loc,
ntohs(fltr->eth_data.etype));
break;
default:
break;
}
}
/**
* iavf_fdir_is_dup_fltr - test if filter is already in list
* @adapter: pointer to the VF adapter structure
* @fltr: Flow Director filter data structure
*
* Returns true if the filter is found in the list
*/
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *tmp;
bool ret = false;
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
if (tmp->flow_type != fltr->flow_type)
continue;
if (!memcmp(&tmp->eth_data, &fltr->eth_data,
sizeof(fltr->eth_data)) &&
!memcmp(&tmp->ip_data, &fltr->ip_data,
sizeof(fltr->ip_data)) &&
!memcmp(&tmp->ext_data, &fltr->ext_data,
sizeof(fltr->ext_data))) {
ret = true;
break;
}
}
return ret;
}
/**
* iavf_find_fdir_fltr_by_loc - find filter with location
* @adapter: pointer to the VF adapter structure
* @loc: location to find.
*
* Returns pointer to Flow Director filter if found or null
*/
struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc)
{
struct iavf_fdir_fltr *rule;
list_for_each_entry(rule, &adapter->fdir_list_head, list)
if (rule->loc == loc)
return rule;
return NULL;
}
/**
* iavf_fdir_list_add_fltr - add a new node to the flow director filter list
* @adapter: pointer to the VF adapter structure
* @fltr: filter node to add to structure
*/
void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *rule, *parent = NULL;
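/* the list is kept sorted by rule location: find the last entry with
 * a lower location and insert the new filter right after it
 */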
list_for_each_entry(rule, &adapter->fdir_list_head, list) {
if (rule->loc >= fltr->loc)
break;
parent = rule;
}
if (parent)
list_add(&fltr->list, &parent->list);
else
list_add(&fltr->list, &adapter->fdir_list_head);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021, Intel Corporation. */
#ifndef _IAVF_FDIR_H_
#define _IAVF_FDIR_H_
struct iavf_adapter;
/* State of Flow Director filter */
enum iavf_fdir_fltr_state_t {
IAVF_FDIR_FLTR_ADD_REQUEST, /* User requests to add filter */
IAVF_FDIR_FLTR_ADD_PENDING, /* Filter pending add by the PF */
IAVF_FDIR_FLTR_DEL_REQUEST, /* User requests to delete filter */
IAVF_FDIR_FLTR_DEL_PENDING, /* Filter pending delete by the PF */
IAVF_FDIR_FLTR_ACTIVE, /* Filter is active */
};
enum iavf_fdir_flow_type {
/* NONE - used for undef/error */
IAVF_FDIR_FLOW_NONE = 0,
IAVF_FDIR_FLOW_IPV4_TCP,
IAVF_FDIR_FLOW_IPV4_UDP,
IAVF_FDIR_FLOW_IPV4_SCTP,
IAVF_FDIR_FLOW_IPV4_AH,
IAVF_FDIR_FLOW_IPV4_ESP,
IAVF_FDIR_FLOW_IPV4_OTHER,
IAVF_FDIR_FLOW_IPV6_TCP,
IAVF_FDIR_FLOW_IPV6_UDP,
IAVF_FDIR_FLOW_IPV6_SCTP,
IAVF_FDIR_FLOW_IPV6_AH,
IAVF_FDIR_FLOW_IPV6_ESP,
IAVF_FDIR_FLOW_IPV6_OTHER,
IAVF_FDIR_FLOW_NON_IP_L2,
/* MAX - this must be last and add anything new just above it */
IAVF_FDIR_FLOW_PTYPE_MAX,
};
struct iavf_flex_word {
u16 offset;
u16 word;
};
struct iavf_ipv4_addrs {
__be32 src_ip;
__be32 dst_ip;
};
struct iavf_ipv6_addrs {
struct in6_addr src_ip;
struct in6_addr dst_ip;
};
struct iavf_fdir_eth {
__be16 etype;
};
struct iavf_fdir_ip {
union {
struct iavf_ipv4_addrs v4_addrs;
struct iavf_ipv6_addrs v6_addrs;
};
__be16 src_port;
__be16 dst_port;
__be32 l4_header; /* first 4 bytes of the layer 4 header */
__be32 spi; /* security parameter index for AH/ESP */
union {
u8 tos;
u8 tclass;
};
u8 proto;
};
struct iavf_fdir_extra {
u32 usr_def[2];
};
/* bookkeeping of Flow Director filters */
struct iavf_fdir_fltr {
enum iavf_fdir_fltr_state_t state;
struct list_head list;
enum iavf_fdir_flow_type flow_type;
struct iavf_fdir_eth eth_data;
struct iavf_fdir_eth eth_mask;
struct iavf_fdir_ip ip_data;
struct iavf_fdir_ip ip_mask;
struct iavf_fdir_extra ext_data;
struct iavf_fdir_extra ext_mask;
enum virtchnl_action action;
/* flex byte filter data */
u8 ip_ver; /* used to adjust the flex offset, 4 : IPv4, 6 : IPv6 */
u8 flex_cnt;
struct iavf_flex_word flex_words[2];
u32 flow_id;
u32 loc; /* Rule location inside the flow table */
u32 q_index;
struct virtchnl_fdir_add vc_add_msg;
};
int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
void iavf_fdir_list_add_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
struct iavf_fdir_fltr *iavf_find_fdir_fltr_by_loc(struct iavf_adapter *adapter, u32 loc);
#endif /* _IAVF_FDIR_H_ */
......@@ -959,8 +959,9 @@ void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct iavf_vlan_filter *vlf;
struct iavf_mac_filter *f;
struct iavf_cloud_filter *cf;
struct iavf_fdir_fltr *fdir;
struct iavf_mac_filter *f;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
......@@ -996,6 +997,13 @@ void iavf_down(struct iavf_adapter *adapter)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
/* remove all Flow Director filters */
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __IAVF_RESETTING) {
/* cancel any current operation */
......@@ -1007,6 +1015,7 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
......@@ -1629,6 +1638,14 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_add_cloud_filter(adapter);
return 0;
}
if (adapter->aq_required & IAVF_FLAG_AQ_ADD_FDIR_FILTER) {
iavf_add_fdir_filter(adapter);
return IAVF_SUCCESS;
}
if (adapter->aq_required & IAVF_FLAG_AQ_DEL_FDIR_FILTER) {
iavf_del_fdir_filter(adapter);
return IAVF_SUCCESS;
}
return -EAGAIN;
}
......@@ -3738,10 +3755,12 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->mac_vlan_list_lock);
spin_lock_init(&adapter->cloud_filter_list_lock);
spin_lock_init(&adapter->fdir_fltr_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
INIT_LIST_HEAD(&adapter->cloud_filter_list);
INIT_LIST_HEAD(&adapter->fdir_list_head);
INIT_WORK(&adapter->reset_task, iavf_reset_task);
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
......@@ -3845,6 +3864,7 @@ static void iavf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct iavf_adapter *adapter = netdev_priv(netdev);
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_mac_filter *f, *ftmp;
struct iavf_cloud_filter *cf, *cftmp;
......@@ -3926,6 +3946,13 @@ static void iavf_remove(struct pci_dev *pdev)
}
spin_unlock_bh(&adapter->cloud_filter_list_lock);
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head, list) {
list_del(&fdir->list);
kfree(fdir);
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
......
......@@ -140,6 +140,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
VIRTCHNL_VF_OFFLOAD_FDIR_PF |
VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
......@@ -1197,6 +1198,101 @@ void iavf_del_cloud_filter(struct iavf_adapter *adapter)
kfree(f);
}
/**
* iavf_add_fdir_filter
* @adapter: the VF adapter structure
*
* Request that the PF add Flow Director filters as specified
* by the user via ethtool.
**/
void iavf_add_fdir_filter(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *fdir;
struct virtchnl_fdir_add *f;
bool process_fltr = false;
int len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot add Flow Director filter, command %d pending\n",
adapter->current_op);
return;
}
len = sizeof(struct virtchnl_fdir_add);
f = kzalloc(len, GFP_KERNEL);
if (!f)
return;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
process_fltr = true;
fdir->state = IAVF_FDIR_FLTR_ADD_PENDING;
memcpy(f, &fdir->vc_add_msg, len);
break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (!process_fltr) {
/* prevent iavf_add_fdir_filter() from being called when there
* are no filters to add
*/
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_FDIR_FILTER;
kfree(f);
return;
}
adapter->current_op = VIRTCHNL_OP_ADD_FDIR_FILTER;
iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_FDIR_FILTER, (u8 *)f, len);
kfree(f);
}
/**
* iavf_del_fdir_filter
* @adapter: the VF adapter structure
*
* Request that the PF delete Flow Director filters as specified
* by the user via ethtool.
**/
void iavf_del_fdir_filter(struct iavf_adapter *adapter)
{
struct iavf_fdir_fltr *fdir;
struct virtchnl_fdir_del f;
bool process_fltr = false;
int len;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot remove Flow Director filter, command %d pending\n",
adapter->current_op);
return;
}
len = sizeof(struct virtchnl_fdir_del);
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_REQUEST) {
process_fltr = true;
memset(&f, 0, len);
f.vsi_id = fdir->vc_add_msg.vsi_id;
f.flow_id = fdir->flow_id;
fdir->state = IAVF_FDIR_FLTR_DEL_PENDING;
break;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
if (!process_fltr) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_FDIR_FILTER;
return;
}
adapter->current_op = VIRTCHNL_OP_DEL_FDIR_FILTER;
iavf_send_pf_msg(adapter, VIRTCHNL_OP_DEL_FDIR_FILTER, (u8 *)&f, len);
}
/**
* iavf_request_reset
* @adapter: adapter structure
......@@ -1357,6 +1453,50 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER: {
struct iavf_fdir_fltr *fdir, *fdir_tmp;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(fdir, fdir_tmp,
&adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
v_retval));
iavf_print_fdir_fltr(adapter, fdir);
if (msglen)
dev_err(&adapter->pdev->dev,
"%s\n", msg);
list_del(&fdir->list);
kfree(fdir);
adapter->fdir_active_fltr--;
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER: {
struct iavf_fdir_fltr *fdir;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(fdir, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to del Flow Director filter, error %s\n",
iavf_stat_str(&adapter->hw,
v_retval));
iavf_print_fdir_fltr(adapter, fdir);
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
dev_warn(&adapter->pdev->dev, "Changing VLAN Stripping is not allowed when Port VLAN is configured\n");
break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval, iavf_stat_str(&adapter->hw, v_retval),
......@@ -1490,6 +1630,58 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER: {
struct virtchnl_fdir_add *add_fltr = (struct virtchnl_fdir_add *)msg;
struct iavf_fdir_fltr *fdir, *fdir_tmp;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(fdir, fdir_tmp,
&adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_ADD_PENDING) {
if (add_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is added\n",
fdir->loc);
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
fdir->flow_id = add_fltr->flow_id;
} else {
dev_info(&adapter->pdev->dev, "Failed to add Flow Director filter with status: %d\n",
add_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
list_del(&fdir->list);
kfree(fdir);
adapter->fdir_active_fltr--;
}
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER: {
struct virtchnl_fdir_del *del_fltr = (struct virtchnl_fdir_del *)msg;
struct iavf_fdir_fltr *fdir, *fdir_tmp;
spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry_safe(fdir, fdir_tmp, &adapter->fdir_list_head,
list) {
if (fdir->state == IAVF_FDIR_FLTR_DEL_PENDING) {
if (del_fltr->status == VIRTCHNL_FDIR_SUCCESS) {
dev_info(&adapter->pdev->dev, "Flow Director filter with location %u is deleted\n",
fdir->loc);
list_del(&fdir->list);
kfree(fdir);
adapter->fdir_active_fltr--;
} else {
fdir->state = IAVF_FDIR_FLTR_ACTIVE;
dev_info(&adapter->pdev->dev, "Failed to delete Flow Director filter with status: %d\n",
del_fltr->status);
iavf_print_fdir_fltr(adapter, fdir);
}
}
}
spin_unlock_bh(&adapter->fdir_fltr_lock);
}
break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
......
......@@ -26,7 +26,7 @@ ice-y := ice_main.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
......@@ -73,7 +73,7 @@
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 1
#define ICE_FDIR_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
......@@ -84,6 +84,8 @@
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
......@@ -229,6 +231,7 @@ enum ice_state {
__ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
__ICE_PHY_INIT_COMPLETE,
__ICE_FD_VF_FLUSH_CTX, /* set at FD Rx IRQ or timeout */
__ICE_STATE_NBITS /* must be last */
};
......
......@@ -1679,6 +1679,10 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
input->flex_offset = userdata.flex_offset;
}
input->cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
/* input struct is added to the HW filter list */
ice_fdir_update_list_entry(pf, input, fsp->location);
......
......@@ -40,6 +40,204 @@ static const u8 ice_fdir_ipv4_pkt[] = {
0x00, 0x00
};
static const u8 ice_fdir_udp4_gtpu4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
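For easier auditing of the GTP-U training packets above, the byte layout of ice_fdir_udp4_gtpu4_pkt (offsets assume the standard 14-byte Ethernet + 20-byte IPv4 + 8-byte UDP encapsulation; annotation only, not part of the patch):

    /*
     *  0..13  Ethernet header
     * 14..33  outer IPv4 header (protocol 0x11 = UDP)
     * 34..41  UDP header, src/dst port 0x0868 (2152, GTP-U)
     * 42..45  GTP-U flags 0x34 (version 1, PT=1, E=1), type 0xff, length
     * 46..49  TEID, matching ICE_IPV4_GTPU_TEID_OFFSET (46)
     * 53      next-extension type 0x85 (PDU session container)
     * 56      PPP/RQI/QFI byte, matching ICE_IPV4_GTPU_QFI_OFFSET (56)
     * 62..    inner IPv4 header follows the extension
     */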
static const u8 ice_fdir_tcp4_gtpu4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x58, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
0x00, 0x28, 0x00, 0x00, 0x40, 0x00, 0x40, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ice_fdir_icmp4_gtpu4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x4c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
0x00, 0x1c, 0x00, 0x00, 0x40, 0x00, 0x40, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
static const u8 ice_fdir_ipv4_gtpu4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x44, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x08, 0x68, 0x08, 0x68, 0x00, 0x00,
0x00, 0x00, 0x34, 0xff, 0x00, 0x28, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x02, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x00,
0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
static const u8 ice_fdir_ipv4_l2tpv3_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x73,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ice_fdir_ipv6_l2tpv3_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
0x00, 0x00, 0x00, 0x00, 0x73, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
static const u8 ice_fdir_ipv4_esp_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x32,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00
};
static const u8 ice_fdir_ipv6_esp_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
0x00, 0x00, 0x00, 0x00, 0x32, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ice_fdir_ipv4_ah_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x14, 0x00, 0x00, 0x40, 0x00, 0x40, 0x33,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00
};
static const u8 ice_fdir_ipv6_ah_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
0x00, 0x00, 0x00, 0x00, 0x33, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ice_fdir_ipv4_nat_t_esp_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x1C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
static const u8 ice_fdir_ipv6_nat_t_esp_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
0x00, 0x00, 0x00, 0x08, 0x11, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x11, 0x94, 0x00, 0x00, 0x00, 0x08,
};
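Both NAT-T ESP templates above use UDP destination port 0x1194 (4500, IPsec NAT traversal), with the SPI placed directly after the UDP header, which is what the ICE_IPV4_NAT_T_ESP_SPI_OFFSET (42) and ICE_IPV6_NAT_T_ESP_SPI_OFFSET (62) definitions later in this patch encode.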
static const u8 ice_fdir_ipv4_pfcp_node_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00,
0x00, 0x00, 0x20, 0x00, 0x00, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
static const u8 ice_fdir_ipv4_pfcp_session_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
0x00, 0x2C, 0x00, 0x00, 0x40, 0x00, 0x40, 0x11,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x22, 0x65, 0x22, 0x65, 0x00, 0x00,
0x00, 0x00, 0x21, 0x00, 0x00, 0x10, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00,
};
static const u8 ice_fdir_ipv6_pfcp_node_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65,
0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00,
0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ice_fdir_ipv6_pfcp_session_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
0x00, 0x00, 0x00, 0x18, 0x11, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x65,
0x22, 0x65, 0x00, 0x00, 0x00, 0x00, 0x21, 0x00,
0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
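A note on the four PFCP templates above: all use UDP port 0x2265 (8805), and the node vs. session variants differ only in the PFCP flags byte immediately after the UDP header, 0x20 (S=0, node-related message) versus 0x21 (S=1, session-related message).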
static const u8 ice_fdir_non_ip_l2_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
static const u8 ice_fdir_tcpv6_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x86, 0xDD, 0x60, 0x00,
......@@ -238,6 +436,111 @@ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
sizeof(ice_fdir_ipv4_pkt), ice_fdir_ipv4_pkt,
sizeof(ice_fdir_ip4_tun_pkt), ice_fdir_ip4_tun_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP,
sizeof(ice_fdir_udp4_gtpu4_pkt),
ice_fdir_udp4_gtpu4_pkt,
sizeof(ice_fdir_udp4_gtpu4_pkt),
ice_fdir_udp4_gtpu4_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP,
sizeof(ice_fdir_tcp4_gtpu4_pkt),
ice_fdir_tcp4_gtpu4_pkt,
sizeof(ice_fdir_tcp4_gtpu4_pkt),
ice_fdir_tcp4_gtpu4_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP,
sizeof(ice_fdir_icmp4_gtpu4_pkt),
ice_fdir_icmp4_gtpu4_pkt,
sizeof(ice_fdir_icmp4_gtpu4_pkt),
ice_fdir_icmp4_gtpu4_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER,
sizeof(ice_fdir_ipv4_gtpu4_pkt),
ice_fdir_ipv4_gtpu4_pkt,
sizeof(ice_fdir_ipv4_gtpu4_pkt),
ice_fdir_ipv4_gtpu4_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3,
sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt,
sizeof(ice_fdir_ipv4_l2tpv3_pkt), ice_fdir_ipv4_l2tpv3_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3,
sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt,
sizeof(ice_fdir_ipv6_l2tpv3_pkt), ice_fdir_ipv6_l2tpv3_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_ESP,
sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt,
sizeof(ice_fdir_ipv4_esp_pkt), ice_fdir_ipv4_esp_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_ESP,
sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt,
sizeof(ice_fdir_ipv6_esp_pkt), ice_fdir_ipv6_esp_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_AH,
sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt,
sizeof(ice_fdir_ipv4_ah_pkt), ice_fdir_ipv4_ah_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_AH,
sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt,
sizeof(ice_fdir_ipv6_ah_pkt), ice_fdir_ipv6_ah_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP,
sizeof(ice_fdir_ipv4_nat_t_esp_pkt),
ice_fdir_ipv4_nat_t_esp_pkt,
sizeof(ice_fdir_ipv4_nat_t_esp_pkt),
ice_fdir_ipv4_nat_t_esp_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP,
sizeof(ice_fdir_ipv6_nat_t_esp_pkt),
ice_fdir_ipv6_nat_t_esp_pkt,
sizeof(ice_fdir_ipv6_nat_t_esp_pkt),
ice_fdir_ipv6_nat_t_esp_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE,
sizeof(ice_fdir_ipv4_pfcp_node_pkt),
ice_fdir_ipv4_pfcp_node_pkt,
sizeof(ice_fdir_ipv4_pfcp_node_pkt),
ice_fdir_ipv4_pfcp_node_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION,
sizeof(ice_fdir_ipv4_pfcp_session_pkt),
ice_fdir_ipv4_pfcp_session_pkt,
sizeof(ice_fdir_ipv4_pfcp_session_pkt),
ice_fdir_ipv4_pfcp_session_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE,
sizeof(ice_fdir_ipv6_pfcp_node_pkt),
ice_fdir_ipv6_pfcp_node_pkt,
sizeof(ice_fdir_ipv6_pfcp_node_pkt),
ice_fdir_ipv6_pfcp_node_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION,
sizeof(ice_fdir_ipv6_pfcp_session_pkt),
ice_fdir_ipv6_pfcp_session_pkt,
sizeof(ice_fdir_ipv6_pfcp_session_pkt),
ice_fdir_ipv6_pfcp_session_pkt,
},
{
ICE_FLTR_PTYPE_NON_IP_L2,
sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt,
sizeof(ice_fdir_non_ip_l2_pkt), ice_fdir_non_ip_l2_pkt,
},
{
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
sizeof(ice_fdir_tcpv6_pkt), ice_fdir_tcpv6_pkt,
......@@ -374,21 +677,31 @@ ice_fdir_get_prgm_desc(struct ice_hw *hw, struct ice_fdir_fltr *input,
if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_YES;
fdir_fltr_ctx.qindex = 0;
} else if (input->dest_ctl ==
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER) {
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
fdir_fltr_ctx.qindex = 0;
} else {
if (input->dest_ctl ==
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP)
fdir_fltr_ctx.toq = input->q_region;
fdir_fltr_ctx.drop = ICE_FXD_FLTR_QW0_DROP_NO;
fdir_fltr_ctx.qindex = input->q_index;
}
fdir_fltr_ctx.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
fdir_fltr_ctx.cnt_ena = input->cnt_ena;
fdir_fltr_ctx.cnt_index = input->cnt_index;
fdir_fltr_ctx.fd_vsi = ice_get_hw_vsi_num(hw, input->dest_vsi);
fdir_fltr_ctx.evict_ena = ICE_FXD_FLTR_QW0_EVICT_ENA_FALSE;
fdir_fltr_ctx.toq_prio = 3;
if (input->dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER)
fdir_fltr_ctx.toq_prio = 0;
else
fdir_fltr_ctx.toq_prio = 3;
fdir_fltr_ctx.pcmd = add ? ICE_FXD_FLTR_QW1_PCMD_ADD :
ICE_FXD_FLTR_QW1_PCMD_REMOVE;
fdir_fltr_ctx.swap = ICE_FXD_FLTR_QW1_SWAP_NOT_SET;
fdir_fltr_ctx.comp_q = ICE_FXD_FLTR_QW0_COMP_Q_ZERO;
fdir_fltr_ctx.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL;
fdir_fltr_ctx.fdid_prio = 3;
fdir_fltr_ctx.comp_report = input->comp_report;
fdir_fltr_ctx.fdid_prio = input->fdid_prio;
fdir_fltr_ctx.desc_prof = 1;
fdir_fltr_ctx.desc_prof_prio = 3;
ice_set_fd_desc_val(&fdir_fltr_ctx, fdesc);
......@@ -470,6 +783,55 @@ static void ice_pkt_insert_ipv6_addr(u8 *pkt, int offset, __be32 *addr)
sizeof(*addr));
}
/**
* ice_pkt_insert_u6_qfi - insert a 6-bit QFI value into a memory buffer for GTPU
* @pkt: packet buffer
* @offset: offset into buffer
* @data: 8 bit value to convert and insert into pkt at offset
*
* This function inserts the 6-bit GTP-U QFI value while preserving the upper
* two bits of the byte already in the packet.
*/
static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data)
{
u8 ret;
ret = (data & 0x3F) + (*(pkt + offset) & 0xC0);
memcpy(pkt + offset, &ret, sizeof(ret));
}
/**
* ice_pkt_insert_u8 - insert a u8 value into a memory buffer.
* @pkt: packet buffer
* @offset: offset into buffer
* @data: 8 bit value to convert and insert into pkt at offset
*/
static void ice_pkt_insert_u8(u8 *pkt, int offset, u8 data)
{
memcpy(pkt + offset, &data, sizeof(data));
}
/**
* ice_pkt_insert_u8_tc - insert a u8 value into a memory buffer for TC ipv6.
* @pkt: packet buffer
* @offset: offset into buffer
* @data: 8 bit value to convert and insert into pkt at offset
*
* This function is designed for inserting the IPv6 Traffic Class (TC)
* field, which is not byte aligned. The value is split into two nibbles;
* each target byte is rebuilt from the bits already in the packet plus
* the relevant nibble of @data, then written back one byte at a time.
*/
static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data)
{
u8 high, low;
high = (data >> 4) + (*(pkt + offset) & 0xF0);
memcpy(pkt + offset, &high, sizeof(high));
low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4);
memcpy(pkt + offset + 1, &low, sizeof(low));
}
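As a sanity check of the two bit-merge helpers above, a minimal standalone sketch (hypothetical host-side test harness, not part of the driver; the helpers are reproduced verbatim only so they can be exercised):

    #include <assert.h>
    #include <string.h>

    typedef unsigned char u8;

    /* copy of ice_pkt_insert_u6_qfi above */
    static void ice_pkt_insert_u6_qfi(u8 *pkt, int offset, u8 data)
    {
    	u8 ret;

    	ret = (data & 0x3F) + (*(pkt + offset) & 0xC0);
    	memcpy(pkt + offset, &ret, sizeof(ret));
    }

    /* copy of ice_pkt_insert_u8_tc above */
    static void ice_pkt_insert_u8_tc(u8 *pkt, int offset, u8 data)
    {
    	u8 high, low;

    	high = (data >> 4) + (*(pkt + offset) & 0xF0);
    	memcpy(pkt + offset, &high, sizeof(high));

    	low = (*(pkt + offset + 1) & 0x0F) + ((data & 0x0F) << 4);
    	memcpy(pkt + offset + 1, &low, sizeof(low));
    }

    int main(void)
    {
    	u8 gtpu = 0x80;           /* PPP=1, RQI=0, QFI=0 */
    	u8 v6[2] = { 0x60, 0x0A };  /* version 6, TC 0, flow-label nibble 0xA */

    	ice_pkt_insert_u6_qfi(&gtpu, 0, 0x09);
    	assert(gtpu == 0x89);     /* top two bits preserved, QFI = 9 */

    	ice_pkt_insert_u8_tc(v6, 0, 0xAB);
    	assert(v6[0] == 0x6A && v6[1] == 0xBA); /* TC straddles the nibbles */
    	return 0;
    }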
/**
* ice_pkt_insert_u16 - insert a be16 value into a memory buffer
* @pkt: packet buffer
......@@ -492,6 +854,16 @@ static void ice_pkt_insert_u32(u8 *pkt, int offset, __be32 data)
memcpy(pkt + offset, &data, sizeof(data));
}
/**
* ice_pkt_insert_mac_addr - insert a MAC addr into a memory buffer.
* @pkt: packet buffer
* @addr: MAC address to insert at the start of pkt
*/
static void ice_pkt_insert_mac_addr(u8 *pkt, u8 *addr)
{
ether_addr_copy(pkt, addr);
}
/**
* ice_fdir_get_gen_prgm_pkt - generate a training packet
* @hw: pointer to the hardware structure
......@@ -520,11 +892,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
case IPPROTO_SCTP:
flow = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
break;
case IPPROTO_IP:
default:
flow = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
break;
default:
return ICE_ERR_PARAM;
}
} else if (input->flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
switch (input->ip.v6.proto) {
......@@ -537,11 +907,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
case IPPROTO_SCTP:
flow = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
break;
case IPPROTO_IP:
default:
flow = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
break;
default:
return ICE_ERR_PARAM;
}
} else {
flow = input->flow_type;
......@@ -580,6 +948,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_TCP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
break;
......@@ -592,6 +963,11 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
ice_pkt_insert_mac_addr(loc + ETH_ALEN,
input->ext_data.src_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
......@@ -602,13 +978,87 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_SCTP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
input->ip.v4.dst_ip);
ice_pkt_insert_u16(loc, ICE_IPV4_PROTO_OFFSET, 0);
ice_pkt_insert_u8(loc, ICE_IPV4_TOS_OFFSET, input->ip.v4.tos);
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_u8(loc, ICE_IPV4_PROTO_OFFSET,
input->ip.v4.proto);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
input->ip.v4.dst_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_GTPU_TEID_OFFSET,
input->gtpu_data.teid);
ice_pkt_insert_u6_qfi(loc, ICE_IPV4_GTPU_QFI_OFFSET,
input->gtpu_data.qfi);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
ice_pkt_insert_u32(loc, ICE_IPV4_L2TPV3_SESS_ID_OFFSET,
input->l2tpv3_data.session_id);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
ice_pkt_insert_u32(loc, ICE_IPV6_L2TPV3_SESS_ID_OFFSET,
input->l2tpv3_data.session_id);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
ice_pkt_insert_u32(loc, ICE_IPV4_ESP_SPI_OFFSET,
input->ip.v4.sec_parm_idx);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
ice_pkt_insert_u32(loc, ICE_IPV6_ESP_SPI_OFFSET,
input->ip.v6.sec_parm_idx);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_AH:
ice_pkt_insert_u32(loc, ICE_IPV4_AH_SPI_OFFSET,
input->ip.v4.sec_parm_idx);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_AH:
ice_pkt_insert_u32(loc, ICE_IPV6_AH_SPI_OFFSET,
input->ip.v6.sec_parm_idx);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_SRC_ADDR_OFFSET,
input->ip.v4.dst_ip);
ice_pkt_insert_u32(loc, ICE_IPV4_NAT_T_ESP_SPI_OFFSET,
input->ip.v4.sec_parm_idx);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
input->ip.v6.src_ip);
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
input->ip.v6.dst_ip);
ice_pkt_insert_u32(loc, ICE_IPV6_NAT_T_ESP_SPI_OFFSET,
input->ip.v6.sec_parm_idx);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
ice_pkt_insert_u16(loc, ICE_IPV4_UDP_SRC_PORT_OFFSET,
input->ip.v4.dst_port);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
break;
case ICE_FLTR_PTYPE_NON_IP_L2:
ice_pkt_insert_u16(loc, ICE_MAC_ETHTYPE_OFFSET,
input->ext_data.ether_type);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
......@@ -619,6 +1069,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_TCP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
......@@ -629,6 +1082,9 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_UDP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
......@@ -639,12 +1095,20 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
input->ip.v6.src_port);
ice_pkt_insert_u16(loc, ICE_IPV6_SCTP_SRC_PORT_OFFSET,
input->ip.v6.dst_port);
ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_DST_ADDR_OFFSET,
input->ip.v6.src_ip);
ice_pkt_insert_ipv6_addr(loc, ICE_IPV6_SRC_ADDR_OFFSET,
input->ip.v6.dst_ip);
ice_pkt_insert_u8_tc(loc, ICE_IPV6_TC_OFFSET, input->ip.v6.tc);
ice_pkt_insert_u8(loc, ICE_IPV6_HLIM_OFFSET, input->ip.v6.hlim);
ice_pkt_insert_u8(loc, ICE_IPV6_PROTO_OFFSET,
input->ip.v6.proto);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
break;
default:
return ICE_ERR_PARAM;
......
......@@ -25,6 +25,25 @@
#define ICE_IPV6_UDP_DST_PORT_OFFSET 56
#define ICE_IPV6_SCTP_SRC_PORT_OFFSET 54
#define ICE_IPV6_SCTP_DST_PORT_OFFSET 56
#define ICE_MAC_ETHTYPE_OFFSET 12
#define ICE_IPV4_TOS_OFFSET 15
#define ICE_IPV4_TTL_OFFSET 22
#define ICE_IPV6_TC_OFFSET 14
#define ICE_IPV6_HLIM_OFFSET 21
#define ICE_IPV6_PROTO_OFFSET 20
#define ICE_IPV4_GTPU_TEID_OFFSET 46
#define ICE_IPV4_GTPU_QFI_OFFSET 56
#define ICE_IPV4_L2TPV3_SESS_ID_OFFSET 34
#define ICE_IPV6_L2TPV3_SESS_ID_OFFSET 54
#define ICE_IPV4_ESP_SPI_OFFSET 34
#define ICE_IPV6_ESP_SPI_OFFSET 54
#define ICE_IPV4_AH_SPI_OFFSET 38
#define ICE_IPV6_AH_SPI_OFFSET 58
#define ICE_IPV4_NAT_T_ESP_SPI_OFFSET 42
#define ICE_IPV6_NAT_T_ESP_SPI_OFFSET 62
#define ICE_FDIR_MAX_FLTRS 16384
/* IP v4 has 2 flag bits that enable fragment processing: DF and MF. DF
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
......@@ -34,6 +53,8 @@
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP,
ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER,
};
enum ice_fltr_prgm_desc_fd_status {
......@@ -86,6 +107,7 @@ struct ice_fdir_v4 {
u8 tos;
u8 ip_ver;
u8 proto;
u8 ttl;
};
#define ICE_IPV6_ADDR_LEN_AS_U32 4
......@@ -99,10 +121,35 @@ struct ice_fdir_v6 {
__be32 sec_parm_idx; /* security parameter index */
u8 tc;
u8 proto;
u8 hlim;
};
struct ice_fdir_udp_gtp {
u8 flags;
u8 msg_type;
__be16 rsrvd_len;
__be32 teid;
__be16 rsrvd_seq_nbr;
u8 rsrvd_n_pdu_nbr;
u8 rsrvd_next_ext_type;
u8 rsvrd_ext_len;
u8 pdu_type:4,
spare:4;
u8 ppp:1,
rqi:1,
qfi:6;
u32 rsvrd;
u8 next_ext;
};
struct ice_fdir_l2tpv3 {
__be32 session_id;
};
struct ice_fdir_extra {
u8 dst_mac[ETH_ALEN]; /* dest MAC address */
u8 src_mac[ETH_ALEN]; /* src MAC address */
__be16 ether_type; /* for NON_IP_L2 */
u32 usr_def[2]; /* user data */
__be16 vlan_type; /* VLAN ethertype */
__be16 vlan_tag; /* VLAN tag info */
......@@ -117,11 +164,19 @@ struct ice_fdir_fltr {
struct ice_fdir_v6 v6;
} ip, mask;
struct ice_fdir_udp_gtp gtpu_data;
struct ice_fdir_udp_gtp gtpu_mask;
struct ice_fdir_l2tpv3 l2tpv3_data;
struct ice_fdir_l2tpv3 l2tpv3_mask;
struct ice_fdir_extra ext_data;
struct ice_fdir_extra ext_mask;
/* flex byte filter data */
__be16 flex_word;
/* queue region size (=2^q_region) */
u8 q_region;
u16 flex_offset;
u16 flex_fltr;
......@@ -129,9 +184,12 @@ struct ice_fdir_fltr {
u16 q_index;
u16 dest_vsi;
u8 dest_ctl;
u8 cnt_ena;
u8 fltr_status;
u16 cnt_index;
u32 fltr_id;
u8 fdid_prio;
u8 comp_report;
};
/* Dummy packet filter definition structure */
......
......@@ -2361,18 +2361,82 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
}
/**
* ice_find_prof_id - find profile ID for a given field vector
* ice_prof_has_mask_idx - determine if profile index masking is identical
* @hw: pointer to the hardware structure
* @blk: HW block
* @prof: profile to check
* @idx: profile index to check
* @mask: mask to match
*/
static bool
ice_prof_has_mask_idx(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 idx,
u16 mask)
{
bool expect_no_mask = false;
bool found = false;
bool match = false;
u16 i;
/* If mask is 0x0000 or 0xffff, then there is no masking */
if (mask == 0 || mask == 0xffff)
expect_no_mask = true;
/* Scan the enabled masks on this profile, for the specified idx */
for (i = hw->blk[blk].masks.first; i < hw->blk[blk].masks.first +
hw->blk[blk].masks.count; i++)
if (hw->blk[blk].es.mask_ena[prof] & BIT(i))
if (hw->blk[blk].masks.masks[i].in_use &&
hw->blk[blk].masks.masks[i].idx == idx) {
found = true;
if (hw->blk[blk].masks.masks[i].mask == mask)
match = true;
break;
}
if (expect_no_mask) {
if (found)
return false;
} else {
if (!match)
return false;
}
return true;
}
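In other words, a requested mask of 0x0000 or 0xffff means "no per-word masking expected", so the check passes only if the profile has no in-use mask at that FV index; any other value passes only if the profile has an in-use mask at that index with exactly that value (e.g. a requested 0x00ff matches only a profile whose enabled mask at idx is exactly 0x00ff).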
/**
* ice_prof_has_mask - determine if profile masking is identical
* @hw: pointer to the hardware structure
* @blk: HW block
* @prof: profile to check
* @masks: masks to match
*/
static bool
ice_prof_has_mask(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 *masks)
{
u16 i;
/* es->mask_ena[prof] will have the mask */
for (i = 0; i < hw->blk[blk].es.fvw; i++)
if (!ice_prof_has_mask_idx(hw, blk, prof, i, masks[i]))
return false;
return true;
}
/**
* ice_find_prof_id_with_mask - find profile ID for a given field vector
* @hw: pointer to the hardware structure
* @blk: HW block
* @fv: field vector to search for
* @masks: masks for FV
* @prof_id: receives the profile ID
*/
static enum ice_status
ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
ice_find_prof_id_with_mask(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u16 *masks, u8 *prof_id)
{
struct ice_es *es = &hw->blk[blk].es;
u16 off;
u8 i;
/* For FD, we don't want to re-use an existing profile with the same
......@@ -2382,11 +2446,15 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
return ICE_ERR_DOES_NOT_EXIST;
for (i = 0; i < (u8)es->count; i++) {
off = i * es->fvw;
u16 off = i * es->fvw;
if (memcmp(&es->t[off], fv, es->fvw * sizeof(*fv)))
continue;
/* check if masks settings are the same for this profile */
if (masks && !ice_prof_has_mask(hw, blk, i, masks))
continue;
*prof_id = i;
return 0;
}
......@@ -2438,20 +2506,22 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* ice_alloc_tcam_ent - allocate hardware TCAM entry
* @hw: pointer to the HW struct
* @blk: the block to allocate the TCAM for
* @btm: true to allocate from bottom of table, false to allocate from top
* @tcam_idx: pointer to variable to receive the TCAM entry
*
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
static enum ice_status
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 *tcam_idx)
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
u16 res_type;
if (!ice_tcam_ent_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
return ice_alloc_hw_res(hw, res_type, 1, true, tcam_idx);
return ice_alloc_hw_res(hw, res_type, 1, btm, tcam_idx);
}
/**
......@@ -2536,6 +2606,330 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
return 0;
}
/**
* ice_write_prof_mask_reg - write profile mask register
* @hw: pointer to the HW struct
* @blk: hardware block
* @mask_idx: mask index
* @idx: index of the FV which will use the mask
* @mask: the 16-bit mask
*/
static void
ice_write_prof_mask_reg(struct ice_hw *hw, enum ice_block blk, u16 mask_idx,
u16 idx, u16 mask)
{
u32 offset;
u32 val;
switch (blk) {
case ICE_BLK_RSS:
offset = GLQF_HMASK(mask_idx);
val = (idx << GLQF_HMASK_MSK_INDEX_S) & GLQF_HMASK_MSK_INDEX_M;
val |= (mask << GLQF_HMASK_MASK_S) & GLQF_HMASK_MASK_M;
break;
case ICE_BLK_FD:
offset = GLQF_FDMASK(mask_idx);
val = (idx << GLQF_FDMASK_MSK_INDEX_S) & GLQF_FDMASK_MSK_INDEX_M;
val |= (mask << GLQF_FDMASK_MASK_S) & GLQF_FDMASK_MASK_M;
break;
default:
ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
blk);
return;
}
wr32(hw, offset, val);
ice_debug(hw, ICE_DBG_PKG, "write mask, blk %d (%d): %x = %x\n",
blk, idx, offset, val);
}
/**
* ice_write_prof_mask_enable_res - write profile mask enable register
* @hw: pointer to the HW struct
* @blk: hardware block
* @prof_id: profile ID
* @enable_mask: enable mask
*/
static void
ice_write_prof_mask_enable_res(struct ice_hw *hw, enum ice_block blk,
u16 prof_id, u32 enable_mask)
{
u32 offset;
switch (blk) {
case ICE_BLK_RSS:
offset = GLQF_HMASK_SEL(prof_id);
break;
case ICE_BLK_FD:
offset = GLQF_FDMASK_SEL(prof_id);
break;
default:
ice_debug(hw, ICE_DBG_PKG, "No profile masks for block %d\n",
blk);
return;
}
wr32(hw, offset, enable_mask);
ice_debug(hw, ICE_DBG_PKG, "write mask enable, blk %d (%d): %x = %x\n",
blk, prof_id, offset, enable_mask);
}
/**
* ice_init_prof_masks - initialize profile masks for a block
* @hw: pointer to the HW struct
* @blk: hardware block
*/
static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
u16 per_pf;
u16 i;
mutex_init(&hw->blk[blk].masks.lock);
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
hw->blk[blk].masks.first = hw->pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
for (i = hw->blk[blk].masks.first;
i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
ice_write_prof_mask_reg(hw, blk, i, 0, 0);
}
/**
* ice_init_all_prof_masks - initialize all prof masks
* @hw: pointer to the HW struct
*/
static void ice_init_all_prof_masks(struct ice_hw *hw)
{
ice_init_prof_masks(hw, ICE_BLK_RSS);
ice_init_prof_masks(hw, ICE_BLK_FD);
}
/**
* ice_alloc_prof_mask - allocate profile mask
* @hw: pointer to the HW struct
* @blk: hardware block
* @idx: index of FV which will use the mask
* @mask: the 16-bit mask
* @mask_idx: variable to receive the mask index
*/
static enum ice_status
ice_alloc_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 idx, u16 mask,
u16 *mask_idx)
{
bool found_unused = false, found_copy = false;
enum ice_status status = ICE_ERR_MAX_LIMIT;
u16 unused_idx = 0, copy_idx = 0;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return ICE_ERR_PARAM;
mutex_lock(&hw->blk[blk].masks.lock);
for (i = hw->blk[blk].masks.first;
i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++)
if (hw->blk[blk].masks.masks[i].in_use) {
/* if mask is in use and it exactly duplicates the
* desired mask and index, then it can be reused
*/
if (hw->blk[blk].masks.masks[i].mask == mask &&
hw->blk[blk].masks.masks[i].idx == idx) {
found_copy = true;
copy_idx = i;
break;
}
} else {
/* save off unused index, but keep searching in case
* there is an exact match later on
*/
if (!found_unused) {
found_unused = true;
unused_idx = i;
}
}
if (found_copy)
i = copy_idx;
else if (found_unused)
i = unused_idx;
else
goto err_ice_alloc_prof_mask;
/* update mask for a new entry */
if (found_unused) {
hw->blk[blk].masks.masks[i].in_use = true;
hw->blk[blk].masks.masks[i].mask = mask;
hw->blk[blk].masks.masks[i].idx = idx;
hw->blk[blk].masks.masks[i].ref = 0;
ice_write_prof_mask_reg(hw, blk, i, idx, mask);
}
hw->blk[blk].masks.masks[i].ref++;
*mask_idx = i;
status = 0;
err_ice_alloc_prof_mask:
mutex_unlock(&hw->blk[blk].masks.lock);
return status;
}
/**
* ice_free_prof_mask - free profile mask
* @hw: pointer to the HW struct
* @blk: hardware block
* @mask_idx: index of mask
*/
static enum ice_status
ice_free_prof_mask(struct ice_hw *hw, enum ice_block blk, u16 mask_idx)
{
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return ICE_ERR_PARAM;
if (!(mask_idx >= hw->blk[blk].masks.first &&
mask_idx < hw->blk[blk].masks.first + hw->blk[blk].masks.count))
return ICE_ERR_DOES_NOT_EXIST;
mutex_lock(&hw->blk[blk].masks.lock);
if (!hw->blk[blk].masks.masks[mask_idx].in_use)
goto exit_ice_free_prof_mask;
if (hw->blk[blk].masks.masks[mask_idx].ref > 1) {
hw->blk[blk].masks.masks[mask_idx].ref--;
goto exit_ice_free_prof_mask;
}
/* remove mask */
hw->blk[blk].masks.masks[mask_idx].in_use = false;
hw->blk[blk].masks.masks[mask_idx].mask = 0;
hw->blk[blk].masks.masks[mask_idx].idx = 0;
/* update mask as unused entry */
ice_debug(hw, ICE_DBG_PKG, "Free mask, blk %d, mask %d\n", blk,
mask_idx);
ice_write_prof_mask_reg(hw, blk, mask_idx, 0, 0);
exit_ice_free_prof_mask:
mutex_unlock(&hw->blk[blk].masks.lock);
return 0;
}
/**
* ice_free_prof_masks - free all profile masks for a profile
* @hw: pointer to the HW struct
* @blk: hardware block
* @prof_id: profile ID
*/
static enum ice_status
ice_free_prof_masks(struct ice_hw *hw, enum ice_block blk, u16 prof_id)
{
u32 mask_bm;
u16 i;
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return ICE_ERR_PARAM;
mask_bm = hw->blk[blk].es.mask_ena[prof_id];
for (i = 0; i < BITS_PER_BYTE * sizeof(mask_bm); i++)
if (mask_bm & BIT(i))
ice_free_prof_mask(hw, blk, i);
return 0;
}
/**
* ice_shutdown_prof_masks - clear profile masks and release the mask lock
* @hw: pointer to the HW struct
* @blk: hardware block
*
* This should be called before unloading the driver
*/
static void ice_shutdown_prof_masks(struct ice_hw *hw, enum ice_block blk)
{
u16 i;
mutex_lock(&hw->blk[blk].masks.lock);
for (i = hw->blk[blk].masks.first;
i < hw->blk[blk].masks.first + hw->blk[blk].masks.count; i++) {
ice_write_prof_mask_reg(hw, blk, i, 0, 0);
hw->blk[blk].masks.masks[i].in_use = false;
hw->blk[blk].masks.masks[i].idx = 0;
hw->blk[blk].masks.masks[i].mask = 0;
}
mutex_unlock(&hw->blk[blk].masks.lock);
mutex_destroy(&hw->blk[blk].masks.lock);
}
/**
* ice_shutdown_all_prof_masks - clear all profile masks and release their locks
* @hw: pointer to the HW struct
*
* This should be called before unloading the driver
*/
static void ice_shutdown_all_prof_masks(struct ice_hw *hw)
{
ice_shutdown_prof_masks(hw, ICE_BLK_RSS);
ice_shutdown_prof_masks(hw, ICE_BLK_FD);
}
/**
* ice_update_prof_masking - set registers according to masking
* @hw: pointer to the HW struct
* @blk: hardware block
* @prof_id: profile ID
* @masks: masks
*/
static enum ice_status
ice_update_prof_masking(struct ice_hw *hw, enum ice_block blk, u16 prof_id,
u16 *masks)
{
bool err = false;
u32 ena_mask = 0;
u16 idx;
u16 i;
/* Masking is only supported for the FD and RSS blocks; for any other
* block there is nothing to do
*/
if (blk != ICE_BLK_RSS && blk != ICE_BLK_FD)
return 0;
for (i = 0; i < hw->blk[blk].es.fvw; i++)
if (masks[i] && masks[i] != 0xFFFF) {
if (!ice_alloc_prof_mask(hw, blk, i, masks[i], &idx)) {
ena_mask |= BIT(idx);
} else {
/* not enough bitmaps */
err = true;
break;
}
}
if (err) {
/* free any bitmaps we have allocated */
for (i = 0; i < BITS_PER_BYTE * sizeof(ena_mask); i++)
if (ena_mask & BIT(i))
ice_free_prof_mask(hw, blk, i);
return ICE_ERR_OUT_OF_RANGE;
}
/* enable the masks for this profile */
ice_write_prof_mask_enable_res(hw, blk, prof_id, ena_mask);
/* store enabled masks with profile so that they can be freed later */
hw->blk[blk].es.mask_ena[prof_id] = ena_mask;
return 0;
}
/**
* ice_write_es - write an extraction sequence to hardware
* @hw: pointer to the HW struct
......@@ -2575,6 +2969,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
if (hw->blk[blk].es.ref_count[prof_id] > 0) {
if (!--hw->blk[blk].es.ref_count[prof_id]) {
ice_write_es(hw, blk, prof_id, NULL);
ice_free_prof_masks(hw, blk, prof_id);
return ice_free_prof_id(hw, blk, prof_id);
}
}
......@@ -2937,6 +3332,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.mask_ena);
}
list_for_each_entry_safe(r, rt, &hw->rss_list_head, l_entry) {
......@@ -2944,6 +3340,7 @@ void ice_free_hw_tbls(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), r);
}
mutex_destroy(&hw->rss_locks);
ice_shutdown_all_prof_masks(hw);
memset(hw->blk, 0, sizeof(hw->blk));
}
......@@ -2997,6 +3394,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
memset(es->t, 0, es->count * sizeof(*es->t) * es->fvw);
memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
memset(es->written, 0, es->count * sizeof(*es->written));
memset(es->mask_ena, 0, es->count * sizeof(*es->mask_ena));
}
}
......@@ -3010,6 +3408,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
mutex_init(&hw->rss_locks);
INIT_LIST_HEAD(&hw->rss_list_head);
ice_init_all_prof_masks(hw);
for (i = 0; i < ICE_BLK_COUNT; i++) {
struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
struct ice_prof_tcam *prof = &hw->blk[i].prof;
......@@ -3112,6 +3511,11 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
sizeof(*es->written), GFP_KERNEL);
if (!es->written)
goto err;
es->mask_ena = devm_kcalloc(ice_hw_to_dev(hw), es->count,
sizeof(*es->mask_ena), GFP_KERNEL);
if (!es->mask_ena)
goto err;
}
return 0;
......@@ -3711,22 +4115,79 @@ ice_update_fd_swap(struct ice_hw *hw, u16 prof_id, struct ice_fv_word *es)
return 0;
}
/* The entries here need to match the order of enum ice_ptype_attrib_type */
static const struct ice_ptype_attrib_info ice_ptype_attributes[] = {
{ ICE_GTP_PDU_EH, ICE_GTP_PDU_FLAG_MASK },
{ ICE_GTP_SESSION, ICE_GTP_FLAGS_MASK },
{ ICE_GTP_DOWNLINK, ICE_GTP_FLAGS_MASK },
{ ICE_GTP_UPLINK, ICE_GTP_FLAGS_MASK },
};
/**
* ice_get_ptype_attrib_info - get PTYPE attribute information
* @type: attribute type
* @info: pointer to variable to receive the attribute information
*/
static void
ice_get_ptype_attrib_info(enum ice_ptype_attrib_type type,
struct ice_ptype_attrib_info *info)
{
*info = ice_ptype_attributes[type];
}
/**
* ice_add_prof_attrib - add any PTG with attributes to profile
* @prof: pointer to the profile to which PTG entries will be added
* @ptg: PTG to be added
* @ptype: PTYPE that needs to be looked up
* @attr: array of attributes that will be considered
* @attr_cnt: number of elements in the attribute array
*/
static enum ice_status
ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype,
const struct ice_ptype_attributes *attr, u16 attr_cnt)
{
bool found = false;
u16 i;
for (i = 0; i < attr_cnt; i++)
if (attr[i].ptype == ptype) {
found = true;
prof->ptg[prof->ptg_cnt] = ptg;
ice_get_ptype_attrib_info(attr[i].attrib,
&prof->attr[prof->ptg_cnt]);
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
return ICE_ERR_MAX_LIMIT;
}
if (!found)
return ICE_ERR_DOES_NOT_EXIST;
return 0;
}
/**
* ice_add_prof - add profile
* @hw: pointer to the HW struct
* @blk: hardware block
* @id: profile tracking ID
* @ptypes: array of bitmaps indicating ptypes (ICE_FLOW_PTYPE_MAX bits)
* @attr: array of attributes
* @attr_cnt: number of elements in attr array
* @es: extraction sequence (length of array is determined by the block)
* @masks: masks for the extraction sequence
*
* This function registers a profile, which matches a set of PTGs with a
* This function registers a profile, which matches a set of PTYPES with a
* particular extraction sequence. While the hardware profile is allocated
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es)
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks)
{
u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
......@@ -3740,7 +4201,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
mutex_lock(&hw->blk[blk].es.prof_map_lock);
/* search for existing profile */
status = ice_find_prof_id(hw, blk, es, &prof_id);
status = ice_find_prof_id_with_mask(hw, blk, es, masks, &prof_id);
if (status) {
/* allocate profile ID */
status = ice_alloc_prof_id(hw, blk, &prof_id);
......@@ -3758,6 +4219,9 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
if (status)
goto err_ice_add_prof;
}
status = ice_update_prof_masking(hw, blk, prof_id, masks);
if (status)
goto err_ice_add_prof;
/* and write new es */
ice_write_es(hw, blk, prof_id, es);
......@@ -3792,7 +4256,6 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
BITS_PER_BYTE) {
u16 ptype;
u8 ptg;
u8 m;
ptype = byte * BITS_PER_BYTE + bit;
......@@ -3807,15 +4270,25 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
continue;
set_bit(ptg, ptgs_used);
prof->ptg[prof->ptg_cnt] = ptg;
if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
/* Check to see if there are any attributes for
* this PTYPE, and add them if found.
*/
status = ice_add_prof_attrib(prof, ptg, ptype,
attr, attr_cnt);
if (status == ICE_ERR_MAX_LIMIT)
break;
if (status) {
/* This is simply a PTYPE/PTG with no
* attributes
*/
prof->ptg[prof->ptg_cnt] = ptg;
prof->attr[prof->ptg_cnt].flags = 0;
prof->attr[prof->ptg_cnt].mask = 0;
/* nothing left in byte, then exit */
m = ~(u8)((1 << (bit + 1)) - 1);
if (!(ptypes[byte] & m))
break;
if (++prof->ptg_cnt >=
ICE_MAX_PTG_PER_PROFILE)
break;
}
}
bytes--;
......@@ -4326,7 +4799,12 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
}
/* for re-enabling, reallocate a TCAM */
status = ice_alloc_tcam_ent(hw, blk, &tcam->tcam_idx);
/* for entries with empty attribute masks, allocate entry from
* the bottom of the TCAM table; otherwise, allocate from the
* top of the table in order to give it higher priority
*/
status = ice_alloc_tcam_ent(hw, blk, tcam->attr.mask == 0,
&tcam->tcam_idx);
if (status)
return status;
......@@ -4336,8 +4814,8 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
return ICE_ERR_NO_MEMORY;
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, 0, vl_msk, dc_msk,
nm_msk);
tcam->ptg, vsig, 0, tcam->attr.flags,
vl_msk, dc_msk, nm_msk);
if (status)
goto err_ice_prof_tcam_ena_dis;
......@@ -4485,7 +4963,12 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
}
/* allocate the TCAM entry index */
status = ice_alloc_tcam_ent(hw, blk, &tcam_idx);
/* for entries with empty attribute masks, allocate entry from
* the bottom of the TCAM table; otherwise, allocate from the
* top of the table in order to give it higher priority
*/
status = ice_alloc_tcam_ent(hw, blk, map->attr[i].mask == 0,
&tcam_idx);
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
......@@ -4494,6 +4977,7 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
t->tcam[i].ptg = map->ptg[i];
t->tcam[i].prof_id = map->prof_id;
t->tcam[i].tcam_idx = tcam_idx;
t->tcam[i].attr = map->attr[i];
t->tcam[i].in_use = true;
p->type = ICE_TCAM_ADD;
......
......@@ -27,7 +27,8 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
struct ice_fv_word *es);
const struct ice_ptype_attributes *attr, u16 attr_cnt,
struct ice_fv_word *es, u16 *masks);
enum ice_status
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
enum ice_status
......
......@@ -190,6 +190,64 @@ enum ice_sect {
ICE_SECT_COUNT
};
#define ICE_MAC_IPV4_GTPU_IPV4_FRAG 331
#define ICE_MAC_IPV4_GTPU_IPV4_PAY 332
#define ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY 333
#define ICE_MAC_IPV4_GTPU_IPV4_TCP 334
#define ICE_MAC_IPV4_GTPU_IPV4_ICMP 335
#define ICE_MAC_IPV6_GTPU_IPV4_FRAG 336
#define ICE_MAC_IPV6_GTPU_IPV4_PAY 337
#define ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY 338
#define ICE_MAC_IPV6_GTPU_IPV4_TCP 339
#define ICE_MAC_IPV6_GTPU_IPV4_ICMP 340
#define ICE_MAC_IPV4_GTPU_IPV6_FRAG 341
#define ICE_MAC_IPV4_GTPU_IPV6_PAY 342
#define ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY 343
#define ICE_MAC_IPV4_GTPU_IPV6_TCP 344
#define ICE_MAC_IPV4_GTPU_IPV6_ICMPV6 345
#define ICE_MAC_IPV6_GTPU_IPV6_FRAG 346
#define ICE_MAC_IPV6_GTPU_IPV6_PAY 347
#define ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY 348
#define ICE_MAC_IPV6_GTPU_IPV6_TCP 349
#define ICE_MAC_IPV6_GTPU_IPV6_ICMPV6 350
/* Attributes that can modify PTYPE definitions.
*
* These values represent special attributes for PTYPEs, which resolve
* into metadata packet flag definitions that can be used in the TCAM to
* identify a PTYPE with specific characteristics.
*/
enum ice_ptype_attrib_type {
/* GTP PTYPEs */
ICE_PTYPE_ATTR_GTP_PDU_EH,
ICE_PTYPE_ATTR_GTP_SESSION,
ICE_PTYPE_ATTR_GTP_DOWNLINK,
ICE_PTYPE_ATTR_GTP_UPLINK,
};
struct ice_ptype_attrib_info {
u16 flags;
u16 mask;
};
/* TCAM flag definitions */
#define ICE_GTP_PDU BIT(14)
#define ICE_GTP_PDU_LINK BIT(13)
/* GTP attributes */
#define ICE_GTP_PDU_FLAG_MASK (ICE_GTP_PDU)
#define ICE_GTP_PDU_EH ICE_GTP_PDU
#define ICE_GTP_FLAGS_MASK (ICE_GTP_PDU | ICE_GTP_PDU_LINK)
#define ICE_GTP_SESSION 0
#define ICE_GTP_DOWNLINK ICE_GTP_PDU
#define ICE_GTP_UPLINK (ICE_GTP_PDU | ICE_GTP_PDU_LINK)
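To make the flag/mask encoding above concrete, a small host-side sketch of how a TCAM flag word would be matched against these attribute pairs (illustrative only, not part of the patch):

    #include <stdbool.h>
    #include <stdio.h>

    #define BIT(n)            (1u << (n))
    #define ICE_GTP_PDU       BIT(14)
    #define ICE_GTP_PDU_LINK  BIT(13)

    /* an attribute matches when the masked flags equal its flag value */
    static bool attr_match(unsigned int pkt_flags, unsigned int flags,
    		       unsigned int mask)
    {
    	return (pkt_flags & mask) == flags;
    }

    int main(void)
    {
    	unsigned int uplink = ICE_GTP_PDU | ICE_GTP_PDU_LINK;

    	/* an uplink PDU-session packet matches PDU_EH and UPLINK,
    	 * but not DOWNLINK or SESSION
    	 */
    	printf("PDU_EH   %d\n", attr_match(uplink, ICE_GTP_PDU, ICE_GTP_PDU));
    	printf("UPLINK   %d\n", attr_match(uplink,
    					   ICE_GTP_PDU | ICE_GTP_PDU_LINK,
    					   ICE_GTP_PDU | ICE_GTP_PDU_LINK));
    	printf("DOWNLINK %d\n", attr_match(uplink, ICE_GTP_PDU,
    					   ICE_GTP_PDU | ICE_GTP_PDU_LINK));
    	printf("SESSION  %d\n", attr_match(uplink, 0,
    					   ICE_GTP_PDU | ICE_GTP_PDU_LINK));
    	return 0;
    }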
struct ice_ptype_attributes {
u16 ptype;
enum ice_ptype_attrib_type attrib;
};
/* package labels */
struct ice_label {
__le16 value;
......@@ -335,6 +393,7 @@ struct ice_es {
u16 count;
u16 fvw;
u16 *ref_count;
u32 *mask_ena;
struct list_head prof_map;
struct ice_fv_word *t;
struct mutex prof_map_lock; /* protect access to profiles list */
......@@ -372,12 +431,14 @@ struct ice_prof_map {
u8 prof_id;
u8 ptg_cnt;
u8 ptg[ICE_MAX_PTG_PER_PROFILE];
struct ice_ptype_attrib_info attr[ICE_MAX_PTG_PER_PROFILE];
};
#define ICE_INVALID_TCAM 0xFFFF
struct ice_tcam_inf {
u16 tcam_idx;
struct ice_ptype_attrib_info attr;
u8 ptg;
u8 prof_id;
u8 in_use;
......@@ -478,6 +539,21 @@ struct ice_prof_redir {
u16 count;
};
struct ice_mask {
u16 mask; /* 16-bit mask */
u16 idx; /* index */
u16 ref; /* reference count */
u8 in_use; /* non-zero if used */
};
struct ice_masks {
struct mutex lock; /* lock to protect this structure */
u16 first; /* first mask owned by the PF */
u16 count; /* number of masks owned by the PF */
#define ICE_PROF_MASK_COUNT 32
struct ice_mask masks[ICE_PROF_MASK_COUNT];
};
/* Tables per block */
struct ice_blk_info {
struct ice_xlt1 xlt1;
......@@ -485,6 +561,7 @@ struct ice_blk_info {
struct ice_prof_tcam prof;
struct ice_prof_redir prof_redir;
struct ice_es es;
struct ice_masks masks;
u8 overwrite; /* set to true to allow overwrite of table entries */
u8 is_list_init;
};
......@@ -513,6 +590,7 @@ struct ice_chs_chg {
u16 vsig;
u16 orig_vsig;
u16 tcam_idx;
struct ice_ptype_attrib_info attr;
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
......
......@@ -9,18 +9,50 @@ struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr;
s16 off; /* Offset from start of a protocol header, in bits */
u16 size; /* Size of fields in bits */
u16 mask; /* 16-bit mask for field */
};
#define ICE_FLOW_FLD_INFO(_hdr, _offset_bytes, _size_bytes) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
.size = (_size_bytes) * BITS_PER_BYTE, \
.mask = 0, \
}
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
.size = (_size_bytes) * BITS_PER_BYTE, \
.mask = _mask, \
}
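For example (illustrative reading, not part of the patch): the IPv4 DSCP entry below, ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc), describes a one-byte field at offset 0 of the IPv4 header whose 0x00fc mask selects the six DSCP bits of the TOS byte.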
/* Table containing properties of supported protocol header fields */
static const
struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* Ether */
/* ICE_FLOW_FIELD_IDX_ETH_DA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_ETH_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, ETH_ALEN, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_S_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 12, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_C_VLAN */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_VLAN, 14, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_ETH_TYPE */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ETH, 0, sizeof(__be16)),
/* IPv4 / IPv6 */
/* ICE_FLOW_FIELD_IDX_IPV4_DSCP */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV4, 0, 1, 0x00fc),
/* ICE_FLOW_FIELD_IDX_IPV6_DSCP */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_IPV6, 0, 1, 0x0ff0),
/* ICE_FLOW_FIELD_IDX_IPV4_TTL */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_PROT */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 8, 1, 0x00ff),
/* ICE_FLOW_FIELD_IDX_IPV6_TTL */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0x00ff),
/* ICE_FLOW_FIELD_IDX_IPV6_PROT */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_NONE, 6, 1, 0xff00),
/* ICE_FLOW_FIELD_IDX_IPV4_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 12, sizeof(struct in_addr)),
/* ICE_FLOW_FIELD_IDX_IPV4_DA */
......@@ -42,21 +74,111 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 0, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
/* ARP */
/* ICE_FLOW_FIELD_IDX_ARP_SIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
/* ICE_FLOW_FIELD_IDX_ARP_DIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 24, sizeof(struct in_addr)),
/* ICE_FLOW_FIELD_IDX_ARP_SHA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 8, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_ARP_DHA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 18, ETH_ALEN),
/* ICE_FLOW_FIELD_IDX_ARP_OP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 6, sizeof(__be16)),
/* ICMP */
/* ICE_FLOW_FIELD_IDX_ICMP_TYPE */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 0, 1),
/* ICE_FLOW_FIELD_IDX_ICMP_CODE */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ICMP, 1, 1),
/* GRE */
/* ICE_FLOW_FIELD_IDX_GRE_KEYID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GRE, 12,
sizeof_field(struct gre_full_hdr, key)),
/* GTP */
/* ICE_FLOW_FIELD_IDX_GTPC_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPC_TEID, 12, sizeof(__be32)),
/* ICE_FLOW_FIELD_IDX_GTPU_IP_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_IP, 12, sizeof(__be32)),
/* ICE_FLOW_FIELD_IDX_GTPU_EH_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_EH, 12, sizeof(__be32)),
/* ICE_FLOW_FIELD_IDX_GTPU_EH_QFI */
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
/* PPPoE */
/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
/* PFCP */
/* ICE_FLOW_FIELD_IDX_PFCP_SEID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PFCP_SESSION, 12, sizeof(__be64)),
/* L2TPv3 */
/* ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV3, 0, sizeof(__be32)),
/* ESP */
/* ICE_FLOW_FIELD_IDX_ESP_SPI */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ESP, 0, sizeof(__be32)),
/* AH */
/* ICE_FLOW_FIELD_IDX_AH_SPI */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
/* NAT_T_ESP */
/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
*
* Packet types for packets with an Outer/First/Single IPv4 header
* Packet types for packets with an Outer/First/Single MAC header
*/
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
0x0000077E, 0x00000000, 0x00000000, 0x00000000,
0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
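These bitmaps index PTYPEs as bit (n % 32) of 32-bit word (n / 32). A minimal host-side check of that convention against the table above (hypothetical test, not driver code):

    #include <assert.h>
    #include <stdint.h>

    /* matches the #define earlier in this patch */
    #define ICE_MAC_IPV4_GTPU_IPV4_PAY 332

    int main(void)
    {
    	/* word 10 of ice_ptypes_mac_ofos above */
    	uint32_t word10 = 0x7FFFFFE0;
    	unsigned int ptype = ICE_MAC_IPV4_GTPU_IPV4_PAY;

    	assert(ptype / 32 == 10);                        /* word index */
    	assert(word10 & (UINT32_C(1) << (ptype % 32)));  /* bit 12 is set */
    	return 0;
    }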
/* Packet types for packets with an Innermost/Last MAC VLAN header */
static const u32 ice_ptypes_macvlan_il[] = {
0x00000000, 0xBC000000, 0x000001DF, 0xF0000000,
0x0000077E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv4 header, does NOT
* include IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
0x00000000, 0x000FC000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv4 header, includes
* IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos_all[] = {
0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -67,7 +189,7 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x001FF800, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -75,12 +197,28 @@ static const u32 ice_ptypes_ipv4_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv6 header */
/* Packet types for packets with an Outer/First/Single IPv6 header, does NOT
* include IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
0x00000000, 0x03F00000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outer/First/Single IPv6 header, includes
* IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos_all[] = {
0x00000000, 0x00000000, 0x77000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -91,7 +229,7 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -111,6 +249,18 @@ static const u32 ice_ipv4_ofos_no_l4[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outermost/First ARP header */
static const u32 ice_ptypes_arp_of[] = {
0x00000800, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
static const u32 ice_ipv4_il_no_l4[] = {
0x60000000, 0x18043008, 0x80000002, 0x6010c021,
......@@ -153,7 +303,7 @@ static const u32 ice_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00410000, 0x90842000, 0x00000007,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -165,7 +315,7 @@ static const u32 ice_ptypes_udp_il[] = {
static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00820000, 0x21084000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -177,6 +327,18 @@ static const u32 ice_ptypes_tcp_il[] = {
static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x01040000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outermost/First ICMP header */
static const u32 ice_ptypes_icmp_of[] = {
0x10000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
......@@ -185,6 +347,18 @@ static const u32 ice_ptypes_sctp_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Innermost/Last ICMP header */
static const u32 ice_ptypes_icmp_il[] = {
0x00000000, 0x02040408, 0x40000102, 0x08101020,
0x00000408, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x42108000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Outermost/First GRE header */
static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
......@@ -197,6 +371,218 @@ static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for GTPC */
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000180, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for GTPC with TEID */
static const u32 ice_ptypes_gtpc_tid[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000060, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for GTPU */
static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_PDU_EH },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_PDU_EH },
};
static const struct ice_ptype_attributes ice_attr_gtpu_down[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_DOWNLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_DOWNLINK },
};
static const struct ice_ptype_attributes ice_attr_gtpu_up[] = {
{ ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_UPLINK },
{ ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_UPLINK },
};
static const u32 ice_ptypes_gtpu[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FFFFE00, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for PPPoE */
static const u32 ice_ptypes_pppoe[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x03ffe000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with PFCP NODE header */
static const u32 ice_ptypes_pfcp_node[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x80000000, 0x00000002,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with PFCP SESSION header */
static const u32 ice_ptypes_pfcp_session[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000005,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for L2TPv3 */
static const u32 ice_ptypes_l2tpv3[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000300,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for ESP */
static const u32 ice_ptypes_esp[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000003, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for AH */
static const u32 ice_ptypes_ah[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x0000000C, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Packet types for packets with NAT_T ESP header */
static const u32 ice_ptypes_nat_t_esp[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000030, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
static const u32 ice_ptypes_mac_non_ip_ofos[] = {
0x00000846, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00400000, 0x03FFF000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
/* Manage parameters and info used during the creation of a flow profile */
struct ice_flow_prof_params {
enum ice_block blk;
......@@ -208,12 +594,30 @@ struct ice_flow_prof_params {
* This will give us the direction flags.
*/
struct ice_fv_word es[ICE_MAX_FV_WORDS];
/* attributes can be used to add attributes to a particular PTYPE */
const struct ice_ptype_attributes *attr;
u16 attr_cnt;
u16 mask[ICE_MAX_FV_WORDS];
DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);
};
#define ICE_FLOW_RSS_HDRS_INNER_MASK \
(ICE_FLOW_SEG_HDR_PPPOE | ICE_FLOW_SEG_HDR_GTPC | \
ICE_FLOW_SEG_HDR_GTPC_TEID | ICE_FLOW_SEG_HDR_GTPU | \
ICE_FLOW_SEG_HDR_PFCP_SESSION | ICE_FLOW_SEG_HDR_L2TPV3 | \
ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_AH | \
ICE_FLOW_SEG_HDR_NAT_T_ESP)
#define ICE_FLOW_SEG_HDRS_L2_MASK \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_SEG_HDRS_L3_MASK \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_ARP)
#define ICE_FLOW_SEG_HDRS_L4_MASK \
(ICE_FLOW_SEG_HDR_ICMP | ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | \
ICE_FLOW_SEG_HDR_SCTP)
/* mask for L4 protocols that are NOT part of IPv4/6 OTHER PTYPE groups */
#define ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER \
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
/**
......@@ -243,8 +647,11 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
/* Sizes of fixed known protocol headers without header options */
#define ICE_FLOW_PROT_HDR_SZ_MAC 14
#define ICE_FLOW_PROT_HDR_SZ_MAC_VLAN (ICE_FLOW_PROT_HDR_SZ_MAC + 2)
#define ICE_FLOW_PROT_HDR_SZ_IPV4 20
#define ICE_FLOW_PROT_HDR_SZ_IPV6 40
#define ICE_FLOW_PROT_HDR_SZ_ARP 28
#define ICE_FLOW_PROT_HDR_SZ_ICMP 8
#define ICE_FLOW_PROT_HDR_SZ_TCP 20
#define ICE_FLOW_PROT_HDR_SZ_UDP 8
#define ICE_FLOW_PROT_HDR_SZ_SCTP 12
......@@ -256,16 +663,27 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
*/
static u16 ice_flow_calc_seg_sz(struct ice_flow_prof_params *params, u8 seg)
{
u16 sz = ICE_FLOW_PROT_HDR_SZ_MAC;
u16 sz;
/* L2 headers */
sz = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_VLAN) ?
ICE_FLOW_PROT_HDR_SZ_MAC_VLAN : ICE_FLOW_PROT_HDR_SZ_MAC;
/* L3 headers */
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4)
sz += ICE_FLOW_PROT_HDR_SZ_IPV4;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV6)
sz += ICE_FLOW_PROT_HDR_SZ_IPV6;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ARP)
sz += ICE_FLOW_PROT_HDR_SZ_ARP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)
/* An L3 header is required if L4 is specified */
return 0;
/* L4 headers */
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_ICMP)
sz += ICE_FLOW_PROT_HDR_SZ_ICMP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_TCP)
sz += ICE_FLOW_PROT_HDR_SZ_TCP;
else if (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_UDP)
sz += ICE_FLOW_PROT_HDR_SZ_UDP;
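Taken together, ice_flow_calc_seg_sz() simply sums the fixed header sizes above along the selected header chain. A worked example (illustrative, not from the patch): MAC (14) + IPv4 (20) + TCP (20) gives a 54-byte segment, while MAC+VLAN (16) + IPv6 (40) + UDP (8) gives 64 bytes.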
......@@ -298,8 +716,39 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
if (hdrs & ICE_FLOW_SEG_HDR_ETH) {
src = !i ? (const unsigned long *)ice_ptypes_mac_ofos :
(const unsigned long *)ice_ptypes_mac_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
if (i && hdrs & ICE_FLOW_SEG_HDR_VLAN) {
src = (const unsigned long *)ice_ptypes_macvlan_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
if (!i && hdrs & ICE_FLOW_SEG_HDR_ARP) {
bitmap_and(params->ptypes, params->ptypes,
(const unsigned long *)ice_ptypes_arp_of,
ICE_FLOW_PTYPE_MAX);
}
if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
(hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
src = i ? (const unsigned long *)ice_ptypes_ipv4_il :
(const unsigned long *)ice_ptypes_ipv4_ofos_all;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
(hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)) {
src = i ? (const unsigned long *)ice_ptypes_ipv6_il :
(const unsigned long *)ice_ptypes_ipv6_ofos_all;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
(const unsigned long *)ice_ipv4_il_no_l4;
bitmap_and(params->ptypes, params->ptypes, src,
......@@ -310,7 +759,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
!(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK_NO_OTHER)) {
src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
(const unsigned long *)ice_ipv6_il_no_l4;
bitmap_and(params->ptypes, params->ptypes, src,
......@@ -322,6 +771,20 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
ICE_FLOW_PTYPE_MAX);
}
if (hdrs & ICE_FLOW_SEG_HDR_ETH_NON_IP) {
src = (const unsigned long *)ice_ptypes_mac_non_ip_ofos;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_PPPOE) {
src = (const unsigned long *)ice_ptypes_pppoe;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else {
src = (const unsigned long *)ice_ptypes_pppoe;
bitmap_andnot(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
if (hdrs & ICE_FLOW_SEG_HDR_UDP) {
src = (const unsigned long *)ice_ptypes_udp_il;
bitmap_and(params->ptypes, params->ptypes, src,
......@@ -334,12 +797,89 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
src = (const unsigned long *)ice_ptypes_sctp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
if (hdrs & ICE_FLOW_SEG_HDR_ICMP) {
src = !i ? (const unsigned long *)ice_ptypes_icmp_of :
(const unsigned long *)ice_ptypes_icmp_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GRE) {
if (!i) {
src = (const unsigned long *)ice_ptypes_gre_of;
bitmap_and(params->ptypes, params->ptypes,
src, ICE_FLOW_PTYPE_MAX);
}
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC) {
src = (const unsigned long *)ice_ptypes_gtpc;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPC_TEID) {
src = (const unsigned long *)ice_ptypes_gtpc_tid;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN) {
src = (const unsigned long *)ice_ptypes_gtpu;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
/* Attributes for GTP packet with downlink */
params->attr = ice_attr_gtpu_down;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP) {
src = (const unsigned long *)ice_ptypes_gtpu;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
/* Attributes for GTP packet with uplink */
params->attr = ice_attr_gtpu_up;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH) {
src = (const unsigned long *)ice_ptypes_gtpu;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
/* Attributes for GTP packet with Extension Header */
params->attr = ice_attr_gtpu_eh;
params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_eh);
} else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
src = (const unsigned long *)ice_ptypes_gtpu;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_L2TPV3) {
src = (const unsigned long *)ice_ptypes_l2tpv3;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_ESP) {
src = (const unsigned long *)ice_ptypes_esp;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_AH) {
src = (const unsigned long *)ice_ptypes_ah;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_NAT_T_ESP) {
src = (const unsigned long *)ice_ptypes_nat_t_esp;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
if (hdrs & ICE_FLOW_SEG_HDR_PFCP) {
if (hdrs & ICE_FLOW_SEG_HDR_PFCP_NODE)
src = (const unsigned long *)ice_ptypes_pfcp_node;
else
src = (const unsigned long *)ice_ptypes_pfcp_session;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
} else {
src = (const unsigned long *)ice_ptypes_pfcp_node;
bitmap_andnot(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
src = (const unsigned long *)ice_ptypes_pfcp_session;
bitmap_andnot(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
}
}
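This is the whole narrowing scheme of ice_flow_proc_seg_hdrs(): each requested header ANDs the working PTYPE bitmap with its group table, while protocols that must be absent (PPPoE and PFCP in the else branches) are subtracted with bitmap_andnot() so a generic rule cannot match their packets. A minimal sketch of the same narrowing, reusing the tables defined above:

DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);

/* start from all PTYPEs, keep inner-UDP ones, then drop PFCP node ones */
bitmap_fill(ptypes, ICE_FLOW_PTYPE_MAX);
bitmap_and(ptypes, ptypes, (const unsigned long *)ice_ptypes_udp_il,
	   ICE_FLOW_PTYPE_MAX);
bitmap_andnot(ptypes, ptypes, (const unsigned long *)ice_ptypes_pfcp_node,
	      ICE_FLOW_PTYPE_MAX);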
......@@ -352,6 +892,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* @params: information about the flow to be processed
* @seg: packet segment index of the field to be extracted
* @fld: ID of field to be extracted
* @match: bit field of all fields
*
* This function determines the protocol ID, offset, and size of the given
* field. It then allocates one or more extraction sequence entries for the
......@@ -359,17 +900,73 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
*/
static enum ice_status
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld)
u8 seg, enum ice_flow_field fld, u64 match)
{
enum ice_flow_field sib = ICE_FLOW_FIELD_IDX_MAX;
enum ice_prot_id prot_id = ICE_PROT_ID_INVAL;
u8 fv_words = hw->blk[params->blk].es.fvw;
struct ice_flow_fld_info *flds;
u16 cnt, ese_bits, i;
u16 sib_mask = 0;
u16 mask;
u16 off;
flds = params->prof->segs[seg].fields;
switch (fld) {
case ICE_FLOW_FIELD_IDX_ETH_DA:
case ICE_FLOW_FIELD_IDX_ETH_SA:
case ICE_FLOW_FIELD_IDX_S_VLAN:
case ICE_FLOW_FIELD_IDX_C_VLAN:
prot_id = seg == 0 ? ICE_PROT_MAC_OF_OR_S : ICE_PROT_MAC_IL;
break;
case ICE_FLOW_FIELD_IDX_ETH_TYPE:
prot_id = seg == 0 ? ICE_PROT_ETYPE_OL : ICE_PROT_ETYPE_IL;
break;
case ICE_FLOW_FIELD_IDX_IPV4_DSCP:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
break;
case ICE_FLOW_FIELD_IDX_IPV6_DSCP:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
break;
case ICE_FLOW_FIELD_IDX_IPV4_TTL:
case ICE_FLOW_FIELD_IDX_IPV4_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
/* TTL and PROT share the same extraction seq. entry, so each
 * is treated as the other's sibling.
 */
if (fld == ICE_FLOW_FIELD_IDX_IPV4_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV4_PROT;
else if (fld == ICE_FLOW_FIELD_IDX_IPV4_PROT)
sib = ICE_FLOW_FIELD_IDX_IPV4_TTL;
/* If the sibling field is also included, that field's
* mask needs to be included.
*/
if (match & BIT(sib))
sib_mask = ice_flds_info[sib].mask;
break;
case ICE_FLOW_FIELD_IDX_IPV6_TTL:
case ICE_FLOW_FIELD_IDX_IPV6_PROT:
prot_id = seg == 0 ? ICE_PROT_IPV6_OF_OR_S : ICE_PROT_IPV6_IL;
/* TTL and PROT share the same extraction seq. entry, so each
 * is treated as the other's sibling.
 */
if (fld == ICE_FLOW_FIELD_IDX_IPV6_TTL)
sib = ICE_FLOW_FIELD_IDX_IPV6_PROT;
else if (fld == ICE_FLOW_FIELD_IDX_IPV6_PROT)
sib = ICE_FLOW_FIELD_IDX_IPV6_TTL;
/* If the sibling field is also included, that field's
* mask needs to be included.
*/
if (match & BIT(sib))
sib_mask = ice_flds_info[sib].mask;
break;
case ICE_FLOW_FIELD_IDX_IPV4_SA:
case ICE_FLOW_FIELD_IDX_IPV4_DA:
prot_id = seg == 0 ? ICE_PROT_IPV4_OF_OR_S : ICE_PROT_IPV4_IL;
......@@ -380,6 +977,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
break;
case ICE_FLOW_FIELD_IDX_TCP_SRC_PORT:
case ICE_FLOW_FIELD_IDX_TCP_DST_PORT:
case ICE_FLOW_FIELD_IDX_TCP_FLAGS:
prot_id = ICE_PROT_TCP_IL;
break;
case ICE_FLOW_FIELD_IDX_UDP_SRC_PORT:
......@@ -390,6 +988,49 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
case ICE_FLOW_FIELD_IDX_SCTP_DST_PORT:
prot_id = ICE_PROT_SCTP_IL;
break;
case ICE_FLOW_FIELD_IDX_GTPC_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_IP_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_UP_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_EH_TEID:
case ICE_FLOW_FIELD_IDX_GTPU_EH_QFI:
/* GTP is accessed through UDP OF protocol */
prot_id = ICE_PROT_UDP_OF;
break;
case ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID:
prot_id = ICE_PROT_PPPOE;
break;
case ICE_FLOW_FIELD_IDX_PFCP_SEID:
prot_id = ICE_PROT_UDP_IL_OR_S;
break;
case ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID:
prot_id = ICE_PROT_L2TPV3;
break;
case ICE_FLOW_FIELD_IDX_ESP_SPI:
prot_id = ICE_PROT_ESP_F;
break;
case ICE_FLOW_FIELD_IDX_AH_SPI:
prot_id = ICE_PROT_ESP_2;
break;
case ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI:
prot_id = ICE_PROT_UDP_IL_OR_S;
break;
case ICE_FLOW_FIELD_IDX_ARP_SIP:
case ICE_FLOW_FIELD_IDX_ARP_DIP:
case ICE_FLOW_FIELD_IDX_ARP_SHA:
case ICE_FLOW_FIELD_IDX_ARP_DHA:
case ICE_FLOW_FIELD_IDX_ARP_OP:
prot_id = ICE_PROT_ARP_OF;
break;
case ICE_FLOW_FIELD_IDX_ICMP_TYPE:
case ICE_FLOW_FIELD_IDX_ICMP_CODE:
/* ICMP type and code share the same extraction seq. entry */
prot_id = (params->prof->segs[seg].hdrs & ICE_FLOW_SEG_HDR_IPV4) ?
ICE_PROT_ICMP_IL : ICE_PROT_ICMPV6_IL;
sib = fld == ICE_FLOW_FIELD_IDX_ICMP_TYPE ?
ICE_FLOW_FIELD_IDX_ICMP_CODE :
ICE_FLOW_FIELD_IDX_ICMP_TYPE;
break;
case ICE_FLOW_FIELD_IDX_GRE_KEYID:
prot_id = ICE_PROT_GRE_OF;
break;
......@@ -407,6 +1048,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
ICE_FLOW_FV_EXTRACT_SZ;
flds[fld].xtrct.disp = (u8)(ice_flds_info[fld].off % ese_bits);
flds[fld].xtrct.idx = params->es_cnt;
flds[fld].xtrct.mask = ice_flds_info[fld].mask;
/* Adjust the next field-entry index after accommodating the number of
* entries this field consumes
......@@ -416,24 +1058,34 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
/* Fill in the extraction sequence entries needed for this field */
off = flds[fld].xtrct.off;
mask = flds[fld].xtrct.mask;
for (i = 0; i < cnt; i++) {
u8 idx;
/* Make sure the number of extraction sequence required
* does not exceed the block's capability
/* Only consume an extraction sequence entry if there is no
* sibling field associated with this field or the sibling entry
* already extracts the word shared with this field.
*/
if (params->es_cnt >= fv_words)
return ICE_ERR_MAX_LIMIT;
if (sib == ICE_FLOW_FIELD_IDX_MAX ||
flds[sib].xtrct.prot_id == ICE_PROT_ID_INVAL ||
flds[sib].xtrct.off != off) {
u8 idx;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
idx = fv_words - params->es_cnt - 1;
else
idx = params->es_cnt;
/* Make sure the number of extraction sequence entries
 * required does not exceed the block's capability
 */
if (params->es_cnt >= fv_words)
return ICE_ERR_MAX_LIMIT;
params->es[idx].prot_id = prot_id;
params->es[idx].off = off;
params->es_cnt++;
/* some blocks require a reversed field vector layout */
if (hw->blk[params->blk].es.reverse)
idx = fv_words - params->es_cnt - 1;
else
idx = params->es_cnt;
params->es[idx].prot_id = prot_id;
params->es[idx].off = off;
params->mask[idx] = mask | sib_mask;
params->es_cnt++;
}
off += ICE_FLOW_FV_EXTRACT_SZ;
}
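Concretely (an illustrative case; the offsets come from the standard IPv4 header layout): TTL is byte 8 and protocol is byte 9, so both fields fall in the same 16-bit extraction word at offset 8. When a rule matches both, the first field processed writes the single ES entry with params->mask[idx] = mask | sib_mask so the sibling's bits stay masked in, and the second field skips entry allocation because flds[sib].xtrct.off already equals its own offset.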
......@@ -533,14 +1185,15 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
u8 i;
for (i = 0; i < prof->segs_cnt; i++) {
u8 j;
u64 match = params->prof->segs[i].match;
enum ice_flow_field j;
for_each_set_bit(j, (unsigned long *)&prof->segs[i].match,
for_each_set_bit(j, (unsigned long *)&match,
ICE_FLOW_FIELD_IDX_MAX) {
status = ice_flow_xtract_fld(hw, params, i,
(enum ice_flow_field)j);
status = ice_flow_xtract_fld(hw, params, i, j, match);
if (status)
return status;
clear_bit(j, (unsigned long *)&match);
}
/* Process raw matching bytes */
......@@ -751,7 +1404,8 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
/* Add a HW profile for this flow profile */
status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
params->es);
params->attr, params->attr_cnt, params->es,
params->mask);
if (status) {
ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n");
goto out;
......@@ -1158,6 +1812,9 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
seg->raws_cnt++;
}
#define ICE_FLOW_RSS_SEG_HDR_L2_MASKS \
(ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN)
#define ICE_FLOW_RSS_SEG_HDR_L3_MASKS \
(ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6)
......@@ -1165,7 +1822,8 @@ ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len,
(ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_SCTP)
#define ICE_FLOW_RSS_SEG_HDR_VAL_MASKS \
(ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
(ICE_FLOW_RSS_SEG_HDR_L2_MASKS | \
ICE_FLOW_RSS_SEG_HDR_L3_MASKS | \
ICE_FLOW_RSS_SEG_HDR_L4_MASKS)
/**
......@@ -1193,7 +1851,8 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u64 hash_fields,
ICE_FLOW_SET_HDRS(segs, flow_hdr);
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS)
if (segs->hdrs & ~ICE_FLOW_RSS_SEG_HDR_VAL_MASKS &
~ICE_FLOW_RSS_HDRS_INNER_MASK & ~ICE_FLOW_SEG_HDR_IPV_OTHER)
return ICE_ERR_PARAM;
val = (u64)(segs->hdrs & ICE_FLOW_RSS_SEG_HDR_L3_MASKS);
......
......@@ -30,6 +30,80 @@
#define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT)
#define ICE_FLOW_HASH_GTP_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
#define ICE_FLOW_HASH_GTP_IPV4_TEID \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
#define ICE_FLOW_HASH_GTP_IPV6_TEID \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
#define ICE_FLOW_HASH_GTP_U_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID))
#define ICE_FLOW_HASH_GTP_U_IPV4_TEID \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_IPV6_TEID \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID)
#define ICE_FLOW_HASH_GTP_U_EH_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID))
#define ICE_FLOW_HASH_GTP_U_EH_QFI \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI))
#define ICE_FLOW_HASH_GTP_U_IPV4_EH \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_GTP_U_IPV6_EH \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \
ICE_FLOW_HASH_GTP_U_EH_QFI)
#define ICE_FLOW_HASH_PPPOE_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID))
#define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \
(ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_TCP_ID \
(ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PPPOE_UDP_ID \
(ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID)
#define ICE_FLOW_HASH_PFCP_SEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID))
#define ICE_FLOW_HASH_PFCP_IPV4_SEID \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_PFCP_IPV6_SEID \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID)
#define ICE_FLOW_HASH_L2TPV3_SESS_ID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID))
#define ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID)
#define ICE_FLOW_HASH_ESP_SPI \
(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI))
#define ICE_FLOW_HASH_ESP_IPV4_SPI \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_ESP_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI)
#define ICE_FLOW_HASH_AH_SPI \
(BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI))
#define ICE_FLOW_HASH_AH_IPV4_SPI \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_AH_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_SPI \
(BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI))
#define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \
(ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
......@@ -38,16 +112,66 @@
*/
enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_NONE = 0x00000000,
ICE_FLOW_SEG_HDR_ETH = 0x00000001,
ICE_FLOW_SEG_HDR_VLAN = 0x00000002,
ICE_FLOW_SEG_HDR_IPV4 = 0x00000004,
ICE_FLOW_SEG_HDR_IPV6 = 0x00000008,
ICE_FLOW_SEG_HDR_ARP = 0x00000010,
ICE_FLOW_SEG_HDR_ICMP = 0x00000020,
ICE_FLOW_SEG_HDR_TCP = 0x00000040,
ICE_FLOW_SEG_HDR_UDP = 0x00000080,
ICE_FLOW_SEG_HDR_SCTP = 0x00000100,
ICE_FLOW_SEG_HDR_GRE = 0x00000200,
ICE_FLOW_SEG_HDR_GTPC = 0x00000400,
ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800,
ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000,
ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000,
ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000,
ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000,
ICE_FLOW_SEG_HDR_PPPOE = 0x00010000,
ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000,
ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000,
ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000,
ICE_FLOW_SEG_HDR_ESP = 0x00100000,
ICE_FLOW_SEG_HDR_AH = 0x00200000,
ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
 * ICE_FLOW_SEG_HDR_IPV6, which includes the IPv4/IPv6 OTHER PTYPEs
 */
ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
};
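ICE_FLOW_SEG_HDR_IPV_OTHER is purely additive: OR it with ICE_FLOW_SEG_HDR_IPV4 or ICE_FLOW_SEG_HDR_IPV6 to select the *_ofos_all tables that also cover the IPv4/IPv6 OTHER PTYPEs. The VF flow director code later in this patch builds an IPv4/UDP segment exactly this way:

ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
		       ICE_FLOW_SEG_HDR_IPV4 |
		       ICE_FLOW_SEG_HDR_IPV_OTHER);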
/* These segments all have the same PTYPES, but are otherwise distinguished by
* the value of the gtp_eh_pdu and gtp_eh_pdu_link flags:
*
* gtp_eh_pdu gtp_eh_pdu_link
* ICE_FLOW_SEG_HDR_GTPU_IP 0 0
* ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care
* ICE_FLOW_SEG_HDR_GTPU_DWN 1 0
* ICE_FLOW_SEG_HDR_GTPU_UP 1 1
*/
#define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \
ICE_FLOW_SEG_HDR_GTPU_EH | \
ICE_FLOW_SEG_HDR_GTPU_DWN | \
ICE_FLOW_SEG_HDR_GTPU_UP)
#define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \
ICE_FLOW_SEG_HDR_PFCP_SESSION)
enum ice_flow_field {
/* L2 */
ICE_FLOW_FIELD_IDX_ETH_DA,
ICE_FLOW_FIELD_IDX_ETH_SA,
ICE_FLOW_FIELD_IDX_S_VLAN,
ICE_FLOW_FIELD_IDX_C_VLAN,
ICE_FLOW_FIELD_IDX_ETH_TYPE,
/* L3 */
ICE_FLOW_FIELD_IDX_IPV4_DSCP,
ICE_FLOW_FIELD_IDX_IPV6_DSCP,
ICE_FLOW_FIELD_IDX_IPV4_TTL,
ICE_FLOW_FIELD_IDX_IPV4_PROT,
ICE_FLOW_FIELD_IDX_IPV6_TTL,
ICE_FLOW_FIELD_IDX_IPV6_PROT,
ICE_FLOW_FIELD_IDX_IPV4_SA,
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
......@@ -59,9 +183,42 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_UDP_DST_PORT,
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
ICE_FLOW_FIELD_IDX_TCP_FLAGS,
/* ARP */
ICE_FLOW_FIELD_IDX_ARP_SIP,
ICE_FLOW_FIELD_IDX_ARP_DIP,
ICE_FLOW_FIELD_IDX_ARP_SHA,
ICE_FLOW_FIELD_IDX_ARP_DHA,
ICE_FLOW_FIELD_IDX_ARP_OP,
/* ICMP */
ICE_FLOW_FIELD_IDX_ICMP_TYPE,
ICE_FLOW_FIELD_IDX_ICMP_CODE,
/* GRE */
ICE_FLOW_FIELD_IDX_GRE_KEYID,
/* The total number of enums must not exceed 64 */
/* GTPC_TEID */
ICE_FLOW_FIELD_IDX_GTPC_TEID,
/* GTPU_IP */
ICE_FLOW_FIELD_IDX_GTPU_IP_TEID,
/* GTPU_EH */
ICE_FLOW_FIELD_IDX_GTPU_EH_TEID,
ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
/* GTPU_UP */
ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
/* GTPU_DWN */
ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
/* PPPoE */
ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
/* PFCP */
ICE_FLOW_FIELD_IDX_PFCP_SEID,
/* L2TPv3 */
ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
/* ESP */
ICE_FLOW_FIELD_IDX_ESP_SPI,
/* AH */
ICE_FLOW_FIELD_IDX_AH_SPI,
/* NAT_T ESP */
ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
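The 64-entry cap exists because these field indices double as bit positions in a 64-bit match/hash word, as in the BIT_ULL() hash defines above and the for_each_set_bit() walk in ice_flow_create_xtrct_seq(). For instance:

u64 match = BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
	    BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA);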
......@@ -138,6 +295,7 @@ struct ice_flow_seg_xtrct {
u16 off; /* Starting offset of the field in header in bytes */
u8 idx; /* Index of FV entry used */
u8 disp; /* Displacement of field in bits fr. FV entry's start */
u16 mask; /* Mask for field */
};
enum ice_flow_fld_match_type {
......
......@@ -306,8 +306,23 @@
#define GLQF_FD_SIZE_FD_BSIZE_S 16
#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4))
#define GLQF_FDMASK_MAX_INDEX 31
#define GLQF_FDMASK_MSK_INDEX_S 0
#define GLQF_FDMASK_MSK_INDEX_M ICE_M(0x1F, 0)
#define GLQF_FDMASK_MASK_S 16
#define GLQF_FDMASK_MASK_M ICE_M(0xFFFF, 16)
#define GLQF_FDMASK_SEL(_i) (0x00410400 + ((_i) * 4))
#define GLQF_FDSWAP(_i, _j) (0x00413000 + ((_i) * 4 + (_j) * 512))
#define GLQF_HMASK(_i) (0x0040FC00 + ((_i) * 4))
#define GLQF_HMASK_MAX_INDEX 31
#define GLQF_HMASK_MSK_INDEX_S 0
#define GLQF_HMASK_MSK_INDEX_M ICE_M(0x1F, 0)
#define GLQF_HMASK_MASK_S 16
#define GLQF_HMASK_MASK_M ICE_M(0xFFFF, 16)
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
#define PFQF_FD_ENA 0x0043A000
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100
......@@ -369,6 +384,9 @@
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
#define VSIQF_FD_CNT_FD_BCNT_S 16
#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define PFPM_APM 0x000B8080
......
......@@ -55,6 +55,7 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW0_COMP_REPORT_M \
(0x3ULL << ICE_FXD_FLTR_QW0_COMP_REPORT_S)
#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL 0x1ULL
#define ICE_FXD_FLTR_QW0_COMP_REPORT_SW 0x2ULL
#define ICE_FXD_FLTR_QW0_FD_SPACE_S 14
#define ICE_FXD_FLTR_QW0_FD_SPACE_M (0x3ULL << ICE_FXD_FLTR_QW0_FD_SPACE_S)
......@@ -128,6 +129,7 @@ struct ice_fltr_desc {
#define ICE_FXD_FLTR_QW1_FDID_PRI_S 25
#define ICE_FXD_FLTR_QW1_FDID_PRI_M (0x7ULL << ICE_FXD_FLTR_QW1_FDID_PRI_S)
#define ICE_FXD_FLTR_QW1_FDID_PRI_ONE 0x1ULL
#define ICE_FXD_FLTR_QW1_FDID_PRI_THREE 0x3ULL
#define ICE_FXD_FLTR_QW1_FDID_MDID_S 28
#define ICE_FXD_FLTR_QW1_FDID_MDID_M (0xFULL << ICE_FXD_FLTR_QW1_FDID_MDID_S)
......@@ -138,6 +140,26 @@ struct ice_fltr_desc {
(0xFFFFFFFFULL << ICE_FXD_FLTR_QW1_FDID_S)
#define ICE_FXD_FLTR_QW1_FDID_ZERO 0x0ULL
/* definition for FD filter programming status descriptor WB format */
#define ICE_FXD_FLTR_WB_QW1_DD_S 0
#define ICE_FXD_FLTR_WB_QW1_DD_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_DD_S)
#define ICE_FXD_FLTR_WB_QW1_DD_YES 0x1ULL
#define ICE_FXD_FLTR_WB_QW1_PROG_ID_S 1
#define ICE_FXD_FLTR_WB_QW1_PROG_ID_M \
(0x3ULL << ICE_FXD_FLTR_WB_QW1_PROG_ID_S)
#define ICE_FXD_FLTR_WB_QW1_PROG_ADD 0x0ULL
#define ICE_FXD_FLTR_WB_QW1_PROG_DEL 0x1ULL
#define ICE_FXD_FLTR_WB_QW1_FAIL_S 4
#define ICE_FXD_FLTR_WB_QW1_FAIL_M (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_YES 0x1ULL
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S 5
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M \
(0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
struct ice_rx_ptype_decoded {
u32 ptype:10;
u32 known:1;
......
......@@ -343,6 +343,9 @@ static int ice_vsi_clear(struct ice_vsi *vsi)
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
pf->next_vsi = vsi->idx;
if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL &&
vsi->vf_id != ICE_INVAL_VFID)
pf->next_vsi = vsi->idx;
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
......@@ -454,8 +457,8 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
goto unlock_pf;
}
if (vsi->type == ICE_VSI_CTRL) {
/* Use the last VSI slot as the index for the control VSI */
if (vsi->type == ICE_VSI_CTRL && vf_id == ICE_INVAL_VFID) {
/* Use the last VSI slot as the index for the PF control VSI */
vsi->idx = pf->num_alloc_vsi - 1;
pf->ctrl_vsi_idx = vsi->idx;
pf->vsi[vsi->idx] = vsi;
......@@ -468,6 +471,9 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
pf->next_vsi);
}
if (vsi->type == ICE_VSI_CTRL && vf_id != ICE_INVAL_VFID)
pf->vf[vf_id].ctrl_vsi_idx = vsi->idx;
goto unlock_pf;
err_rings:
......@@ -506,7 +512,7 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
if (!b_val)
return -EPERM;
if (vsi->type != ICE_VSI_PF)
if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF))
return -EPERM;
if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
......@@ -517,6 +523,13 @@ static int ice_alloc_fd_res(struct ice_vsi *vsi)
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
if (vsi->type == ICE_VSI_VF) {
vsi->num_gfltr = 0;
/* each VSI gets same "best_effort" quota */
vsi->num_bfltr = b_val;
}
return 0;
}
......@@ -856,7 +869,8 @@ static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
u8 dflt_q_group, dflt_q_prio;
u16 dflt_q, report_q, val;
if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL)
if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
vsi->type != ICE_VSI_VF)
return;
val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
......@@ -1179,7 +1193,24 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors = vsi->num_q_vectors;
/* reserve slots from OS requested IRQs */
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
struct ice_vf *vf;
int i;
ice_for_each_vf(pf, i) {
vf = &pf->vf[i];
if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI) {
base = pf->vsi[vf->ctrl_vsi_idx]->base_vector;
break;
}
}
if (i == pf->num_alloc_vfs)
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
ICE_RES_VF_CTRL_VEC_ID);
} else {
base = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
}
if (base < 0) {
dev_err(dev, "%d MSI-X interrupts available. %s %d failed to get %d MSI-X vectors\n",
......@@ -2308,7 +2339,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
if (vsi_type == ICE_VSI_VF)
if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
else
vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
......@@ -2323,7 +2354,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (vsi->type == ICE_VSI_PF)
vsi->ethtype = ETH_P_PAUSE;
if (vsi->type == ICE_VSI_VF)
if (vsi->type == ICE_VSI_VF || vsi->type == ICE_VSI_CTRL)
vsi->vf_id = vf_id;
ice_alloc_fd_res(vsi);
......@@ -2770,7 +2801,24 @@ int ice_vsi_release(struct ice_vsi *vsi)
* many interrupts each VF needs. SR-IOV MSIX resources are also
* cleared in the same manner.
*/
if (vsi->type != ICE_VSI_VF) {
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID) {
struct ice_vf *vf;
int i;
ice_for_each_vf(pf, i) {
vf = &pf->vf[i];
if (i != vsi->vf_id && vf->ctrl_vsi_idx != ICE_NO_VSI)
break;
}
if (i == pf->num_alloc_vfs) {
/* No other VFs left that have a control VSI; reclaim SW
 * interrupts back to the common pool
 */
ice_free_res(pf->irq_tracker, vsi->base_vector,
ICE_RES_VF_CTRL_VEC_ID);
pf->num_avail_sw_msix += vsi->num_q_vectors;
}
} else if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
......
......@@ -2071,6 +2071,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
ice_sync_arfs_fltrs(pf);
ice_flush_fdir_ctx(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
ice_service_task_complete(pf);
......@@ -2082,6 +2083,7 @@ static void ice_service_task(struct work_struct *work)
test_bit(__ICE_MDD_EVENT_PENDING, pf->state) ||
test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
test_bit(__ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(__ICE_FD_VF_FLUSH_CTX, pf->state) ||
test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
......@@ -2220,8 +2222,13 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (vsi->type == ICE_VSI_CTRL && vsi->vf_id != ICE_INVAL_VFID)
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
IRQF_SHARED, q_vector->name,
q_vector);
else
err = devm_request_irq(dev, irq_num, vsi->irq_handler,
0, q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
err);
......
......@@ -13,6 +13,9 @@
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
ICE_PROT_MAC_IL = 4,
ICE_PROT_ETYPE_OL = 9,
ICE_PROT_ETYPE_IL = 10,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
ICE_PROT_IPV6_OF_OR_S = 40,
......@@ -21,7 +24,14 @@ enum ice_prot_id {
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
ICE_PROT_ESP_F = 88,
ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_ICMP_IL = 98,
ICE_PROT_ICMPV6_IL = 100,
ICE_PROT_PPPOE = 103,
ICE_PROT_L2TPV3 = 104,
ICE_PROT_ARP_OF = 118,
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
......
......@@ -1115,6 +1115,11 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
dma_rmb();
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
struct ice_vsi *ctrl_vsi = rx_ring->vsi;
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
ctrl_vsi->vf_id != ICE_INVAL_VFID)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
ice_put_rx_buf(rx_ring, NULL, 0);
cleaned_count++;
continue;
......
......@@ -192,6 +192,24 @@ enum ice_fltr_ptype {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP,
ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER,
ICE_FLTR_PTYPE_NONF_IPV6_GTPU_IPV6_OTHER,
ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3,
ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3,
ICE_FLTR_PTYPE_NONF_IPV4_ESP,
ICE_FLTR_PTYPE_NONF_IPV6_ESP,
ICE_FLTR_PTYPE_NONF_IPV4_AH,
ICE_FLTR_PTYPE_NONF_IPV6_AH,
ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP,
ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP,
ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE,
ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION,
ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE,
ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION,
ICE_FLTR_PTYPE_NON_IP_L2,
ICE_FLTR_PTYPE_FRAG_IPV4,
ICE_FLTR_PTYPE_NONF_IPV6_UDP,
ICE_FLTR_PTYPE_NONF_IPV6_TCP,
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_flow.h"
#define to_fltr_conf_from_desc(p) \
container_of(p, struct virtchnl_fdir_fltr_conf, input)
#define ICE_FLOW_PROF_TYPE_S 0
#define ICE_FLOW_PROF_TYPE_M (0xFFFFFFFFULL << ICE_FLOW_PROF_TYPE_S)
#define ICE_FLOW_PROF_VSI_S 32
#define ICE_FLOW_PROF_VSI_M (0xFFFFFFFFULL << ICE_FLOW_PROF_VSI_S)
/* Flow profile ID format:
* [0:31] - flow type, flow + tun_offs
* [32:63] - VSI index
*/
#define ICE_FLOW_PROF_FD(vsi, flow, tun_offs) \
((u64)(((((flow) + (tun_offs)) & ICE_FLOW_PROF_TYPE_M)) | \
(((u64)(vsi) << ICE_FLOW_PROF_VSI_S) & ICE_FLOW_PROF_VSI_M)))
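A worked instance of this layout (values hypothetical): for VSI index 3 and an IPv4/TCP flow with no tunnel offset,

u64 prof_id = ICE_FLOW_PROF_FD(3, ICE_FLTR_PTYPE_NONF_IPV4_TCP, 0);
/* prof_id == ((u64)3 << ICE_FLOW_PROF_VSI_S) | ICE_FLTR_PTYPE_NONF_IPV4_TCP */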
#define GTPU_TEID_OFFSET 4
#define GTPU_EH_QFI_OFFSET 1
#define GTPU_EH_QFI_MASK 0x3F
#define PFCP_S_OFFSET 0
#define PFCP_S_MASK 0x1
#define PFCP_PORT_NR 8805
#define FDIR_INSET_FLAG_ESP_S 0
#define FDIR_INSET_FLAG_ESP_M BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_UDP BIT_ULL(FDIR_INSET_FLAG_ESP_S)
#define FDIR_INSET_FLAG_ESP_IPSEC (0ULL << FDIR_INSET_FLAG_ESP_S)
enum ice_fdir_tunnel_type {
ICE_FDIR_TUNNEL_TYPE_NONE = 0,
ICE_FDIR_TUNNEL_TYPE_GTPU,
ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};
struct virtchnl_fdir_fltr_conf {
struct ice_fdir_fltr input;
enum ice_fdir_tunnel_type ttype;
u64 inset_flag;
u32 flow_id;
};
static enum virtchnl_proto_hdr_type vc_pattern_ether[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_tcp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_TCP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_sctp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_SCTP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_tcp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_TCP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_udp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_sctp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_SCTP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_GTPU_IP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_gtpu_eh[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_GTPU_IP,
VIRTCHNL_PROTO_HDR_GTPU_EH,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_l2tpv3[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_L2TPV3,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_l2tpv3[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_L2TPV3,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_esp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_esp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_ah[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_AH,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_ah[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_AH,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_nat_t_esp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_nat_t_esp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv4_pfcp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_PFCP,
VIRTCHNL_PROTO_HDR_NONE,
};
static enum virtchnl_proto_hdr_type vc_pattern_ipv6_pfcp[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_PFCP,
VIRTCHNL_PROTO_HDR_NONE,
};
struct virtchnl_fdir_pattern_match_item {
enum virtchnl_proto_hdr_type *list;
u64 input_set;
u64 *meta;
};
static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_os[] = {
{vc_pattern_ipv4, 0, NULL},
{vc_pattern_ipv4_tcp, 0, NULL},
{vc_pattern_ipv4_udp, 0, NULL},
{vc_pattern_ipv4_sctp, 0, NULL},
{vc_pattern_ipv6, 0, NULL},
{vc_pattern_ipv6_tcp, 0, NULL},
{vc_pattern_ipv6_udp, 0, NULL},
{vc_pattern_ipv6_sctp, 0, NULL},
};
static const struct virtchnl_fdir_pattern_match_item vc_fdir_pattern_comms[] = {
{vc_pattern_ipv4, 0, NULL},
{vc_pattern_ipv4_tcp, 0, NULL},
{vc_pattern_ipv4_udp, 0, NULL},
{vc_pattern_ipv4_sctp, 0, NULL},
{vc_pattern_ipv6, 0, NULL},
{vc_pattern_ipv6_tcp, 0, NULL},
{vc_pattern_ipv6_udp, 0, NULL},
{vc_pattern_ipv6_sctp, 0, NULL},
{vc_pattern_ether, 0, NULL},
{vc_pattern_ipv4_gtpu, 0, NULL},
{vc_pattern_ipv4_gtpu_eh, 0, NULL},
{vc_pattern_ipv4_l2tpv3, 0, NULL},
{vc_pattern_ipv6_l2tpv3, 0, NULL},
{vc_pattern_ipv4_esp, 0, NULL},
{vc_pattern_ipv6_esp, 0, NULL},
{vc_pattern_ipv4_ah, 0, NULL},
{vc_pattern_ipv6_ah, 0, NULL},
{vc_pattern_ipv4_nat_t_esp, 0, NULL},
{vc_pattern_ipv6_nat_t_esp, 0, NULL},
{vc_pattern_ipv4_pfcp, 0, NULL},
{vc_pattern_ipv6_pfcp, 0, NULL},
};
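Each pattern is a VIRTCHNL_PROTO_HDR_NONE-terminated list, and a VF rule is accepted only when its proto_hdrs chain matches one of the lists for the loaded DDP package (the OS-default table is a strict subset of the comms table). A comparison sketch, using a hypothetical helper name rather than the driver's actual matcher:

static bool
vc_fdir_match_pattern(const enum virtchnl_proto_hdr_type *list,
		      const struct virtchnl_proto_hdrs *proto)
{
	int i = 0;

	/* walk both sequences in lockstep until the pattern terminator */
	while (list[i] != VIRTCHNL_PROTO_HDR_NONE && i < proto->count) {
		if (list[i] != proto->proto_hdr[i].type)
			return false;
		i++;
	}

	/* accept only if both sequences end together */
	return list[i] == VIRTCHNL_PROTO_HDR_NONE && i == proto->count;
}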
struct virtchnl_fdir_inset_map {
enum virtchnl_proto_hdr_field field;
enum ice_flow_field fld;
u64 flag;
u64 mask;
};
static const struct virtchnl_fdir_inset_map fdir_inset_map[] = {
{VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV4_DSCP, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV4_PROT, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL, 0, 0},
{VIRTCHNL_PROTO_HDR_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV6_PROT, 0, 0},
{VIRTCHNL_PROTO_HDR_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, 0, 0},
{VIRTCHNL_PROTO_HDR_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
{VIRTCHNL_PROTO_HDR_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, 0, 0},
{VIRTCHNL_PROTO_HDR_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, 0, 0},
{VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, 0, 0},
{VIRTCHNL_PROTO_HDR_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, 0, 0},
{VIRTCHNL_PROTO_HDR_GTPU_IP_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, 0, 0},
{VIRTCHNL_PROTO_HDR_GTPU_EH_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, 0, 0},
{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI,
FDIR_INSET_FLAG_ESP_IPSEC, FDIR_INSET_FLAG_ESP_M},
{VIRTCHNL_PROTO_HDR_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
FDIR_INSET_FLAG_ESP_UDP, FDIR_INSET_FLAG_ESP_M},
{VIRTCHNL_PROTO_HDR_AH_SPI, ICE_FLOW_FIELD_IDX_AH_SPI, 0, 0},
{VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID, ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, 0, 0},
{VIRTCHNL_PROTO_HDR_PFCP_S_FIELD, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 0, 0},
};
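The flag/mask pair resolves the one ambiguous mapping above: VIRTCHNL_PROTO_HDR_ESP_SPI appears twice, once for plain ESP over IP (FDIR_INSET_FLAG_ESP_IPSEC) and once for ESP over UDP (FDIR_INSET_FLAG_ESP_UDP). The parser masks conf->inset_flag with the row's mask and skips rows whose flag does not match. A sketch, assuming conf was flagged when the preceding UDP header was parsed:

/* NAT-T rule: ESP follows UDP, so select the NAT_T variant of the SPI */
conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
/* ESP_SPI now resolves to ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI rather than
 * ICE_FLOW_FIELD_IDX_ESP_SPI
 */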
/**
* ice_vc_fdir_param_check
* @vf: pointer to the VF structure
* @vsi_id: VF relative VSI ID
*
* Check for a valid VSI ID, the PF's state, and the VF's state
*
* Return: 0 on success, and -EINVAL on error.
*/
static int
ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
{
struct ice_pf *pf = vf->pf;
if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
return -EINVAL;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
return -EINVAL;
if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF))
return -EINVAL;
if (vsi_id != vf->lan_vsi_num)
return -EINVAL;
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
if (!pf->vsi[vf->lan_vsi_idx])
return -EINVAL;
return 0;
}
/**
* ice_vf_start_ctrl_vsi
* @vf: pointer to the VF structure
*
* Allocate ctrl_vsi for the first time and open the ctrl_vsi port for the VF
*
* Return: 0 on success, and other on error.
*/
static int ice_vf_start_ctrl_vsi(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *ctrl_vsi;
struct device *dev;
int err;
dev = ice_pf_to_dev(pf);
if (vf->ctrl_vsi_idx != ICE_NO_VSI)
return -EEXIST;
ctrl_vsi = ice_vf_ctrl_vsi_setup(vf);
if (!ctrl_vsi) {
dev_dbg(dev, "Could not setup control VSI for VF %d\n",
vf->vf_id);
return -ENOMEM;
}
err = ice_vsi_open_ctrl(ctrl_vsi);
if (err) {
dev_dbg(dev, "Could not open control VSI for VF %d\n",
vf->vf_id);
goto err_vsi_open;
}
return 0;
err_vsi_open:
ice_vsi_release(ctrl_vsi);
if (vf->ctrl_vsi_idx != ICE_NO_VSI) {
pf->vsi[vf->ctrl_vsi_idx] = NULL;
vf->ctrl_vsi_idx = ICE_NO_VSI;
}
return err;
}
/**
* ice_vc_fdir_alloc_prof - allocate profile for this filter flow type
* @vf: pointer to the VF structure
* @flow: filter flow type
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_alloc_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
struct ice_vf_fdir *fdir = &vf->fdir;
if (!fdir->fdir_prof) {
fdir->fdir_prof = devm_kcalloc(ice_pf_to_dev(vf->pf),
ICE_FLTR_PTYPE_MAX,
sizeof(*fdir->fdir_prof),
GFP_KERNEL);
if (!fdir->fdir_prof)
return -ENOMEM;
}
if (!fdir->fdir_prof[flow]) {
fdir->fdir_prof[flow] = devm_kzalloc(ice_pf_to_dev(vf->pf),
sizeof(**fdir->fdir_prof),
GFP_KERNEL);
if (!fdir->fdir_prof[flow])
return -ENOMEM;
}
return 0;
}
/**
* ice_vc_fdir_free_prof - free profile for this filter flow type
* @vf: pointer to the VF structure
* @flow: filter flow type
*/
static void
ice_vc_fdir_free_prof(struct ice_vf *vf, enum ice_fltr_ptype flow)
{
struct ice_vf_fdir *fdir = &vf->fdir;
if (!fdir->fdir_prof)
return;
if (!fdir->fdir_prof[flow])
return;
devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof[flow]);
fdir->fdir_prof[flow] = NULL;
}
/**
* ice_vc_fdir_free_prof_all - free all flow profiles for this VF
* @vf: pointer to the VF structure
*/
static void ice_vc_fdir_free_prof_all(struct ice_vf *vf)
{
struct ice_vf_fdir *fdir = &vf->fdir;
enum ice_fltr_ptype flow;
if (!fdir->fdir_prof)
return;
for (flow = ICE_FLTR_PTYPE_NONF_NONE; flow < ICE_FLTR_PTYPE_MAX; flow++)
ice_vc_fdir_free_prof(vf, flow);
devm_kfree(ice_pf_to_dev(vf->pf), fdir->fdir_prof);
fdir->fdir_prof = NULL;
}
/**
* ice_vc_fdir_parse_flow_fld
* @proto_hdr: virtual channel protocol filter header
* @conf: FDIR configuration for each filter
* @fld: field type array
* @fld_cnt: field counter
*
* Parse the virtual channel filter header and store the selected fields in
* the field type array
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_parse_flow_fld(struct virtchnl_proto_hdr *proto_hdr,
struct virtchnl_fdir_fltr_conf *conf,
enum ice_flow_field *fld, int *fld_cnt)
{
struct virtchnl_proto_hdr hdr;
u32 i;
memcpy(&hdr, proto_hdr, sizeof(hdr));
for (i = 0; (i < ARRAY_SIZE(fdir_inset_map)) &&
VIRTCHNL_GET_PROTO_HDR_FIELD(&hdr); i++)
if (VIRTCHNL_TEST_PROTO_HDR(&hdr, fdir_inset_map[i].field)) {
if (fdir_inset_map[i].mask &&
((fdir_inset_map[i].mask & conf->inset_flag) !=
fdir_inset_map[i].flag))
continue;
fld[*fld_cnt] = fdir_inset_map[i].fld;
*fld_cnt += 1;
if (*fld_cnt >= ICE_FLOW_FIELD_IDX_MAX)
return -EINVAL;
VIRTCHNL_DEL_PROTO_HDR_FIELD(&hdr,
fdir_inset_map[i].field);
}
return 0;
}
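A usage sketch (hdr and conf assumed already populated): for a header whose field_selector has the IPv4 source and destination address bits set, the loop above appends the two mapped ice fields in table order:

enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
int fld_cnt = 0;

if (!ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt)) {
	/* fld_cnt == 2:
	 * fld[0] == ICE_FLOW_FIELD_IDX_IPV4_SA
	 * fld[1] == ICE_FLOW_FIELD_IDX_IPV4_DA
	 */
}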
/**
* ice_vc_fdir_set_flow_fld - set the flow's packet segment fields
* @vf: pointer to the VF structure
* @fltr: virtual channel add cmd buffer
* @conf: FDIR configuration for each filter
* @seg: array of one or more packet segments that describe the flow
*
* Parse the virtual channel add msg buffer's field vector and store it in the
* flow's packet segment fields
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_set_flow_fld(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf,
struct ice_flow_seg_info *seg)
{
struct virtchnl_fdir_rule *rule = &fltr->rule_cfg;
enum ice_flow_field fld[ICE_FLOW_FIELD_IDX_MAX];
struct device *dev = ice_pf_to_dev(vf->pf);
struct virtchnl_proto_hdrs *proto;
int fld_cnt = 0;
int i;
proto = &rule->proto_hdrs;
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
int ret;
ret = ice_vc_fdir_parse_flow_fld(hdr, conf, fld, &fld_cnt);
if (ret)
return ret;
}
if (fld_cnt == 0) {
dev_dbg(dev, "Empty input set for VF %d\n", vf->vf_id);
return -EINVAL;
}
for (i = 0; i < fld_cnt; i++)
ice_flow_set_fld(seg, fld[i],
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL,
ICE_FLOW_FLD_OFF_INVAL, false);
return 0;
}
/**
* ice_vc_fdir_set_flow_hdr - config the flow's packet segment header
* @vf: pointer to the VF structure
* @conf: FDIR configuration for each filter
* @seg: array of one or more packet segments that describe the flow
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_set_flow_hdr(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf,
struct ice_flow_seg_info *seg)
{
enum ice_fltr_ptype flow = conf->input.flow_type;
enum ice_fdir_tunnel_type ttype = conf->ttype;
struct device *dev = ice_pf_to_dev(vf->pf);
switch (flow) {
case ICE_FLTR_PTYPE_NON_IP_L2:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_AH:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU) {
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
} else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH) {
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
ICE_FLOW_SEG_HDR_GTPU_IP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
} else {
dev_dbg(dev, "Invalid tunnel type 0x%x for VF %d\n",
flow, vf->vf_id);
return -EINVAL;
}
break;
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_L2TPV3 |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_AH:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_AH |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_NODE |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_PFCP_SESSION |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER);
break;
default:
dev_dbg(dev, "Invalid flow type 0x%x for VF %d failed\n",
flow, vf->vf_id);
return -EINVAL;
}
return 0;
}
/**
* ice_vc_fdir_rem_prof - remove profile for this filter flow type
* @vf: pointer to the VF structure
* @flow: filter flow type
* @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
*/
static void
ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
{
struct ice_vf_fdir *fdir = &vf->fdir;
struct ice_fd_hw_prof *vf_prof;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_hw *hw;
u64 prof_id;
int i;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
if (!fdir->fdir_prof || !fdir->fdir_prof[flow])
return;
vf_prof = fdir->fdir_prof[flow];
vf_vsi = pf->vsi[vf->lan_vsi_idx];
if (!vf_vsi) {
dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
return;
}
if (!fdir->prof_entry_cnt[flow][tun])
return;
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num,
flow, tun ? ICE_FLTR_PTYPE_MAX : 0);
for (i = 0; i < fdir->prof_entry_cnt[flow][tun]; i++)
if (vf_prof->entry_h[i][tun]) {
u16 vsi_num = ice_get_hw_vsi_num(hw, vf_prof->vsi_h[i]);
ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
ice_flow_rem_entry(hw, ICE_BLK_FD,
vf_prof->entry_h[i][tun]);
vf_prof->entry_h[i][tun] = 0;
}
ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
devm_kfree(dev, vf_prof->fdir_seg[tun]);
vf_prof->fdir_seg[tun] = NULL;
for (i = 0; i < vf_prof->cnt; i++)
vf_prof->vsi_h[i] = 0;
fdir->prof_entry_cnt[flow][tun] = 0;
}
/**
* ice_vc_fdir_rem_prof_all - remove all profiles for this VF
* @vf: pointer to the VF structure
*/
static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
{
enum ice_fltr_ptype flow;
for (flow = ICE_FLTR_PTYPE_NONF_NONE;
flow < ICE_FLTR_PTYPE_MAX; flow++) {
ice_vc_fdir_rem_prof(vf, flow, 0);
ice_vc_fdir_rem_prof(vf, flow, 1);
}
}
/**
* ice_vc_fdir_write_flow_prof - write the flow's profile into hardware
* @vf: pointer to the VF structure
* @flow: filter flow type
* @seg: array of one or more packet segments that describe the flow
* @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
*
* Write the flow's profile config and packet segment into the hardware
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
struct ice_flow_seg_info *seg, int tun)
{
struct ice_vf_fdir *fdir = &vf->fdir;
struct ice_vsi *vf_vsi, *ctrl_vsi;
struct ice_flow_seg_info *old_seg;
struct ice_flow_prof *prof = NULL;
struct ice_fd_hw_prof *vf_prof;
enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
u64 entry1_h = 0;
u64 entry2_h = 0;
u64 prof_id;
int ret;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vf_vsi = pf->vsi[vf->lan_vsi_idx];
if (!vf_vsi)
return -EINVAL;
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
if (!ctrl_vsi)
return -EINVAL;
vf_prof = fdir->fdir_prof[flow];
old_seg = vf_prof->fdir_seg[tun];
if (old_seg) {
if (!memcmp(old_seg, seg, sizeof(*seg))) {
dev_dbg(dev, "Duplicated profile for VF %d!\n",
vf->vf_id);
return -EEXIST;
}
if (fdir->fdir_fltr_cnt[flow][tun]) {
ret = -EINVAL;
dev_dbg(dev, "Input set conflicts for VF %d\n",
vf->vf_id);
goto err_exit;
}
/* remove previously allocated profile */
ice_vc_fdir_rem_prof(vf, flow, tun);
}
prof_id = ICE_FLOW_PROF_FD(vf_vsi->vsi_num, flow,
tun ? ICE_FLTR_PTYPE_MAX : 0);
status = ice_flow_add_prof(hw, ICE_BLK_FD, ICE_FLOW_RX, prof_id, seg,
tun + 1, &prof);
ret = ice_status_to_errno(status);
if (ret) {
dev_dbg(dev, "Could not add VSI flow 0x%x for VF %d\n",
flow, vf->vf_id);
goto err_exit;
}
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
vf_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry1_h);
ret = ice_status_to_errno(status);
if (ret) {
dev_dbg(dev, "Could not add flow 0x%x VSI entry for VF %d\n",
flow, vf->vf_id);
goto err_prof;
}
status = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vf_vsi->idx,
ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
seg, &entry2_h);
ret = ice_status_to_errno(status);
if (ret) {
dev_dbg(dev,
"Could not add flow 0x%x Ctrl VSI entry for VF %d\n",
flow, vf->vf_id);
goto err_entry_1;
}
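/* record both flow entries: index 0 is the VF's LAN VSI, index 1 is the
 * VF's control VSI used to program the filter
 */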
vf_prof->fdir_seg[tun] = seg;
vf_prof->cnt = 0;
fdir->prof_entry_cnt[flow][tun] = 0;
vf_prof->entry_h[vf_prof->cnt][tun] = entry1_h;
vf_prof->vsi_h[vf_prof->cnt] = vf_vsi->idx;
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
vf_prof->entry_h[vf_prof->cnt][tun] = entry2_h;
vf_prof->vsi_h[vf_prof->cnt] = ctrl_vsi->idx;
vf_prof->cnt++;
fdir->prof_entry_cnt[flow][tun]++;
return 0;
err_entry_1:
ice_rem_prof_id_flow(hw, ICE_BLK_FD,
ice_get_hw_vsi_num(hw, vf_vsi->idx), prof_id);
ice_flow_rem_entry(hw, ICE_BLK_FD, entry1_h);
err_prof:
ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
err_exit:
return ret;
}
/**
* ice_vc_fdir_config_input_set - config the filter's input set
* @vf: pointer to the VF structure
* @fltr: virtual channel add cmd buffer
* @conf: FDIR configuration for each filter
* @tun: 0 implies non-tunnel type filter, 1 implies tunnel type filter
*
* Configure the input set type and value from the virtual channel add msg
* buffer
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf, int tun)
{
struct ice_fdir_fltr *input = &conf->input;
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_flow_seg_info *seg;
enum ice_fltr_ptype flow;
int ret;
flow = input->flow_type;
ret = ice_vc_fdir_alloc_prof(vf, flow);
if (ret) {
dev_dbg(dev, "Alloc flow prof for VF %d failed\n", vf->vf_id);
return ret;
}
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
if (!seg)
return -ENOMEM;
ret = ice_vc_fdir_set_flow_fld(vf, fltr, conf, seg);
if (ret) {
dev_dbg(dev, "Set flow field for VF %d failed\n", vf->vf_id);
goto err_exit;
}
ret = ice_vc_fdir_set_flow_hdr(vf, conf, seg);
if (ret) {
dev_dbg(dev, "Set flow hdr for VF %d failed\n", vf->vf_id);
goto err_exit;
}
ret = ice_vc_fdir_write_flow_prof(vf, flow, seg, tun);
if (ret == -EEXIST) {
devm_kfree(dev, seg);
} else if (ret) {
dev_dbg(dev, "Write flow profile for VF %d failed\n",
vf->vf_id);
goto err_exit;
}
return 0;
err_exit:
devm_kfree(dev, seg);
return ret;
}
/**
* ice_vc_fdir_match_pattern - match the filter against a header type list
* @fltr: virtual channel add cmd buffer
* @type: NONE-terminated array of protocol header types to match against
*
* Compare the filter's protocol header types, entry by entry, against the
* given type list.
*
* Return: true if every header type matches, false otherwise.
*/
static bool
ice_vc_fdir_match_pattern(struct virtchnl_fdir_add *fltr,
enum virtchnl_proto_hdr_type *type)
{
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
int i = 0;
while ((i < proto->count) &&
(*type == proto->proto_hdr[i].type) &&
(*type != VIRTCHNL_PROTO_HDR_NONE)) {
type++;
i++;
}
return ((i == proto->count) && (*type == VIRTCHNL_PROTO_HDR_NONE));
}
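/* Illustrative sketch, not part of this patch: the allow-list tables used
 * below (vc_fdir_pattern_os/comms, defined earlier in this file but not
 * shown in this hunk) are assumed to reference NONE-terminated type lists
 * of roughly this shape, which the matcher above walks entry by entry:
 */
static const enum virtchnl_proto_hdr_type vc_pattern_ipv4_udp_example[] = {
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_NONE,
};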
/**
* ice_vc_fdir_get_pattern - get the allowed pattern list
* @vf: pointer to the VF info
* @len: filter list length
*
* Return: pointer to allowed filter list
*/
static const struct virtchnl_fdir_pattern_match_item *
ice_vc_fdir_get_pattern(struct ice_vf *vf, int *len)
{
const struct virtchnl_fdir_pattern_match_item *item;
struct ice_pf *pf = vf->pf;
struct ice_hw *hw;
hw = &pf->hw;
if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
sizeof(hw->active_pkg_name))) {
item = vc_fdir_pattern_comms;
*len = ARRAY_SIZE(vc_fdir_pattern_comms);
} else {
item = vc_fdir_pattern_os;
*len = ARRAY_SIZE(vc_fdir_pattern_os);
}
return item;
}
/**
* ice_vc_fdir_search_pattern - search for a matching pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
*
* Search for a matching pattern in the supported pattern list
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_search_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr)
{
const struct virtchnl_fdir_pattern_match_item *pattern;
int len, i;
pattern = ice_vc_fdir_get_pattern(vf, &len);
for (i = 0; i < len; i++)
if (ice_vc_fdir_match_pattern(fltr, pattern[i].list))
return 0;
return -EINVAL;
}
/**
* ice_vc_fdir_parse_pattern - parse the virtchnl filter's pattern
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
* @conf: FDIR configuration for each filter
*
* Parse the virtual channel filter's pattern and store it in conf
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs;
enum virtchnl_proto_hdr_type l3 = VIRTCHNL_PROTO_HDR_NONE;
enum virtchnl_proto_hdr_type l4 = VIRTCHNL_PROTO_HDR_NONE;
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_fdir_fltr *input = &conf->input;
int i;
if (proto->count > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
dev_dbg(dev, "Invalid protocol count:0x%x for VF %d\n",
proto->count, vf->vf_id);
return -EINVAL;
}
for (i = 0; i < proto->count; i++) {
struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i];
struct ip_esp_hdr *esph;
struct ip_auth_hdr *ah;
struct sctphdr *sctph;
struct ipv6hdr *ip6h;
struct udphdr *udph;
struct tcphdr *tcph;
struct ethhdr *eth;
struct iphdr *iph;
u8 s_field;
u8 *rawh;
switch (hdr->type) {
case VIRTCHNL_PROTO_HDR_ETH:
eth = (struct ethhdr *)hdr->buffer;
input->flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
if (hdr->field_selector)
input->ext_data.ether_type = eth->h_proto;
break;
case VIRTCHNL_PROTO_HDR_IPV4:
iph = (struct iphdr *)hdr->buffer;
l3 = VIRTCHNL_PROTO_HDR_IPV4;
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
if (hdr->field_selector) {
input->ip.v4.src_ip = iph->saddr;
input->ip.v4.dst_ip = iph->daddr;
input->ip.v4.tos = iph->tos;
input->ip.v4.proto = iph->protocol;
}
break;
case VIRTCHNL_PROTO_HDR_IPV6:
ip6h = (struct ipv6hdr *)hdr->buffer;
l3 = VIRTCHNL_PROTO_HDR_IPV6;
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
if (hdr->field_selector) {
memcpy(input->ip.v6.src_ip,
ip6h->saddr.in6_u.u6_addr8,
sizeof(ip6h->saddr));
memcpy(input->ip.v6.dst_ip,
ip6h->daddr.in6_u.u6_addr8,
sizeof(ip6h->daddr));
input->ip.v6.tc = ((u8)(ip6h->priority) << 4) |
(ip6h->flow_lbl[0] >> 4);
input->ip.v6.proto = ip6h->nexthdr;
}
break;
case VIRTCHNL_PROTO_HDR_TCP:
tcph = (struct tcphdr *)hdr->buffer;
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
if (hdr->field_selector) {
if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
input->ip.v4.src_port = tcph->source;
input->ip.v4.dst_port = tcph->dest;
} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
input->ip.v6.src_port = tcph->source;
input->ip.v6.dst_port = tcph->dest;
}
}
break;
case VIRTCHNL_PROTO_HDR_UDP:
udph = (struct udphdr *)hdr->buffer;
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
if (hdr->field_selector) {
if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
input->ip.v4.src_port = udph->source;
input->ip.v4.dst_port = udph->dest;
} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
input->ip.v6.src_port = udph->source;
input->ip.v6.dst_port = udph->dest;
}
}
break;
case VIRTCHNL_PROTO_HDR_SCTP:
sctph = (struct sctphdr *)hdr->buffer;
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->flow_type =
ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->flow_type =
ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
if (hdr->field_selector) {
if (l3 == VIRTCHNL_PROTO_HDR_IPV4) {
input->ip.v4.src_port = sctph->source;
input->ip.v4.dst_port = sctph->dest;
} else if (l3 == VIRTCHNL_PROTO_HDR_IPV6) {
input->ip.v6.src_port = sctph->source;
input->ip.v6.dst_port = sctph->dest;
}
}
break;
case VIRTCHNL_PROTO_HDR_L2TPV3:
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_L2TPV3;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_L2TPV3;
if (hdr->field_selector)
input->l2tpv3_data.session_id = *((__be32 *)hdr->buffer);
break;
case VIRTCHNL_PROTO_HDR_ESP:
esph = (struct ip_esp_hdr *)hdr->buffer;
if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
l4 == VIRTCHNL_PROTO_HDR_UDP)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
l4 == VIRTCHNL_PROTO_HDR_UDP)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 &&
l4 == VIRTCHNL_PROTO_HDR_NONE)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 &&
l4 == VIRTCHNL_PROTO_HDR_NONE)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
if (l4 == VIRTCHNL_PROTO_HDR_UDP)
conf->inset_flag |= FDIR_INSET_FLAG_ESP_UDP;
else
conf->inset_flag |= FDIR_INSET_FLAG_ESP_IPSEC;
if (hdr->field_selector) {
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->ip.v4.sec_parm_idx = esph->spi;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->ip.v6.sec_parm_idx = esph->spi;
}
break;
case VIRTCHNL_PROTO_HDR_AH:
ah = (struct ip_auth_hdr *)hdr->buffer;
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_AH;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_AH;
if (hdr->field_selector) {
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->ip.v4.sec_parm_idx = ah->spi;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->ip.v6.sec_parm_idx = ah->spi;
}
break;
case VIRTCHNL_PROTO_HDR_PFCP:
rawh = (u8 *)hdr->buffer;
s_field = (rawh[0] >> PFCP_S_OFFSET) & PFCP_S_MASK;
if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 0)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_NODE;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV4 && s_field == 1)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_PFCP_SESSION;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 0)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_NODE;
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6 && s_field == 1)
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV6_PFCP_SESSION;
if (hdr->field_selector) {
if (l3 == VIRTCHNL_PROTO_HDR_IPV4)
input->ip.v4.dst_port = cpu_to_be16(PFCP_PORT_NR);
else if (l3 == VIRTCHNL_PROTO_HDR_IPV6)
input->ip.v6.dst_port = cpu_to_be16(PFCP_PORT_NR);
}
break;
case VIRTCHNL_PROTO_HDR_GTPU_IP:
rawh = (u8 *)hdr->buffer;
input->flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
if (hdr->field_selector)
input->gtpu_data.teid = *(__be32 *)(&rawh[GTPU_TEID_OFFSET]);
conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU;
break;
case VIRTCHNL_PROTO_HDR_GTPU_EH:
rawh = (u8 *)hdr->buffer;
if (hdr->field_selector)
input->gtpu_data.qfi = rawh[GTPU_EH_QFI_OFFSET] & GTPU_EH_QFI_MASK;
conf->ttype = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
break;
default:
dev_dbg(dev, "Invalid header type 0x:%x for VF %d\n",
hdr->type, vf->vf_id);
return -EINVAL;
}
}
return 0;
}
/**
* ice_vc_fdir_parse_action - parse the virtchnl filter's actions
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
* @conf: FDIR configuration for each filter
*
* Parse the virtual channel filter's actions and store them in conf
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_parse_action(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
struct virtchnl_filter_action_set *as = &fltr->rule_cfg.action_set;
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_fdir_fltr *input = &conf->input;
u32 dest_num = 0;
u32 mark_num = 0;
int i;
if (as->count > VIRTCHNL_MAX_NUM_ACTIONS) {
dev_dbg(dev, "Invalid action numbers:0x%x for VF %d\n",
as->count, vf->vf_id);
return -EINVAL;
}
for (i = 0; i < as->count; i++) {
struct virtchnl_filter_action *action = &as->actions[i];
switch (action->type) {
case VIRTCHNL_ACTION_PASSTHRU:
dest_num++;
input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
break;
case VIRTCHNL_ACTION_DROP:
dest_num++;
input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
break;
case VIRTCHNL_ACTION_QUEUE:
dest_num++;
input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
input->q_index = action->act_conf.queue.index;
break;
case VIRTCHNL_ACTION_Q_REGION:
dest_num++;
input->dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
input->q_index = action->act_conf.queue.index;
input->q_region = action->act_conf.queue.region;
break;
case VIRTCHNL_ACTION_MARK:
mark_num++;
input->fltr_id = action->act_conf.mark_id;
input->fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_THREE;
break;
default:
dev_dbg(dev, "Invalid action type:0x%x for VF %d\n",
action->type, vf->vf_id);
return -EINVAL;
}
}
if (dest_num == 0 || dest_num >= 2) {
dev_dbg(dev, "Invalid destination action for VF %d\n",
vf->vf_id);
return -EINVAL;
}
if (mark_num >= 2) {
dev_dbg(dev, "Too many mark actions for VF %d\n", vf->vf_id);
return -EINVAL;
}
return 0;
}
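/* Illustrative VF-side sketch with hypothetical values: an action set the
 * parser above accepts, with exactly one destination action (here, direct
 * to queue 13) and at most one mark action:
 */
static const struct virtchnl_filter_action_set example_action_set = {
.count = 2,
.actions = {
{ .type = VIRTCHNL_ACTION_QUEUE, .act_conf.queue.index = 13 },
{ .type = VIRTCHNL_ACTION_MARK, .act_conf.mark_id = 0x1234 },
},
};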
/**
* ice_vc_validate_fdir_fltr - validate the virtual channel filter
* @vf: pointer to the VF info
* @fltr: virtual channel add cmd buffer
* @conf: FDIR configuration for each filter
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
struct virtchnl_fdir_fltr_conf *conf)
{
int ret;
ret = ice_vc_fdir_search_pattern(vf, fltr);
if (ret)
return ret;
ret = ice_vc_fdir_parse_pattern(vf, fltr, conf);
if (ret)
return ret;
return ice_vc_fdir_parse_action(vf, fltr, conf);
}
/**
* ice_vc_fdir_comp_rules - compare if two filter rules have the same value
* @conf_a: FDIR configuration for filter a
* @conf_b: FDIR configuration for filter b
*
* Return: true if the two rules have identical values, false otherwise.
*/
static bool
ice_vc_fdir_comp_rules(struct virtchnl_fdir_fltr_conf *conf_a,
struct virtchnl_fdir_fltr_conf *conf_b)
{
struct ice_fdir_fltr *a = &conf_a->input;
struct ice_fdir_fltr *b = &conf_b->input;
if (conf_a->ttype != conf_b->ttype)
return false;
if (a->flow_type != b->flow_type)
return false;
if (memcmp(&a->ip, &b->ip, sizeof(a->ip)))
return false;
if (memcmp(&a->mask, &b->mask, sizeof(a->mask)))
return false;
if (memcmp(&a->gtpu_data, &b->gtpu_data, sizeof(a->gtpu_data)))
return false;
if (memcmp(&a->gtpu_mask, &b->gtpu_mask, sizeof(a->gtpu_mask)))
return false;
if (memcmp(&a->l2tpv3_data, &b->l2tpv3_data, sizeof(a->l2tpv3_data)))
return false;
if (memcmp(&a->l2tpv3_mask, &b->l2tpv3_mask, sizeof(a->l2tpv3_mask)))
return false;
if (memcmp(&a->ext_data, &b->ext_data, sizeof(a->ext_data)))
return false;
if (memcmp(&a->ext_mask, &b->ext_mask, sizeof(a->ext_mask)))
return false;
return true;
}
/**
* ice_vc_fdir_is_dup_fltr - check for a duplicate rule
* @vf: pointer to the VF info
* @conf: FDIR configuration for each filter
*
* Check if a rule with the same conf value already exists
*
* Return: true if a duplicate rule exists, false otherwise.
*/
static bool
ice_vc_fdir_is_dup_fltr(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf)
{
struct ice_fdir_fltr *desc;
bool ret;
list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
struct virtchnl_fdir_fltr_conf *node =
to_fltr_conf_from_desc(desc);
ret = ice_vc_fdir_comp_rules(node, conf);
if (ret)
return true;
}
return false;
}
/**
* ice_vc_fdir_insert_entry - insert FDIR conf entry into list
* @vf: pointer to the VF info
* @conf: FDIR configuration for each filter
* @id: pointer to ID value allocated by driver
*
* Insert FDIR conf entry into the list and allocate an ID for this filter
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_insert_entry(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf, u32 *id)
{
struct ice_fdir_fltr *input = &conf->input;
int i;
/* alloc ID corresponding with conf */
i = idr_alloc(&vf->fdir.fdir_rule_idr, conf, 0,
ICE_FDIR_MAX_FLTRS, GFP_KERNEL);
if (i < 0)
return -EINVAL;
*id = i;
list_add(&input->fltr_node, &vf->fdir.fdir_rule_list);
return 0;
}
/**
* ice_vc_fdir_remove_entry - remove FDIR conf entry by ID value
* @vf: pointer to the VF info
* @conf: FDIR configuration for each filter
* @id: filter rule's ID
*/
static void
ice_vc_fdir_remove_entry(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf, u32 id)
{
struct ice_fdir_fltr *input = &conf->input;
idr_remove(&vf->fdir.fdir_rule_idr, id);
list_del(&input->fltr_node);
}
/**
* ice_vc_fdir_lookup_entry - lookup FDIR conf entry by ID value
* @vf: pointer to the VF info
* @id: filter rule's ID
*
* Return: pointer to the FDIR filter conf entry on success, NULL otherwise.
*/
static struct virtchnl_fdir_fltr_conf *
ice_vc_fdir_lookup_entry(struct ice_vf *vf, u32 id)
{
return idr_find(&vf->fdir.fdir_rule_idr, id);
}
/**
* ice_vc_fdir_flush_entry - remove all FDIR conf entry
* @vf: pointer to the VF info
*/
static void ice_vc_fdir_flush_entry(struct ice_vf *vf)
{
struct virtchnl_fdir_fltr_conf *conf;
struct ice_fdir_fltr *desc, *temp;
list_for_each_entry_safe(desc, temp,
&vf->fdir.fdir_rule_list, fltr_node) {
conf = to_fltr_conf_from_desc(desc);
list_del(&desc->fltr_node);
devm_kfree(ice_pf_to_dev(vf->pf), conf);
}
}
/**
* ice_vc_fdir_write_fltr - write filter rule into hardware
* @vf: pointer to the VF info
* @conf: FDIR configuration for each filter
* @add: true implies add rule, false implies del rules
* @is_tun: false implies non-tunnel type filter, true implies tunnel filter
*
* Return: 0 on success, and other on error.
*/
static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
struct virtchnl_fdir_fltr_conf *conf,
bool add, bool is_tun)
{
struct ice_fdir_fltr *input = &conf->input;
struct ice_vsi *vsi, *ctrl_vsi;
struct ice_fltr_desc desc;
enum ice_status status;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
int ret;
u8 *pkt;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
return -EINVAL;
}
input->dest_vsi = vsi->idx;
input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx];
if (!ctrl_vsi) {
dev_dbg(dev, "Invalid ctrl_vsi for VF %d\n", vf->vf_id);
return -EINVAL;
}
pkt = devm_kzalloc(dev, ICE_FDIR_MAX_RAW_PKT_SIZE, GFP_KERNEL);
if (!pkt)
return -ENOMEM;
ice_fdir_get_prgm_desc(hw, input, &desc, add);
status = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun);
ret = ice_status_to_errno(status);
if (ret) {
dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n",
vf->vf_id, input->flow_type);
goto err_free_pkt;
}
ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt);
if (ret)
goto err_free_pkt;
return 0;
err_free_pkt:
devm_kfree(dev, pkt);
return ret;
}
/**
* ice_vf_fdir_timer - FDIR program waiting timer interrupt handler
* @t: pointer to timer_list
*/
static void ice_vf_fdir_timer(struct timer_list *t)
{
struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir *fdir;
unsigned long flags;
struct ice_vf *vf;
struct ice_pf *pf;
fdir = container_of(ctx_irq, struct ice_vf_fdir, ctx_irq);
vf = container_of(fdir, struct ice_vf, fdir);
ctx_done = &fdir->ctx_done;
pf = vf->pf;
spin_lock_irqsave(&fdir->ctx_lock, flags);
if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
WARN_ON_ONCE(1);
return;
}
ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
ctx_done->conf = ctx_irq->conf;
ctx_done->stat = ICE_FDIR_CTX_TIMEOUT;
ctx_done->v_opcode = ctx_irq->v_opcode;
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state);
ice_service_task_schedule(pf);
}
/**
* ice_vc_fdir_irq_handler - ctrl_vsi Rx queue interrupt handler
* @ctrl_vsi: pointer to a VF's CTRL VSI
* @rx_desc: pointer to FDIR Rx queue descriptor
*/
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
union ice_32b_rx_flex_desc *rx_desc)
{
struct ice_pf *pf = ctrl_vsi->back;
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir_ctx *ctx_irq;
struct ice_vf_fdir *fdir;
unsigned long flags;
struct device *dev;
struct ice_vf *vf;
int ret;
vf = &pf->vf[ctrl_vsi->vf_id];
fdir = &vf->fdir;
ctx_done = &fdir->ctx_done;
ctx_irq = &fdir->ctx_irq;
dev = ice_pf_to_dev(pf);
spin_lock_irqsave(&fdir->ctx_lock, flags);
if (!(ctx_irq->flags & ICE_VF_FDIR_CTX_VALID)) {
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
WARN_ON_ONCE(1);
return;
}
ctx_irq->flags &= ~ICE_VF_FDIR_CTX_VALID;
ctx_done->flags |= ICE_VF_FDIR_CTX_VALID;
ctx_done->conf = ctx_irq->conf;
ctx_done->stat = ICE_FDIR_CTX_IRQ;
ctx_done->v_opcode = ctx_irq->v_opcode;
memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
ret = del_timer(&ctx_irq->rx_tmr);
if (!ret)
dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
set_bit(__ICE_FD_VF_FLUSH_CTX, pf->state);
ice_service_task_schedule(pf);
}
/**
* ice_vf_fdir_dump_info - dump FDIR information for diagnosis
* @vf: pointer to the VF info
*/
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
struct ice_vsi *vf_vsi;
u32 fd_size, fd_cnt;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
u16 vsi_num;
pf = vf->pf;
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
vf_vsi = pf->vsi[vf->lan_vsi_idx];
vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx);
fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x",
vf->vf_id,
(fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
(fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
(fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
(fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
}
/**
* ice_vf_verify_rx_desc - verify received FDIR programming status descriptor
* @vf: pointer to the VF info
* @ctx: FDIR context info for post processing
* @status: virtchnl FDIR program status
*
* Return: 0 on success, and other on error.
*/
static int
ice_vf_verify_rx_desc(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
enum virtchnl_fdir_prgm_status *status)
{
struct device *dev = ice_pf_to_dev(vf->pf);
u32 stat_err, error, prog_id;
int ret;
stat_err = le16_to_cpu(ctx->rx_desc.wb.status_error0);
if (((stat_err & ICE_FXD_FLTR_WB_QW1_DD_M) >>
ICE_FXD_FLTR_WB_QW1_DD_S) != ICE_FXD_FLTR_WB_QW1_DD_YES) {
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: Desc Done not set\n", vf->vf_id);
ret = -EINVAL;
goto err_exit;
}
prog_id = (stat_err & ICE_FXD_FLTR_WB_QW1_PROG_ID_M) >>
ICE_FXD_FLTR_WB_QW1_PROG_ID_S;
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD &&
ctx->v_opcode != VIRTCHNL_OP_ADD_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show add, but ctx not",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
ret = -EINVAL;
goto err_exit;
}
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_DEL &&
ctx->v_opcode != VIRTCHNL_OP_DEL_FDIR_FILTER) {
dev_err(dev, "VF %d: Desc show del, but ctx not",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
ret = -EINVAL;
goto err_exit;
}
error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_M) >>
ICE_FXD_FLTR_WB_QW1_FAIL_S;
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_YES) {
if (prog_id == ICE_FXD_FLTR_WB_QW1_PROG_ADD) {
dev_err(dev, "VF %d, Failed to add FDIR rule due to no space in the table",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
} else {
dev_err(dev, "VF %d, Failed to remove FDIR rule, attempt to remove non-existent entry",
vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
}
ret = -EINVAL;
goto err_exit;
}
error = (stat_err & ICE_FXD_FLTR_WB_QW1_FAIL_PROF_M) >>
ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S;
if (error == ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES) {
dev_err(dev, "VF %d: Profile matching error", vf->vf_id);
*status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
ret = -EINVAL;
goto err_exit;
}
*status = VIRTCHNL_FDIR_SUCCESS;
return 0;
err_exit:
ice_vf_fdir_dump_info(vf);
return ret;
}
/**
* ice_vc_add_fdir_fltr_post - post process for an FDIR add command
* @vf: pointer to the VF structure
* @ctx: FDIR context info for post processing
* @status: virtchnl FDIR program status
* @success: true implies success, false implies failure
*
* Post process for the flow director add command. On success, finish the post
* processing and send a success message back over virtchnl. Otherwise, revert
* the context and send back a failure message.
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
enum virtchnl_fdir_prgm_status status,
bool success)
{
struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
struct device *dev = ice_pf_to_dev(vf->pf);
enum virtchnl_status_code v_ret;
struct virtchnl_fdir_add *resp;
int ret, len, is_tun;
v_ret = VIRTCHNL_STATUS_SUCCESS;
len = sizeof(*resp);
resp = kzalloc(len, GFP_KERNEL);
if (!resp) {
len = 0;
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
goto err_exit;
}
if (!success)
goto err_exit;
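/* only non-tunnel filters are programmed through this path today, so
 * is_tun stays 0 and only the non-tunnel counter slot is updated
 */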
is_tun = 0;
resp->status = status;
resp->flow_id = conf->flow_id;
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
kfree(resp);
dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
vf->vf_id, conf->flow_id,
(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
"add" : "del");
return ret;
err_exit:
if (resp)
resp->status = status;
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
devm_kfree(dev, conf);
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
kfree(resp);
return ret;
}
/**
* ice_vc_del_fdir_fltr_post - post process for an FDIR del command
* @vf: pointer to the VF structure
* @ctx: FDIR context info for post processing
* @status: virtchnl FDIR program status
* @success: true implies success, false implies failure
*
* Post process for the flow director del command. On success, finish the post
* processing and send a success message back over virtchnl. Otherwise, revert
* the context and send back a failure message.
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
enum virtchnl_fdir_prgm_status status,
bool success)
{
struct virtchnl_fdir_fltr_conf *conf = ctx->conf;
struct device *dev = ice_pf_to_dev(vf->pf);
enum virtchnl_status_code v_ret;
struct virtchnl_fdir_del *resp;
int ret, len, is_tun;
v_ret = VIRTCHNL_STATUS_SUCCESS;
len = sizeof(*resp);
resp = kzalloc(len, GFP_KERNEL);
if (!resp) {
len = 0;
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "VF %d: Alloc resp buf fail", vf->vf_id);
goto err_exit;
}
if (!success)
goto err_exit;
is_tun = 0;
resp->status = status;
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
kfree(resp);
dev_dbg(dev, "VF %d: flow_id:0x%X, FDIR %s success!\n",
vf->vf_id, conf->flow_id,
(ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER) ?
"add" : "del");
devm_kfree(dev, conf);
return ret;
err_exit:
if (resp)
resp->status = status;
if (success)
devm_kfree(dev, conf);
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
kfree(resp);
return ret;
}
/**
* ice_flush_fdir_ctx - flush and process pending FDIR contexts
* @pf: pointer to the PF structure
*
* Flush all pending events on the ctx_done lists and process them.
*/
void ice_flush_fdir_ctx(struct ice_pf *pf)
{
int i;
if (!test_and_clear_bit(__ICE_FD_VF_FLUSH_CTX, pf->state))
return;
ice_for_each_vf(pf, i) {
struct device *dev = ice_pf_to_dev(pf);
enum virtchnl_fdir_prgm_status status;
struct ice_vf *vf = &pf->vf[i];
struct ice_vf_fdir_ctx *ctx;
unsigned long flags;
int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
continue;
if (vf->ctrl_vsi_idx == ICE_NO_VSI)
continue;
ctx = &vf->fdir.ctx_done;
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
if (!(ctx->flags & ICE_VF_FDIR_CTX_VALID)) {
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
continue;
}
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
WARN_ON(ctx->stat == ICE_FDIR_CTX_READY);
if (ctx->stat == ICE_FDIR_CTX_TIMEOUT) {
status = VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT;
dev_err(dev, "VF %d: ctrl_vsi irq timeout\n",
vf->vf_id);
goto err_exit;
}
ret = ice_vf_verify_rx_desc(vf, ctx, &status);
if (ret)
goto err_exit;
if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
ice_vc_add_fdir_fltr_post(vf, ctx, status, true);
else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
ice_vc_del_fdir_fltr_post(vf, ctx, status, true);
else
dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
continue;
err_exit:
if (ctx->v_opcode == VIRTCHNL_OP_ADD_FDIR_FILTER)
ice_vc_add_fdir_fltr_post(vf, ctx, status, false);
else if (ctx->v_opcode == VIRTCHNL_OP_DEL_FDIR_FILTER)
ice_vc_del_fdir_fltr_post(vf, ctx, status, false);
else
dev_err(dev, "VF %d: Unsupported opcode\n", vf->vf_id);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
}
/**
* ice_vc_fdir_set_irq_ctx - set FDIR context info for later IRQ handler
* @vf: pointer to the VF structure
* @conf: FDIR configuration for each filter
* @v_opcode: virtual channel operation code
*
* Return: 0 on success, and other on error.
*/
static int
ice_vc_fdir_set_irq_ctx(struct ice_vf *vf, struct virtchnl_fdir_fltr_conf *conf,
enum virtchnl_ops v_opcode)
{
struct device *dev = ice_pf_to_dev(vf->pf);
struct ice_vf_fdir_ctx *ctx;
unsigned long flags;
ctx = &vf->fdir.ctx_irq;
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
if ((vf->fdir.ctx_irq.flags & ICE_VF_FDIR_CTX_VALID) ||
(vf->fdir.ctx_done.flags & ICE_VF_FDIR_CTX_VALID)) {
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
dev_dbg(dev, "VF %d: Last request is still in progress\n",
vf->vf_id);
return -EBUSY;
}
ctx->flags |= ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
ctx->conf = conf;
ctx->v_opcode = v_opcode;
ctx->stat = ICE_FDIR_CTX_READY;
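/* Arm a 10 ms programming-status timeout: either the ctrl_vsi Rx
 * interrupt (ice_vc_fdir_irq_handler) or this timer moves the request
 * from ctx_irq to ctx_done for ice_flush_fdir_ctx() to complete.
 */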
timer_setup(&ctx->rx_tmr, ice_vf_fdir_timer, 0);
mod_timer(&ctx->rx_tmr, round_jiffies(msecs_to_jiffies(10) + jiffies));
return 0;
}
/**
* ice_vc_fdir_clear_irq_ctx - clear FDIR context info for IRQ handler
* @vf: pointer to the VF structure
*/
static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
{
struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
unsigned long flags;
del_timer(&ctx->rx_tmr);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
}
/**
* ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* Return: 0 on success, and other on error.
*/
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_fdir_add *fltr = (struct virtchnl_fdir_add *)msg;
struct virtchnl_fdir_add *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
enum virtchnl_status_code v_ret;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
int len = 0;
int ret;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
goto err_exit;
}
ret = ice_vf_start_ctrl_vsi(vf);
if (ret && (ret != -EEXIST)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Init FDIR for VF %d failed, ret:%d\n",
vf->vf_id, ret);
goto err_exit;
}
stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if (!stat) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
goto err_exit;
}
conf = devm_kzalloc(dev, sizeof(*conf), GFP_KERNEL);
if (!conf) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "Alloc conf for VF %d failed\n", vf->vf_id);
goto err_exit;
}
len = sizeof(*stat);
ret = ice_vc_validate_fdir_fltr(vf, fltr, conf);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID;
dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id);
goto err_free_conf;
}
if (fltr->validate_only) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_SUCCESS;
devm_kfree(dev, conf);
ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER,
v_ret, (u8 *)stat, len);
goto exit;
}
ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT;
dev_err(dev, "VF %d: FDIR input set configure failed, ret:%d\n",
vf->vf_id, ret);
goto err_free_conf;
}
ret = ice_vc_fdir_is_dup_fltr(vf, conf);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_EXIST;
dev_dbg(dev, "VF %d: duplicated FDIR rule detected\n",
vf->vf_id);
goto err_free_conf;
}
ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: insert FDIR list failed\n", vf->vf_id);
goto err_free_conf;
}
ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_ADD_FDIR_FILTER);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
goto err_free_conf;
}
ret = ice_vc_fdir_write_fltr(vf, conf, true, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
vf->vf_id, ret);
goto err_rem_entry;
}
exit:
kfree(stat);
return ret;
err_rem_entry:
ice_vc_fdir_clear_irq_ctx(vf);
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
err_free_conf:
devm_kfree(dev, conf);
err_exit:
ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_FDIR_FILTER, v_ret,
(u8 *)stat, len);
kfree(stat);
return ret;
}
/**
* ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
* Return: 0 on success, and other on error.
*/
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg;
struct virtchnl_fdir_del *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
enum virtchnl_status_code v_ret;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
int len = 0;
int ret;
pf = vf->pf;
dev = ice_pf_to_dev(pf);
ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_dbg(dev, "Parameter check for VF %d failed\n", vf->vf_id);
goto err_exit;
}
stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if (!stat) {
v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
dev_dbg(dev, "Alloc stat for VF %d failed\n", vf->vf_id);
goto err_exit;
}
len = sizeof(*stat);
conf = ice_vc_fdir_lookup_entry(vf, fltr->flow_id);
if (!conf) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST;
dev_dbg(dev, "VF %d: FDIR invalid flow_id:0x%X\n",
vf->vf_id, fltr->flow_id);
goto err_exit;
}
/* Just return failure when ctrl_vsi idx is invalid */
if (vf->ctrl_vsi_idx == ICE_NO_VSI) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "Invalid FDIR ctrl_vsi for VF %d\n", vf->vf_id);
goto err_exit;
}
ret = ice_vc_fdir_set_irq_ctx(vf, conf, VIRTCHNL_OP_DEL_FDIR_FILTER);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_dbg(dev, "VF %d: set FDIR context failed\n", vf->vf_id);
goto err_exit;
}
ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun);
if (ret) {
v_ret = VIRTCHNL_STATUS_SUCCESS;
stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE;
dev_err(dev, "VF %d: writing FDIR rule failed, ret:%d\n",
vf->vf_id, ret);
goto err_del_tmr;
}
kfree(stat);
return ret;
err_del_tmr:
ice_vc_fdir_clear_irq_ctx(vf);
err_exit:
ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_FDIR_FILTER, v_ret,
(u8 *)stat, len);
kfree(stat);
return ret;
}
/**
* ice_vf_fdir_init - init FDIR resource for VF
* @vf: pointer to the VF info
*/
void ice_vf_fdir_init(struct ice_vf *vf)
{
struct ice_vf_fdir *fdir = &vf->fdir;
idr_init(&fdir->fdir_rule_idr);
INIT_LIST_HEAD(&fdir->fdir_rule_list);
spin_lock_init(&fdir->ctx_lock);
fdir->ctx_irq.flags = 0;
fdir->ctx_done.flags = 0;
}
/**
* ice_vf_fdir_exit - destroy FDIR resource for VF
* @vf: pointer to the VF info
*/
void ice_vf_fdir_exit(struct ice_vf *vf)
{
ice_vc_fdir_flush_entry(vf);
idr_destroy(&vf->fdir.fdir_rule_idr);
ice_vc_fdir_rem_prof_all(vf);
ice_vc_fdir_free_prof_all(vf);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2021, Intel Corporation. */
#ifndef _ICE_VIRTCHNL_FDIR_H_
#define _ICE_VIRTCHNL_FDIR_H_
struct ice_vf;
struct ice_pf;
enum ice_fdir_ctx_stat {
ICE_FDIR_CTX_READY,
ICE_FDIR_CTX_IRQ,
ICE_FDIR_CTX_TIMEOUT,
};
struct ice_vf_fdir_ctx {
struct timer_list rx_tmr;
enum virtchnl_ops v_opcode;
enum ice_fdir_ctx_stat stat;
union ice_32b_rx_flex_desc rx_desc;
#define ICE_VF_FDIR_CTX_VALID BIT(0)
u32 flags;
void *conf;
};
/* VF FDIR information structure */
struct ice_vf_fdir {
u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
struct ice_fd_hw_prof **fdir_prof;
struct idr fdir_rule_idr;
struct list_head fdir_rule_list;
spinlock_t ctx_lock; /* protects FDIR context info */
struct ice_vf_fdir_ctx ctx_irq;
struct ice_vf_fdir_ctx ctx_done;
};
#ifdef CONFIG_PCI_IOV
int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg);
int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg);
void ice_vf_fdir_init(struct ice_vf *vf);
void ice_vf_fdir_exit(struct ice_vf *vf);
void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
union ice_32b_rx_flex_desc *rx_desc);
void ice_flush_fdir_ctx(struct ice_pf *pf);
#else
static inline void
ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, union ice_32b_rx_flex_desc *rx_desc) { }
static inline void ice_flush_fdir_ctx(struct ice_pf *pf) { }
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_FDIR_H_ */
......@@ -201,6 +201,25 @@ static void ice_vf_vsi_release(struct ice_vf *vf)
ice_vf_invalidate_vsi(vf);
}
/**
* ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
* @vf: VF that control VSI is being invalidated on
*/
static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
vf->ctrl_vsi_idx = ICE_NO_VSI;
}
/**
* ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
* @vf: VF that control VSI is being released on
*/
static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
ice_vf_ctrl_invalidate_vsi(vf);
}
/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
......@@ -214,6 +233,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
* accessing the VF's VSI after it's freed or invalidated.
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
ice_vf_fdir_exit(vf);
/* free VF control VSI */
if (vf->ctrl_vsi_idx != ICE_NO_VSI)
ice_vf_ctrl_vsi_release(vf);
/* free VSI and disconnect it from the parent uplink */
if (vf->lan_vsi_idx != ICE_NO_VSI) {
......@@ -559,6 +582,28 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
return vsi;
}
/**
* ice_vf_ctrl_vsi_setup - Set up a VF control VSI
* @vf: VF to setup control VSI for
*
* Return: pointer to the successfully allocated VSI struct on success,
* NULL on failure.
*/
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
struct ice_port_info *pi = ice_vf_get_port_info(vf);
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
}
return vsi;
}
/**
* ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
* @pf: pointer to PF structure
......@@ -1256,6 +1301,13 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
ice_vf_fdir_exit(vf);
/* clean VF control VSI when resetting VFs since it should be
* setup only when VF creates its first FDIR rule.
*/
if (vf->ctrl_vsi_idx != ICE_NO_VSI)
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
ice_vf_post_vsi_rebuild(vf);
......@@ -1374,6 +1426,13 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
dev_err(dev, "disabling promiscuous mode failed\n");
}
ice_vf_fdir_exit(vf);
/* clean VF control VSI when resetting VF since it should be setup
* only when VF creates its first FDIR rule.
*/
if (vf->ctrl_vsi_idx != ICE_NO_VSI)
ice_vf_ctrl_vsi_release(vf);
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi_with_release(vf);
ice_vf_post_vsi_rebuild(vf);
......@@ -1549,6 +1608,12 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
vf->spoofchk = true;
vf->num_vf_qs = pf->num_qps_per_vf;
/* ctrl_vsi_idx will be set to a valid value only when VF
* creates its first fdir rule.
*/
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_fdir_init(vf);
}
}
......@@ -1848,7 +1913,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*
* send msg to VF
*/
static int
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
......@@ -1996,6 +2061,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
......@@ -2084,7 +2152,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
*
* check for the valid VSI ID
*/
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
......@@ -3816,6 +3884,12 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
err = ice_vc_dis_vlan_stripping(vf);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
err = ice_vc_add_fdir_fltr(vf, msg);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
err = ice_vc_del_fdir_fltr(vf, msg);
break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
......
......@@ -4,6 +4,7 @@
#ifndef _ICE_VIRTCHNL_PF_H_
#define _ICE_VIRTCHNL_PF_H_
#include "ice.h"
#include "ice_virtchnl_fdir.h"
/* Restrict number of MAC Addr and VLAN that non-trusted VF can program */
#define ICE_MAX_VLAN_PER_VF 8
......@@ -70,6 +71,8 @@ struct ice_vf {
u16 vf_id; /* VF ID in the PF space */
u16 lan_vsi_idx; /* index into PF struct */
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
/* first vector index of this VF in the PF space */
int first_vector_idx;
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
......@@ -138,6 +141,11 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
#else /* CONFIG_PCI_IOV */
#define ice_process_vflr_event(pf) do {} while (0)
#define ice_free_vfs(pf) do {} while (0)
......
......@@ -136,6 +136,9 @@ enum virtchnl_ops {
VIRTCHNL_OP_DISABLE_CHANNELS = 31,
VIRTCHNL_OP_ADD_CLOUD_FILTER = 32,
VIRTCHNL_OP_DEL_CLOUD_FILTER = 33,
/* opcode 34 - 46 are reserved */
VIRTCHNL_OP_ADD_FDIR_FILTER = 47,
VIRTCHNL_OP_DEL_FDIR_FILTER = 48,
};
/* These macros are used to generate compilation errors if a structure/union
......@@ -247,6 +250,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
#define VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
#define VIRTCHNL_VF_OFFLOAD_ADQ 0X00800000
#define VIRTCHNL_VF_OFFLOAD_FDIR_PF 0X10000000
/* Define below the capability flags that are not offloads */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED 0x00000080
......@@ -559,6 +563,11 @@ enum virtchnl_action {
/* action types */
VIRTCHNL_ACTION_DROP = 0,
VIRTCHNL_ACTION_TC_REDIRECT,
VIRTCHNL_ACTION_PASSTHRU,
VIRTCHNL_ACTION_QUEUE,
VIRTCHNL_ACTION_Q_REGION,
VIRTCHNL_ACTION_MARK,
VIRTCHNL_ACTION_COUNT,
};
enum virtchnl_flow_type {
......@@ -668,6 +677,269 @@ enum virtchnl_vfr_states {
VIRTCHNL_VFR_VFACTIVE,
};
#define VIRTCHNL_MAX_NUM_PROTO_HDRS 32
#define PROTO_HDR_SHIFT 5
#define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT)
#define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1)
/* VFs use these macros to configure each protocol header.
* They specify which protocol headers and protocol header fields to match,
* based on virtchnl_proto_hdr_type and virtchnl_proto_hdr_field.
* @param hdr: a struct of virtchnl_proto_hdr
* @param hdr_type: ETH/IPV4/TCP, etc
* @param field: SRC/DST/TEID/SPI, etc
*/
#define VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, field) \
((hdr)->field_selector |= BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, field) \
((hdr)->field_selector &= ~BIT((field) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_TEST_PROTO_HDR_FIELD(hdr, val) \
((hdr)->field_selector & BIT((val) & PROTO_HDR_FIELD_MASK))
#define VIRTCHNL_GET_PROTO_HDR_FIELD(hdr) ((hdr)->field_selector)
#define VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
(VIRTCHNL_ADD_PROTO_HDR_FIELD(hdr, \
VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_DEL_PROTO_HDR_FIELD_BIT(hdr, hdr_type, field) \
(VIRTCHNL_DEL_PROTO_HDR_FIELD(hdr, \
VIRTCHNL_PROTO_HDR_ ## hdr_type ## _ ## field))
#define VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, hdr_type) \
((hdr)->type = VIRTCHNL_PROTO_HDR_ ## hdr_type)
#define VIRTCHNL_GET_PROTO_HDR_TYPE(hdr) \
(((hdr)->type) >> PROTO_HDR_SHIFT)
#define VIRTCHNL_TEST_PROTO_HDR_TYPE(hdr, val) \
((hdr)->type == ((val) >> PROTO_HDR_SHIFT))
#define VIRTCHNL_TEST_PROTO_HDR(hdr, val) \
(VIRTCHNL_TEST_PROTO_HDR_TYPE((hdr), (val)) && \
VIRTCHNL_TEST_PROTO_HDR_FIELD((hdr), (val)))
/* Protocol header type within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers can encapsulate, or be encapsulated by,
* tunneling or encapsulation protocols used for network virtualization.
*/
enum virtchnl_proto_hdr_type {
VIRTCHNL_PROTO_HDR_NONE,
VIRTCHNL_PROTO_HDR_ETH,
VIRTCHNL_PROTO_HDR_S_VLAN,
VIRTCHNL_PROTO_HDR_C_VLAN,
VIRTCHNL_PROTO_HDR_IPV4,
VIRTCHNL_PROTO_HDR_IPV6,
VIRTCHNL_PROTO_HDR_TCP,
VIRTCHNL_PROTO_HDR_UDP,
VIRTCHNL_PROTO_HDR_SCTP,
VIRTCHNL_PROTO_HDR_GTPU_IP,
VIRTCHNL_PROTO_HDR_GTPU_EH,
VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
VIRTCHNL_PROTO_HDR_PPPOE,
VIRTCHNL_PROTO_HDR_L2TPV3,
VIRTCHNL_PROTO_HDR_ESP,
VIRTCHNL_PROTO_HDR_AH,
VIRTCHNL_PROTO_HDR_PFCP,
};
/* Protocol header field within a protocol header. */
enum virtchnl_proto_hdr_field {
/* ETHER */
VIRTCHNL_PROTO_HDR_ETH_SRC =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ETH),
VIRTCHNL_PROTO_HDR_ETH_DST,
VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE,
/* S-VLAN */
VIRTCHNL_PROTO_HDR_S_VLAN_ID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_S_VLAN),
/* C-VLAN */
VIRTCHNL_PROTO_HDR_C_VLAN_ID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_C_VLAN),
/* IPV4 */
VIRTCHNL_PROTO_HDR_IPV4_SRC =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV4),
VIRTCHNL_PROTO_HDR_IPV4_DST,
VIRTCHNL_PROTO_HDR_IPV4_DSCP,
VIRTCHNL_PROTO_HDR_IPV4_TTL,
VIRTCHNL_PROTO_HDR_IPV4_PROT,
/* IPV6 */
VIRTCHNL_PROTO_HDR_IPV6_SRC =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_IPV6),
VIRTCHNL_PROTO_HDR_IPV6_DST,
VIRTCHNL_PROTO_HDR_IPV6_TC,
VIRTCHNL_PROTO_HDR_IPV6_HOP_LIMIT,
VIRTCHNL_PROTO_HDR_IPV6_PROT,
/* TCP */
VIRTCHNL_PROTO_HDR_TCP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_TCP),
VIRTCHNL_PROTO_HDR_TCP_DST_PORT,
/* UDP */
VIRTCHNL_PROTO_HDR_UDP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_UDP),
VIRTCHNL_PROTO_HDR_UDP_DST_PORT,
/* SCTP */
VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_SCTP),
VIRTCHNL_PROTO_HDR_SCTP_DST_PORT,
/* GTPU_IP */
VIRTCHNL_PROTO_HDR_GTPU_IP_TEID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_IP),
/* GTPU_EH */
VIRTCHNL_PROTO_HDR_GTPU_EH_PDU =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_GTPU_EH),
VIRTCHNL_PROTO_HDR_GTPU_EH_QFI,
/* PPPOE */
VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PPPOE),
/* L2TPV3 */
VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_L2TPV3),
/* ESP */
VIRTCHNL_PROTO_HDR_ESP_SPI =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_ESP),
/* AH */
VIRTCHNL_PROTO_HDR_AH_SPI =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_AH),
/* PFCP */
VIRTCHNL_PROTO_HDR_PFCP_S_FIELD =
PROTO_HDR_FIELD_START(VIRTCHNL_PROTO_HDR_PFCP),
VIRTCHNL_PROTO_HDR_PFCP_SEID,
};
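For reference, PROTO_HDR_FIELD_START() (defined just above this block) shifts the header type left by PROTO_HDR_SHIFT, so each header type owns a contiguous block of field values. With PROTO_HDR_SHIFT being 5, VIRTCHNL_PROTO_HDR_UDP_DST_PORT works out to (VIRTCHNL_PROTO_HDR_UDP << 5) + 1 = (7 << 5) + 1 = 225; masking with PROTO_HDR_FIELD_MASK recovers the per-header bit position (225 & 0x1f = 1) used by the field_selector macros, and shifting right by PROTO_HDR_SHIFT recovers the header type, as VIRTCHNL_TEST_PROTO_HDR_TYPE() does.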
struct virtchnl_proto_hdr {
enum virtchnl_proto_hdr_type type;
u32 field_selector; /* a bit mask to select field for header type */
u8 buffer[64];
/**
 * binary buffer in network order for the specific header type.
 * For example, if type = VIRTCHNL_PROTO_HDR_IPV4, an IPv4
 * header is expected to be copied into the buffer.
 */
};
VIRTCHNL_CHECK_STRUCT_LEN(72, virtchnl_proto_hdr);
struct virtchnl_proto_hdrs {
u8 tunnel_level;
/**
 * specifies where the protocol headers start from:
 * 0 - from the outer layer
 * 1 - from the first inner layer
 * 2 - from the second inner layer
 * ....
 **/
int count; /* the number of proto layers must be < VIRTCHNL_MAX_NUM_PROTO_HDRS */
struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS];
};
VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs);
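As a sketch of how the pieces fit together (illustrative, not taken from the patch; assumes struct udphdr from <linux/udp.h>), a segment matching UDP destination port 2152 inside an outer IPv4 header could be described with two proto_hdr entries:

	/* Illustrative only: outer IPv4 (no fields selected) followed by
	 * UDP matching on dst port 2152 (GTP-U). The header image itself
	 * travels in network byte order in hdr->buffer.
	 */
	static void example_build_proto_hdrs(struct virtchnl_proto_hdrs *phdrs)
	{
		struct udphdr udp = { .dest = htons(2152) };
		struct virtchnl_proto_hdr *hdr;

		phdrs->tunnel_level = 0;	/* start from the outer layer */
		phdrs->count = 2;

		hdr = &phdrs->proto_hdr[0];
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);

		hdr = &phdrs->proto_hdr[1];
		VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
		VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
		memcpy(hdr->buffer, &udp, sizeof(udp));
	}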
/* action configuration for FDIR */
struct virtchnl_filter_action {
enum virtchnl_action type;
union {
/* used for queue and qgroup action */
struct {
u16 index;
u8 region;
} queue;
/* used for count action */
struct {
/* share counter ID with other flow rules */
u8 shared;
u32 id; /* counter ID */
} count;
/* used for mark action */
u32 mark_id;
u8 reserve[32];
} act_conf;
};
VIRTCHNL_CHECK_STRUCT_LEN(36, virtchnl_filter_action);
#define VIRTCHNL_MAX_NUM_ACTIONS 8
struct virtchnl_filter_action_set {
/* number of actions must be less than VIRTCHNL_MAX_NUM_ACTIONS */
int count;
struct virtchnl_filter_action actions[VIRTCHNL_MAX_NUM_ACTIONS];
};
VIRTCHNL_CHECK_STRUCT_LEN(292, virtchnl_filter_action_set);
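A minimal sketch of filling a one-entry action set that steers matched packets to an Rx queue, assuming the VIRTCHNL_ACTION_QUEUE enumerator added earlier in this series:

	/* Illustrative only: one action, "deliver to queue 13". */
	static void example_build_action_set(struct virtchnl_filter_action_set *as)
	{
		as->count = 1;	/* must stay below VIRTCHNL_MAX_NUM_ACTIONS */
		as->actions[0].type = VIRTCHNL_ACTION_QUEUE;
		as->actions[0].act_conf.queue.index = 13;
	}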
/* pattern and action for FDIR rule */
struct virtchnl_fdir_rule {
struct virtchnl_proto_hdrs proto_hdrs;
struct virtchnl_filter_action_set action_set;
};
VIRTCHNL_CHECK_STRUCT_LEN(2604, virtchnl_fdir_rule);
/* Status returned to VF after VF requests FDIR commands
 * VIRTCHNL_FDIR_SUCCESS
 * VF FDIR related request was successfully done by PF.
 * The request can be OP_ADD/DEL.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE
 * OP_ADD_FDIR_FILTER request failed due to lack of hardware resources.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_EXIST
 * OP_ADD_FDIR_FILTER request failed because the rule already exists.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT
 * OP_ADD_FDIR_FILTER request failed due to a conflict with an existing rule.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST
 * OP_DEL_FDIR_FILTER request failed because the rule doesn't exist.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_INVALID
 * OP_ADD_FDIR_FILTER request failed due to parameter validation
 * or missing HW support.
 *
 * VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT
 * OP_ADD/DEL_FDIR_FILTER request failed because programming timed out.
 */
enum virtchnl_fdir_prgm_status {
VIRTCHNL_FDIR_SUCCESS = 0,
VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE,
VIRTCHNL_FDIR_FAILURE_RULE_EXIST,
VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT,
VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST,
VIRTCHNL_FDIR_FAILURE_RULE_INVALID,
VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT,
};
/* VIRTCHNL_OP_ADD_FDIR_FILTER
 * VF sends this request to the PF by filling out vsi_id,
 * validate_only and rule_cfg. The PF reports the result in
 * status and, on success, returns flow_id to the VF.
 */
struct virtchnl_fdir_add {
u16 vsi_id; /* INPUT */
/*
 * 1 for validating an fdir rule, 0 for creating an fdir rule.
 * Validate and create share one op: VIRTCHNL_OP_ADD_FDIR_FILTER.
 */
u16 validate_only; /* INPUT */
u32 flow_id; /* OUTPUT */
struct virtchnl_fdir_rule rule_cfg; /* INPUT */
enum virtchnl_fdir_prgm_status status; /* OUTPUT */
};
VIRTCHNL_CHECK_STRUCT_LEN(2616, virtchnl_fdir_add);
/* VIRTCHNL_OP_DEL_FDIR_FILTER
 * VF sends this request to the PF by filling out vsi_id
 * and flow_id. The PF reports the result in status.
 */
struct virtchnl_fdir_del {
u16 vsi_id; /* INPUT */
u16 pad;
u32 flow_id; /* INPUT */
enum virtchnl_fdir_prgm_status status; /* OUTPUT */
};
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del);
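Putting the two messages together, a validate-then-add-then-delete sequence might look like the sketch below. send_to_pf() is a placeholder for the driver's virtchnl transport, not a real API, and the sketch pretends the PF's reply is written back synchronously into the same struct; the real driver completes these operations asynchronously over the admin queue:

	/* Placeholder for the driver's send path (assumption, not a real call). */
	static void send_to_pf(enum virtchnl_ops op, void *msg, int len);

	/* Illustrative only: dry-run validation, then programming, then
	 * deletion by the flow_id the PF handed back.
	 */
	static int example_fdir_add_del(u16 vsi_id,
					const struct virtchnl_fdir_rule *rule)
	{
		struct virtchnl_fdir_add add = { .vsi_id = vsi_id };
		struct virtchnl_fdir_del del = { .vsi_id = vsi_id };

		add.rule_cfg = *rule;

		add.validate_only = 1;	/* PF only checks the rule */
		send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &add, sizeof(add));
		if (add.status != VIRTCHNL_FDIR_SUCCESS)
			return -EINVAL;

		add.validate_only = 0;	/* program the rule for real */
		send_to_pf(VIRTCHNL_OP_ADD_FDIR_FILTER, &add, sizeof(add));
		if (add.status != VIRTCHNL_FDIR_SUCCESS)
			return -EIO;

		/* ... later, tear the rule down */
		del.flow_id = add.flow_id;
		send_to_pf(VIRTCHNL_OP_DEL_FDIR_FILTER, &del, sizeof(del));
		return 0;
	}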
/**
* virtchnl_vc_validate_vf_msg
* @ver: Virtchnl version info
......@@ -828,6 +1100,12 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
case VIRTCHNL_OP_DEL_CLOUD_FILTER:
valid_len = sizeof(struct virtchnl_filter);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_add);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
valid_len = sizeof(struct virtchnl_fdir_del);
break;
/* These are always errors coming from the VF. */
case VIRTCHNL_OP_EVENT:
case VIRTCHNL_OP_UNKNOWN:
......