Commit b25ba113 authored by David S. Miller

Merge branch 'net-hns3-add-aRFS-feature-and-fix-FEC-bugs-for-HNS3-driver'

Huazhong Tan says:

====================
net: hns3: add aRFS feature and fix FEC bugs for HNS3 driver

This patchset adds support for some new features and fixes some bugs:
[Patch 1/4 - 3/4] add support for aRFS
[Patch 4/4] fixes a FEC configuration issue
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 18e88171 f438bfe9
@@ -343,6 +343,8 @@ struct hnae3_ae_dev {
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en
* Enable/disable HW GRO
* add_arfs_entry
* Check the 5-tuples of flow, and create flow director rule
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -492,6 +494,8 @@ struct hnae3_ae_ops {
struct ethtool_rxnfc *cmd, u32 *rule_locs);
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
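To make the new op's contract concrete, here is a minimal, hypothetical stub showing the shape a backend implementation is expected to take (the name example_add_arfs_entry and its comments are illustrative, not taken from this patchset; the return convention presumably mirrors ndo_rx_flow_steer, i.e. a non-negative filter id or a negative errno):

/* Illustrative sketch only: shape of an .add_arfs_entry backend.
 * @fkeys is the dissected 5-tuple, @queue_id the RX queue the stack wants
 * the flow steered to, @flow_id the stack's RFS flow-table index.
 */
static int example_add_arfs_entry(struct hnae3_handle *handle, u16 queue_id,
				  u16 flow_id, struct flow_keys *fkeys)
{
	/* translate @fkeys into a flow director rule, steer it to @queue_id,
	 * remember @flow_id for later expiry checks, and return the rule's
	 * location so it can serve as the filter id seen by the stack
	 */
	return -EOPNOTSUPP;	/* placeholder in this sketch */
}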
@@ -4,6 +4,9 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -79,23 +82,6 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
return IRQ_HANDLED;
}
/* This callback function is used to set affinity changes to the irq affinity
* masks when the irq_set_affinity_notifier function is used.
*/
static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct hns3_enet_tqp_vector *tqp_vectors =
container_of(notify, struct hns3_enet_tqp_vector,
affinity_notify);
tqp_vectors->affinity_mask = *mask;
}
static void hns3_nic_irq_affinity_release(struct kref *ref)
{
}
static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
struct hns3_enet_tqp_vector *tqp_vectors;
@@ -107,8 +93,7 @@ static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
continue;
/* clear the affinity notifier and affinity mask */
irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
/* clear the affinity mask */
irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
/* release the irq resource */
@@ -161,12 +146,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
return ret;
}
tqp_vectors->affinity_notify.notify =
hns3_nic_irq_affinity_notify;
tqp_vectors->affinity_notify.release =
hns3_nic_irq_affinity_release;
irq_set_affinity_notifier(tqp_vectors->vector_irq,
&tqp_vectors->affinity_notify);
irq_set_affinity_hint(tqp_vectors->vector_irq,
&tqp_vectors->affinity_mask);
@@ -340,6 +319,40 @@ static void hns3_tqp_disable(struct hnae3_queue *tqp)
hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
}
static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(netdev->rx_cpu_rmap);
netdev->rx_cpu_rmap = NULL;
#endif
}
static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
{
#ifdef CONFIG_RFS_ACCEL
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hns3_enet_tqp_vector *tqp_vector;
int i, ret;
if (!netdev->rx_cpu_rmap) {
netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
if (!netdev->rx_cpu_rmap)
return -ENOMEM;
}
for (i = 0; i < priv->vector_num; i++) {
tqp_vector = &priv->tqp_vector[i];
ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
tqp_vector->vector_irq);
if (ret) {
hns3_free_rx_cpu_rmap(netdev);
return ret;
}
}
#endif
return 0;
}
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
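A note on the affinity-notifier deletions above and the new rmap helpers, as a hedged reading based on the generic <linux/cpu_rmap.h> API rather than anything stated in these hunks: irq_cpu_rmap_add() registers cpu_rmap's own affinity notifier on each vector IRQ so the reverse map can follow affinity changes, and an IRQ carries only one such notifier, which is why the driver's private hns3_nic_irq_affinity_notify/release pair has to go. It also explains the teardown order used later in hns3_nic_net_down(): the rmap (and the notifiers it registered) is released before free_irq() tears the vectors down. A minimal sketch of the pattern, with illustrative names:

#include <linux/cpu_rmap.h>

/* Illustrative only: reverse-map wiring for @nvec RX vectors.
 * Assumes CONFIG_RFS_ACCEL, since net_device::rx_cpu_rmap only exists then.
 */
static int example_map_rx_vectors(struct net_device *ndev,
				  const int *vector_irqs, int nvec)
{
	int i, err;

	ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(nvec);
	if (!ndev->rx_cpu_rmap)
		return -ENOMEM;

	for (i = 0; i < nvec; i++) {
		/* installs cpu_rmap's own affinity notifier on the IRQ,
		 * so the driver must not register one of its own
		 */
		err = irq_cpu_rmap_add(ndev->rx_cpu_rmap, vector_irqs[i]);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	/* unregisters those notifiers; must precede free_irq() */
	free_irq_cpu_rmap(ndev->rx_cpu_rmap);
	ndev->rx_cpu_rmap = NULL;
	return err;
}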
@@ -351,11 +364,16 @@ static int hns3_nic_net_up(struct net_device *netdev)
if (ret)
return ret;
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
netdev_warn(netdev, "set rx cpu rmap fail, ret=%d!\n", ret);
/* get irq resource for all vectors */
ret = hns3_nic_init_irq(priv);
if (ret) {
netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
return ret;
goto free_rmap;
}
clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
@@ -384,7 +402,8 @@ static int hns3_nic_net_up(struct net_device *netdev)
hns3_vector_disable(&priv->tqp_vector[j]);
hns3_nic_uninit_irq(priv);
free_rmap:
hns3_free_rx_cpu_rmap(netdev);
return ret;
}
@@ -467,6 +486,8 @@ static void hns3_nic_net_down(struct net_device *netdev)
if (ops->stop)
ops->stop(priv->ae_handle);
hns3_free_rx_cpu_rmap(netdev);
/* free irq resources */
hns3_nic_uninit_irq(priv);
@@ -1722,6 +1743,32 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
h->ae_algo->ops->reset_event(h->pdev, h);
}
#ifdef CONFIG_RFS_ACCEL
static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct hnae3_handle *h = hns3_get_handle(dev);
struct flow_keys fkeys;
if (!h->ae_algo->ops->add_arfs_entry)
return -EOPNOTSUPP;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
return -EPROTONOSUPPORT;
if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
(fkeys.basic.ip_proto != IPPROTO_TCP &&
fkeys.basic.ip_proto != IPPROTO_UDP))
return -EPROTONOSUPPORT;
return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
}
#endif
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -1737,6 +1784,10 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = hns3_rx_flow_steer,
#endif
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -2828,6 +2879,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
return ret;
}
skb_record_rx_queue(skb, ring->tqp->tqp_index);
*out_skb = skb;
return 0;
@@ -3331,8 +3383,6 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
irq_set_affinity_notifier(tqp_vector->vector_irq,
NULL);
irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
free_irq(tqp_vector->vector_irq, tqp_vector);
tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
@@ -578,6 +578,16 @@ static const struct key_info tuple_key_info[] = {
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32
/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
enum HCLGE_FD_ACTIVE_RULE_TYPE {
HCLGE_FD_RULE_NONE,
HCLGE_FD_ARFS_ACTIVE,
HCLGE_FD_EP_ACTIVE,
};
enum HCLGE_FD_PACKET_TYPE {
NIC_PACKET,
ROCE_PACKET,
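HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL (5, presumably counted in rounds of the driver's periodic service task) points to a scan that ages out aRFS rules once the stack has stopped tracking the flow. Below is a hedged sketch of what such a scan can look like; the helper name, the netdev parameter and the rule_node member are assumptions for illustration, while rps_may_expire_flow() is the standard RFS helper taking the RX queue, the flow_id stored in the rule and the filter id that .add_arfs_entry returned:

#ifdef CONFIG_RFS_ACCEL
/* Illustrative sketch: drop aRFS rules the stack no longer cares about.
 * Assumes hclge_fd_rule entries are linked into fd_rule_list through a
 * 'rule_node' hlist_node member.
 */
static void example_arfs_expire_scan(struct hclge_dev *hdev,
				     struct net_device *netdev)
{
	struct hclge_fd_rule *rule;
	struct hlist_node *node;

	spin_lock_bh(&hdev->fd_rule_lock);
	if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
		spin_unlock_bh(&hdev->fd_rule_lock);
		return;
	}

	hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
		/* rule->location doubles as the filter id handed to the stack */
		if (rps_may_expire_flow(netdev, rule->queue_id,
					rule->flow_id, rule->location)) {
			/* unprogram the hardware entry, unlink the rule,
			 * clear its bit in fd_bmap and decrement
			 * hclge_fd_rule_num
			 */
		}
	}
	spin_unlock_bh(&hdev->fd_rule_lock);
}
#endif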
@@ -630,6 +640,8 @@ struct hclge_fd_rule {
u16 vf_id;
u16 queue_id;
u16 location;
u16 flow_id; /* only used for arfs */
enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};
struct hclge_fd_ad_data {
@@ -809,7 +821,11 @@ struct hclge_dev {
struct hclge_fd_cfg fd_cfg;
struct hlist_head fd_rule_list;
spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
u16 hclge_fd_rule_num;
u16 fd_arfs_expire_timer;
unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
u8 fd_en;
u16 wanted_umv_size;
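Taken together, the new hclge_dev fields give the PF the bookkeeping it needs for aRFS: fd_bmap tracks which filter locations are in use, hclge_fd_rule_num counts them, flow_id ties a rule back to the stack's RFS flow table, fd_active_type keeps ethtool-configured and aRFS-created rules from mixing, and fd_rule_lock serializes the two paths. A hedged sketch of how a free location could be claimed for a new aRFS rule (hypothetical helper, not the driver's exact code; assumes the caller holds fd_rule_lock):

/* Illustrative sketch: claim a free flow director location for an aRFS rule.
 * MAX_FD_FILTER_NUM bounds the bitmap; the firmware-assigned rule count in
 * fd_cfg may be smaller and would be checked here too.
 */
static int example_claim_fd_location(struct hclge_dev *hdev, u16 *location)
{
	unsigned long bit;

	/* ethtool-configured rules and aRFS rules do not mix */
	if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)
		return -EOPNOTSUPP;

	bit = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
	if (bit >= MAX_FD_FILTER_NUM)
		return -ENOSPC;

	set_bit(bit, hdev->fd_bmap);
	hdev->hclge_fd_rule_num++;
	hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
	*location = bit;

	/* the caller would now build a hclge_fd_rule with this location,
	 * record the flow_id and queue_id in it, add it to fd_rule_list and
	 * program the hardware entry
	 */
	return 0;
}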