Commit 2de27f30 authored by David S. Miller's avatar David S. Miller

Merge branch 'mlx4'

Amir Vadai says:

====================
This series from Yan Burman adds support for unicast MAC address filtering and
ndo FDB operations.  It also includes some optimizations to loopback related
decisions and checks in the TX/RX fast path and one cleanup, all in separate
patches.

Today, when adding macvlan devices, the NIC goes into promiscuous mode, since
unicast MAC filtering is not supported. With these changes, macvlan devices can
be added without the penalty of promiscuous mode.

If adding a unicast address filter fails for some reason, e.g. due to lack of space
in the HW MAC table, the device forces itself into promiscuous mode (and leaves this
forced state once enough space becomes available).

Also, it is now possible to have a bridge under a multi-function configuration that
includes a PF and VFs.  In order to use a bridge over the PF/VFs, VM MAC FDB entries
must be added, e.g. using the 'bridge fdb add' command.

Changes from v1 - based on more comments from Eric Dumazet:
* added failure handling when adding unicast address filter

Changes from v0 - based on comments from Eric Dumazet:
* Removed unneeded synchronize_rcu()
* Use kfree_rcu() instead of synchronize_rcu() + kfree()
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents c6edfe10 0ccddcd1
...@@ -712,16 +712,13 @@ static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv, ...@@ -712,16 +712,13 @@ static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
__be32 ipv4_dst) __be32 ipv4_dst)
{ {
#ifdef CONFIG_INET #ifdef CONFIG_INET
__be64 be_mac = 0;
unsigned char mac[ETH_ALEN]; unsigned char mac[ETH_ALEN];
if (!ipv4_is_multicast(ipv4_dst)) { if (!ipv4_is_multicast(ipv4_dst)) {
if (cmd->fs.flow_type & FLOW_MAC_EXT) { if (cmd->fs.flow_type & FLOW_MAC_EXT)
memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN); memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
} else { else
be_mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16); memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(&mac, &be_mac, ETH_ALEN);
}
} else { } else {
ip_eth_mc_map(ipv4_dst, mac); ip_eth_mc_map(ipv4_dst, mac);
} }
......
...@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv, ...@@ -95,6 +95,28 @@ int en_print(const char *level, const struct mlx4_en_priv *priv,
return i; return i;
} }
/* Recompute the two loopback-related flag bits from the current device
 * state: whether the RX path must drop HW-loopback-ed frames, and whether
 * the TX path must place the dmac in the WQE.
 */
void mlx4_en_update_loopback_state(struct net_device *dev,
				   netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	bool mfunc = mlx4_is_mfunc(priv->mdev->dev);
	bool selftest = priv->validate_loopback;

	/* Start from a clean slate for both bits */
	priv->flags &= ~(MLX4_EN_FLAG_RX_FILTER_NEEDED |
			 MLX4_EN_FLAG_ENABLE_HW_LOOPBACK);

	/* Under SRIOV, loopback-ed frames must be filtered out in SW —
	 * unless loopback was explicitly requested or a selftest runs.
	 */
	if (mfunc && !(features & NETIF_F_LOOPBACK) && !selftest)
		priv->flags |= MLX4_EN_FLAG_RX_FILTER_NEEDED;

	/* dmac goes into the Tx WQE under SRIOV or during a loopback
	 * selftest.
	 */
	if (mfunc || selftest)
		priv->flags |= MLX4_EN_FLAG_ENABLE_HW_LOOPBACK;
}
static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
{ {
struct mlx4_en_profile *params = &mdev->profile; struct mlx4_en_profile *params = &mdev->profile;
......
...@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work) ...@@ -132,17 +132,14 @@ static void mlx4_en_filter_work(struct work_struct *work)
.priority = MLX4_DOMAIN_RFS, .priority = MLX4_DOMAIN_RFS,
}; };
int rc; int rc;
__be64 mac;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
list_add_tail(&spec_eth.list, &rule.list); list_add_tail(&spec_eth.list, &rule.list);
list_add_tail(&spec_ip.list, &rule.list); list_add_tail(&spec_ip.list, &rule.list);
list_add_tail(&spec_tcp.list, &rule.list); list_add_tail(&spec_tcp.list, &rule.list);
mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN); memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
filter->activated = 0; filter->activated = 0;
...@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ...@@ -413,6 +410,235 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
return 0; return 0;
} }
/* Expand the low 6 bytes of @src_mac into @dst_mac in network (big-endian)
 * byte order, and zero the two trailing pad bytes of the 8-byte buffer.
 */
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	/* Walk from the least significant byte backwards.  The index must
	 * be signed so the loop can run down to AND INCLUDING index 0:
	 * with "unsigned int i; for (i = ETH_ALEN - 1; i; --i)" the loop
	 * terminated at i == 1 and dst_mac[0] (the most significant MAC
	 * byte) was never written.
	 */
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
/* Attach a unicast steering rule that directs frames whose destination
 * address is @mac to QP @*qpn on this port.
 *
 * In B0 steering mode the rule is expressed as a unicast GID attach; in
 * device-managed flow-steering mode a flow rule is built and its handle
 * is returned through @reg_id so the caller can detach it later.
 * Returns 0 on success or a negative errno.
 */
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		/* B0 unicast GID layout: MAC in bytes 10..15, port in byte 5 */
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		/* Match on the full destination MAC only */
		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
/* Detach the unicast steering rule previously installed by
 * mlx4_en_uc_steer_add() for @mac / @qpn.  @reg_id is only meaningful in
 * device-managed flow-steering mode, where it identifies the flow rule.
 */
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		/* Rebuild the GID layout used at attach time:
		 * MAC in bytes 10..15, port number in byte 5.
		 */
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
/* Register the netdev's MAC with the HW and obtain the base QP number for
 * the port (stored in priv->base_qpn).
 *
 * In A0 steering the QP number is implied by the MAC table index.  In the
 * other modes a QP range is reserved, a unicast steering rule is attached,
 * and the MAC is inserted into the driver's mac_hash so the RX path can
 * recognize HW-loopback-ed frames.  On failure, everything acquired so far
 * is unwound through the goto chain.  Returns 0 or a negative errno.
 */
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	/* A0 steering: QP number is derived directly from the MAC index */
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	/* Track the MAC in mac_hash; the RX path reads this hash (under
	 * RCU) to drop frames the HW looped back to us.
	 */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;
	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}
/* Release everything mlx4_en_get_qp() acquired: unregister the MAC and,
 * outside of A0 steering, drop the steering rule, the reserved QP range
 * and the mac_hash entry (freed via RCU since the RX path may still be
 * traversing the hash).
 */
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
	       priv->dev->dev_addr);
	mlx4_unregister_mac(dev, priv->port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		struct hlist_head *bucket;
		unsigned int mac_hash;

		/* mac_hash modifications are serialized by mdev->state_lock,
		 * so no rcu_read_lock() is needed for this traversal.
		 */
		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
		bucket = &priv->mac_hash[mac_hash];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac,
						    priv->dev->dev_addr)) {
				en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
				       priv->port, priv->dev->dev_addr, qpn);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_qp_release_range(dev, qpn, 1);

				hlist_del_rcu(&entry->hlist);
				/* Defer the free until RX-side RCU readers
				 * are done with the entry.
				 */
				kfree_rcu(entry, rcu);
				break;
			}
		}
	}
}
/* Move the port from @prev_mac to @new_mac on QP @qpn.
 *
 * Outside of A0 steering, the matching mac_hash entry is re-keyed: the old
 * steering rule and MAC registration are torn down, the entry is moved to
 * the new hash bucket, and a fresh rule is attached.  In A0 mode the HW
 * MAC table entry is replaced in place.  Returns 0 or a negative errno
 * (-EINVAL if @prev_mac is not found in the hash).
 */
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				/* Wait for RCU readers to finish before the
				 * entry is reused for the new address.
				 */
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		/* prev_mac not present in mac_hash */
		return -EINVAL;
	}

	/* A0 steering: replace the HW MAC table entry directly */
	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
u64 mlx4_en_mac_to_u64(u8 *addr) u64 mlx4_en_mac_to_u64(u8 *addr)
{ {
u64 mac = 0; u64 mac = 0;
...@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr) ...@@ -435,7 +661,6 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL; return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
queue_work(mdev->workqueue, &priv->mac_task); queue_work(mdev->workqueue, &priv->mac_task);
return 0; return 0;
} }
...@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work) ...@@ -450,13 +675,14 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
mutex_lock(&mdev->state_lock); mutex_lock(&mdev->state_lock);
if (priv->port_up) { if (priv->port_up) {
/* Remove old MAC and insert the new one */ /* Remove old MAC and insert the new one */
err = mlx4_replace_mac(mdev->dev, priv->port, err = mlx4_en_replace_mac(priv, priv->base_qpn,
priv->base_qpn, priv->mac); priv->dev->dev_addr, priv->prev_mac);
if (err) if (err)
en_err(priv, "Failed changing HW MAC address\n"); en_err(priv, "Failed changing HW MAC address\n");
memcpy(priv->prev_mac, priv->dev->dev_addr,
sizeof(priv->prev_mac));
} else } else
en_dbg(HW, priv, "Port is down while " en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
"registering mac, exiting...\n");
mutex_unlock(&mdev->state_lock); mutex_unlock(&mdev->state_lock);
} }
...@@ -540,54 +766,21 @@ static void update_mclist_flags(struct mlx4_en_priv *priv, ...@@ -540,54 +766,21 @@ static void update_mclist_flags(struct mlx4_en_priv *priv,
} }
} }
static void mlx4_en_set_multicast(struct net_device *dev) static void mlx4_en_set_rx_mode(struct net_device *dev)
{ {
struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_priv *priv = netdev_priv(dev);
if (!priv->port_up) if (!priv->port_up)
return; return;
queue_work(priv->mdev->workqueue, &priv->mcast_task); queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
} }
static void mlx4_en_do_set_multicast(struct work_struct *work) static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev)
{ {
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
int err = 0; int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
en_dbg(HW, priv, "Card is not up, "
"ignoring multicast change.\n");
goto out;
}
if (!priv->port_up) {
en_dbg(HW, priv, "Port is down, "
"ignoring multicast change.\n");
goto out;
}
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
}
}
/*
* Promsicuous mode: disable all filters
*/
if (dev->flags & IFF_PROMISC) {
if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
if (netif_msg_rx_status(priv)) if (netif_msg_rx_status(priv))
en_warn(priv, "Entering promiscuous mode\n"); en_warn(priv, "Entering promiscuous mode\n");
...@@ -639,22 +832,20 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) ...@@ -639,22 +832,20 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
0, MLX4_MCAST_DISABLE); 0, MLX4_MCAST_DISABLE);
if (err) if (err)
en_err(priv, "Failed disabling " en_err(priv, "Failed disabling multicast filter\n");
"multicast filter\n");
/* Disable port VLAN filter */ /* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err) if (err)
en_err(priv, "Failed disabling VLAN filter\n"); en_err(priv, "Failed disabling VLAN filter\n");
} }
goto out; }
}
/* static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
* Not in promiscuous mode struct mlx4_en_dev *mdev)
*/ {
int err = 0;
if (priv->flags & MLX4_EN_FLAG_PROMISC) {
if (netif_msg_rx_status(priv)) if (netif_msg_rx_status(priv))
en_warn(priv, "Leaving promiscuous mode\n"); en_warn(priv, "Leaving promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_PROMISC; priv->flags &= ~MLX4_EN_FLAG_PROMISC;
...@@ -700,7 +891,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) ...@@ -700,7 +891,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err) if (err)
en_err(priv, "Failed enabling VLAN filter\n"); en_err(priv, "Failed enabling VLAN filter\n");
} }
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
struct net_device *dev,
struct mlx4_en_dev *mdev)
{
struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
int err = 0;
/* Enable/disable the multicast filter according to IFF_ALLMULTI */ /* Enable/disable the multicast filter according to IFF_ALLMULTI */
if (dev->flags & IFF_ALLMULTI) { if (dev->flags & IFF_ALLMULTI) {
...@@ -814,6 +1014,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) ...@@ -814,6 +1014,170 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
} }
} }
} }
}
/* Synchronize the HW unicast filter with the netdev's uc address list.
 *
 * Phase 1 walks mac_hash and removes every entry (steering rule + HW MAC
 * registration) that is no longer on the uc list; phase 2 adds entries for
 * new addresses.  If any addition fails (out of memory, HW MAC table full,
 * or steering failure), the port falls back to forced promiscuous mode via
 * MLX4_EN_FLAG_FORCE_PROMISC until a later pass frees enough space.
 */
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *n, *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				/* Tear down steering + HW registration, then
				 * unlink and RCU-free the hash entry.
				 */
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, n, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				/* Roll back the MAC registration too */
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	/* Report transitions into / out of the forced-promisc state */
	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
/* Deferred rx_mode work item: runs under mdev->state_lock and re-syncs
 * promiscuity, the unicast filter and the multicast state with the
 * netdev's current flags and address lists.
 */
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	/* Opportunistically refresh carrier state from a port query */
	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
/* NOTE(review): the three lines below are duplicated old/new diff-context
 * residue from the scraped commit page; kept byte-identical.
 */
out: out:
mutex_unlock(&mdev->state_lock); mutex_unlock(&mdev->state_lock);
} }
...@@ -876,8 +1240,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) ...@@ -876,8 +1240,7 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
priv->rx_usecs = MLX4_EN_RX_COAL_TIME; priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
priv->tx_frames = MLX4_EN_TX_COAL_PKTS; priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
priv->tx_usecs = MLX4_EN_TX_COAL_TIME; priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " en_dbg(INTR, priv, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
"rx_frames:%d rx_usecs:%d\n",
priv->dev->mtu, priv->rx_frames, priv->rx_usecs); priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
/* Setup cq moderation params */ /* Setup cq moderation params */
...@@ -959,8 +1322,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) ...@@ -959,8 +1322,8 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
cq->moder_time = moder_time; cq->moder_time = moder_time;
err = mlx4_en_set_cq_moder(priv, cq); err = mlx4_en_set_cq_moder(priv, cq);
if (err) if (err)
en_err(priv, "Failed modifying moderation " en_err(priv, "Failed modifying moderation for cq:%d\n",
"for cq:%d\n", ring); ring);
} }
priv->last_moder_packets[ring] = rx_packets; priv->last_moder_packets[ring] = rx_packets;
priv->last_moder_bytes[ring] = rx_bytes; priv->last_moder_bytes[ring] = rx_bytes;
...@@ -1077,8 +1440,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1077,8 +1440,7 @@ int mlx4_en_start_port(struct net_device *dev)
/* Set qp number */ /* Set qp number */
en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
err = mlx4_get_eth_qp(mdev->dev, priv->port, err = mlx4_en_get_qp(priv);
priv->mac, &priv->base_qpn);
if (err) { if (err) {
en_err(priv, "Failed getting eth qp\n"); en_err(priv, "Failed getting eth qp\n");
goto cq_err; goto cq_err;
...@@ -1141,8 +1503,8 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1141,8 +1503,8 @@ int mlx4_en_start_port(struct net_device *dev)
priv->prof->rx_pause, priv->prof->rx_pause,
priv->prof->rx_ppp); priv->prof->rx_ppp);
if (err) { if (err) {
en_err(priv, "Failed setting port general configurations " en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
"for port %d, with error %d\n", priv->port, err); priv->port, err);
goto tx_err; goto tx_err;
} }
/* Set default qp number */ /* Set default qp number */
...@@ -1172,7 +1534,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1172,7 +1534,7 @@ int mlx4_en_start_port(struct net_device *dev)
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
/* Schedule multicast task to populate multicast list */ /* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task); queue_work(mdev->workqueue, &priv->rx_mode_task);
mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
...@@ -1191,7 +1553,7 @@ int mlx4_en_start_port(struct net_device *dev) ...@@ -1191,7 +1553,7 @@ int mlx4_en_start_port(struct net_device *dev)
rss_err: rss_err:
mlx4_en_release_rss_steer(priv); mlx4_en_release_rss_steer(priv);
mac_err: mac_err:
mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); mlx4_en_put_qp(priv);
cq_err: cq_err:
while (rx_index--) while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
...@@ -1290,7 +1652,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) ...@@ -1290,7 +1652,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
mlx4_en_release_rss_steer(priv); mlx4_en_release_rss_steer(priv);
/* Unregister Mac address for the port */ /* Unregister Mac address for the port */
mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); mlx4_en_put_qp(priv);
if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN)) if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
mdev->mac_removed[priv->port] = 1; mdev->mac_removed[priv->port] = 1;
...@@ -1563,17 +1925,91 @@ static int mlx4_en_set_features(struct net_device *netdev, ...@@ -1563,17 +1925,91 @@ static int mlx4_en_set_features(struct net_device *netdev,
priv->ctrl_flags &= priv->ctrl_flags &=
cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK); cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
mlx4_en_update_loopback_state(netdev, features);
return 0; return 0;
} }
/* .ndo_fdb_add handler: add a static FDB entry to the uc/mc address list
 * of a multi-function (SRIOV) device.
 */
static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int rc = -EINVAL;

	/* FDB manipulation only makes sense with SRIOV enabled */
	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	/* Hardware cannot age addresses; when a state is supplied it must
	 * be a permanent one.
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Add FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		rc = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		rc = dev_mc_add_excl(dev, addr);

	/* Only surface duplicate errors when the caller asked for NLM_F_EXCL */
	if (rc == -EEXIST && !(flags & NLM_F_EXCL))
		rc = 0;

	return rc;
}
/* .ndo_fdb_del handler: remove a static FDB entry from the uc/mc address
 * list of a multi-function (SRIOV) device.
 */
static int mlx4_en_fdb_del(struct ndmsg *ndm,
			   struct net_device *dev,
			   const unsigned char *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int rc = -EINVAL;

	/* FDB manipulation only makes sense with SRIOV enabled */
	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	/* Hardware cannot age addresses; only permanent states accepted */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Del FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		rc = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		rc = dev_mc_del(dev, addr);

	return rc;
}
/* .ndo_fdb_dump handler: delegate to the default FDB dump helper, but
 * only for multi-function (SRIOV) devices; otherwise return @idx as-is.
 */
static int mlx4_en_fdb_dump(struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct net_device *dev, int idx)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!mlx4_is_mfunc(priv->mdev->dev))
		return idx;

	return ndo_dflt_fdb_dump(skb, cb, dev, idx);
}
static const struct net_device_ops mlx4_netdev_ops = { static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open, .ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close, .ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit, .ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue, .ndo_select_queue = mlx4_en_select_queue,
.ndo_get_stats = mlx4_en_get_stats, .ndo_get_stats = mlx4_en_get_stats,
.ndo_set_rx_mode = mlx4_en_set_multicast, .ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac, .ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = mlx4_en_change_mtu, .ndo_change_mtu = mlx4_en_change_mtu,
...@@ -1588,6 +2024,9 @@ static const struct net_device_ops mlx4_netdev_ops = { ...@@ -1588,6 +2024,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs, .ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif #endif
.ndo_fdb_add = mlx4_en_fdb_add,
.ndo_fdb_del = mlx4_en_fdb_del,
.ndo_fdb_dump = mlx4_en_fdb_dump,
}; };
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
...@@ -1644,7 +2083,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -1644,7 +2083,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->mac_index = -1; priv->mac_index = -1;
priv->msg_enable = MLX4_EN_MSG_LEVEL; priv->msg_enable = MLX4_EN_MSG_LEVEL;
spin_lock_init(&priv->stats_lock); spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
INIT_WORK(&priv->watchdog_task, mlx4_en_restart); INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
...@@ -1654,16 +2093,24 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -1654,16 +2093,24 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->dcbnl_ops = &mlx4_en_dcbnl_ops; dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif #endif
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
INIT_HLIST_HEAD(&priv->mac_hash[i]);
/* Query for default mac and max mtu */ /* Query for default mac and max mtu */
priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
priv->mac = mdev->dev->caps.def_mac[priv->port];
if (ILLEGAL_MAC(priv->mac)) { /* Set default MAC */
en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", dev->addr_len = ETH_ALEN;
priv->port, priv->mac); mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
priv->port, dev->dev_addr);
err = -EINVAL; err = -EINVAL;
goto out; goto out;
} }
memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
DS_SIZE * MLX4_EN_MAX_RX_FRAGS); DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
err = mlx4_en_alloc_resources(priv); err = mlx4_en_alloc_resources(priv);
...@@ -1694,11 +2141,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -1694,11 +2141,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
/* Set defualt MAC */
dev->addr_len = ETH_ALEN;
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
/* /*
* Set driver features * Set driver features
*/ */
...@@ -1718,6 +2160,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -1718,6 +2160,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MLX4_STEERING_MODE_DEVICE_MANAGED) MLX4_STEERING_MODE_DEVICE_MANAGED)
dev->hw_features |= NETIF_F_NTUPLE; dev->hw_features |= NETIF_F_NTUPLE;
if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
dev->priv_flags |= IFF_UNICAST_FLT;
mdev->pndev[port] = dev; mdev->pndev[port] = dev;
netif_carrier_off(dev); netif_carrier_off(dev);
...@@ -1731,6 +2176,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, ...@@ -1731,6 +2176,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* Configure port */ /* Configure port */
mlx4_en_calc_rx_buf(dev); mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port, err = mlx4_SET_PORT_general(mdev->dev, priv->port,
......
...@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -563,9 +563,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length; unsigned int length;
int polled = 0; int polled = 0;
int ip_summed; int ip_summed;
struct ethhdr *ethh;
dma_addr_t dma;
u64 s_mac;
int factor = priv->cqe_factor; int factor = priv->cqe_factor;
if (!priv->port_up) if (!priv->port_up)
...@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud ...@@ -603,21 +600,41 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next; goto next;
} }
/* Get pointer to first fragment since we haven't skb yet and /* Check if we need to drop the packet if SRIOV is not enabled
* cast it to ethhdr struct */ * and not performing the selftest or flb disabled
*/
if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
struct ethhdr *ethh;
dma_addr_t dma;
/* Get pointer to first fragment since we haven't
* skb yet and cast it to ethhdr struct
*/
dma = be64_to_cpu(rx_desc->data[0].addr); dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh), dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
ethh = (struct ethhdr *)(page_address(frags[0].page) + ethh = (struct ethhdr *)(page_address(frags[0].page) +
frags[0].offset); frags[0].offset);
s_mac = mlx4_en_mac_to_u64(ethh->h_source);
/* If source MAC is equal to our own MAC and not performing if (is_multicast_ether_addr(ethh->h_dest)) {
* the selftest or flb disabled - drop the packet */ struct mlx4_mac_entry *entry;
if (s_mac == priv->mac && struct hlist_node *n;
!((dev->features & NETIF_F_LOOPBACK) || struct hlist_head *bucket;
priv->validate_loopback)) unsigned int mac_hash;
/* Drop the packet, since HW loopback-ed it */
mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
bucket = &priv->mac_hash[mac_hash];
rcu_read_lock();
hlist_for_each_entry_rcu(entry, n, bucket, hlist) {
if (ether_addr_equal_64bits(entry->mac,
ethh->h_source)) {
rcu_read_unlock();
goto next; goto next;
}
}
rcu_read_unlock();
}
}
/* /*
* Packet is OK - process it. * Packet is OK - process it.
......
...@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) ...@@ -87,6 +87,8 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
priv->loopback_ok = 0; priv->loopback_ok = 0;
priv->validate_loopback = 1; priv->validate_loopback = 1;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
/* xmit */ /* xmit */
if (mlx4_en_test_loopback_xmit(priv)) { if (mlx4_en_test_loopback_xmit(priv)) {
en_err(priv, "Transmitting loopback packet failed\n"); en_err(priv, "Transmitting loopback packet failed\n");
...@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) ...@@ -107,6 +109,7 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
mlx4_en_test_loopback_exit: mlx4_en_test_loopback_exit:
priv->validate_loopback = 0; priv->validate_loopback = 0;
mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
return !loopback_ok; return !loopback_ok;
} }
......
...@@ -640,7 +640,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -640,7 +640,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->tx_csum++; ring->tx_csum++;
} }
if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) { if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
/* Copy dst mac address to wqe. This allows loopback in eSwitch, /* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other * so that VFs and PF can communicate with each other
*/ */
......
...@@ -1833,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) ...@@ -1833,12 +1833,9 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev; info->dev = dev;
info->port = port; info->port = port;
if (!mlx4_is_slave(dev)) { if (!mlx4_is_slave(dev)) {
INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
mlx4_init_mac_table(dev, &info->mac_table); mlx4_init_mac_table(dev, &info->mac_table);
mlx4_init_vlan_table(dev, &info->vlan_table); mlx4_init_vlan_table(dev, &info->vlan_table);
info->base_qpn = info->base_qpn = mlx4_get_base_qpn(dev, port);
dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
} }
sprintf(info->dev_name, "mlx4_port%d", port); sprintf(info->dev_name, "mlx4_port%d", port);
......
...@@ -653,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context { ...@@ -653,11 +653,6 @@ struct mlx4_set_port_rqp_calc_context {
__be32 mcast; __be32 mcast;
}; };
struct mlx4_mac_entry {
u64 mac;
u64 reg_id;
};
struct mlx4_port_info { struct mlx4_port_info {
struct mlx4_dev *dev; struct mlx4_dev *dev;
int port; int port;
...@@ -667,7 +662,6 @@ struct mlx4_port_info { ...@@ -667,7 +662,6 @@ struct mlx4_port_info {
char dev_mtu_name[16]; char dev_mtu_name[16];
struct device_attribute port_mtu_attr; struct device_attribute port_mtu_attr;
struct mlx4_mac_table mac_table; struct mlx4_mac_table mac_table;
struct radix_tree_root mac_tree;
struct mlx4_vlan_table vlan_table; struct mlx4_vlan_table vlan_table;
int base_qpn; int base_qpn;
}; };
...@@ -916,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, ...@@ -916,7 +910,6 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list); int start_index, int npages, u64 *page_list);
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx); int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
......
...@@ -198,7 +198,6 @@ enum cq_type { ...@@ -198,7 +198,6 @@ enum cq_type {
*/ */
#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x)) #define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
#define XNOR(x, y) (!(x) == !(y)) #define XNOR(x, y) (!(x) == !(y))
#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
struct mlx4_en_tx_info { struct mlx4_en_tx_info {
...@@ -432,6 +431,21 @@ struct ethtool_flow_id { ...@@ -432,6 +431,21 @@ struct ethtool_flow_id {
u64 id; u64 id;
}; };
enum {
MLX4_EN_FLAG_PROMISC = (1 << 0),
MLX4_EN_FLAG_MC_PROMISC = (1 << 1),
/* whether we need to enable hardware loopback by putting dmac
* in Tx WQE
*/
MLX4_EN_FLAG_ENABLE_HW_LOOPBACK = (1 << 2),
/* whether we need to drop packets that hardware loopback-ed */
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4)
};
#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
struct mlx4_en_priv { struct mlx4_en_priv {
struct mlx4_en_dev *mdev; struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof; struct mlx4_en_port_profile *prof;
...@@ -472,7 +486,7 @@ struct mlx4_en_priv { ...@@ -472,7 +486,7 @@ struct mlx4_en_priv {
int registered; int registered;
int allocated; int allocated;
int stride; int stride;
u64 mac; unsigned char prev_mac[ETH_ALEN + 2];
int mac_index; int mac_index;
unsigned max_mtu; unsigned max_mtu;
int base_qpn; int base_qpn;
...@@ -481,8 +495,6 @@ struct mlx4_en_priv { ...@@ -481,8 +495,6 @@ struct mlx4_en_priv {
struct mlx4_en_rss_map rss_map; struct mlx4_en_rss_map rss_map;
__be32 ctrl_flags; __be32 ctrl_flags;
u32 flags; u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
u8 num_tx_rings_p_up; u8 num_tx_rings_p_up;
u32 tx_ring_num; u32 tx_ring_num;
u32 rx_ring_num; u32 rx_ring_num;
...@@ -496,7 +508,7 @@ struct mlx4_en_priv { ...@@ -496,7 +508,7 @@ struct mlx4_en_priv {
struct mlx4_en_cq *tx_cq; struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
struct mlx4_qp drop_qp; struct mlx4_qp drop_qp;
struct work_struct mcast_task; struct work_struct rx_mode_task;
struct work_struct mac_task; struct work_struct mac_task;
struct work_struct watchdog_task; struct work_struct watchdog_task;
struct work_struct linkstate_task; struct work_struct linkstate_task;
...@@ -513,6 +525,7 @@ struct mlx4_en_priv { ...@@ -513,6 +525,7 @@ struct mlx4_en_priv {
bool wol; bool wol;
struct device *ddev; struct device *ddev;
int base_tx_qpn; int base_tx_qpn;
struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
#ifdef CONFIG_MLX4_EN_DCB #ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets; struct ieee_ets ets;
...@@ -532,8 +545,18 @@ enum mlx4_en_wol { ...@@ -532,8 +545,18 @@ enum mlx4_en_wol {
MLX4_EN_WOL_ENABLED = (1ULL << 62), MLX4_EN_WOL_ENABLED = (1ULL << 62),
}; };
struct mlx4_mac_entry {
struct hlist_node hlist;
unsigned char mac[ETH_ALEN + 2];
u64 reg_id;
struct rcu_head rcu;
};
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_update_loopback_state(struct net_device *dev,
netdev_features_t features);
void mlx4_en_destroy_netdev(struct net_device *dev); void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof); struct mlx4_en_port_profile *prof);
......
...@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) ...@@ -74,87 +74,6 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0; table->total = 0;
} }
static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
u64 mac, int *qpn, u64 *reg_id)
{
__be64 be_mac;
int err;
mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
qp.qpn = *qpn;
memcpy(&gid[10], &be_mac, ETH_ALEN);
gid[5] = port;
err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
struct mlx4_spec_list spec_eth = { {NULL} };
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
struct mlx4_net_trans_rule rule = {
.queue_mode = MLX4_NET_TRANS_Q_FIFO,
.exclusive = 0,
.allow_loopback = 1,
.promisc_mode = MLX4_FS_PROMISC_NONE,
.priority = MLX4_DOMAIN_NIC,
};
rule.port = port;
rule.qpn = *qpn;
INIT_LIST_HEAD(&rule.list);
spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
list_add_tail(&spec_eth.list, &rule.list);
err = mlx4_flow_attach(dev, &rule, reg_id);
break;
}
default:
return -EINVAL;
}
if (err)
mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
u64 mac, int qpn, u64 reg_id)
{
switch (dev->caps.steering_mode) {
case MLX4_STEERING_MODE_B0: {
struct mlx4_qp qp;
u8 gid[16] = {0};
__be64 be_mac;
qp.qpn = qpn;
mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
memcpy(&gid[10], &be_mac, ETH_ALEN);
gid[5] = port;
mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
break;
}
case MLX4_STEERING_MODE_DEVICE_MANAGED: {
mlx4_flow_detach(dev, reg_id);
break;
}
default:
mlx4_err(dev, "Invalid steering mode.\n");
}
}
static int validate_index(struct mlx4_dev *dev, static int validate_index(struct mlx4_dev *dev,
struct mlx4_mac_table *table, int index) struct mlx4_mac_table *table, int index)
{ {
...@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev, ...@@ -181,92 +100,6 @@ static int find_index(struct mlx4_dev *dev,
return -EINVAL; return -EINVAL;
} }
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
u64 reg_id;
mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
(unsigned long long) mac);
index = mlx4_register_mac(dev, port, mac);
if (index < 0) {
err = index;
mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
(unsigned long long) mac);
return err;
}
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
*qpn = info->base_qpn + index;
return 0;
}
err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
if (err) {
mlx4_err(dev, "Failed to reserve qp for mac registration\n");
goto qp_err;
}
err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
if (err)
goto steer_err;
entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry) {
err = -ENOMEM;
goto alloc_err;
}
entry->mac = mac;
entry->reg_id = reg_id;
err = radix_tree_insert(&info->mac_tree, *qpn, entry);
if (err)
goto insert_err;
return 0;
insert_err:
kfree(entry);
alloc_err:
mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
steer_err:
mlx4_qp_release_range(dev, *qpn, 1);
qp_err:
mlx4_unregister_mac(dev, port, mac);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_entry *entry;
mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
(unsigned long long) mac);
mlx4_unregister_mac(dev, port, mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
" qpn %d\n", port,
(unsigned long long) mac, qpn);
mlx4_uc_steer_release(dev, port, entry->mac,
qpn, entry->reg_id);
mlx4_qp_release_range(dev, qpn, 1);
radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
}
}
}
EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port, static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
__be64 *entries) __be64 *entries)
{ {
...@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) ...@@ -359,6 +192,12 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
} }
EXPORT_SYMBOL_GPL(mlx4_register_mac); EXPORT_SYMBOL_GPL(mlx4_register_mac);
int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{ {
...@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) ...@@ -397,29 +236,13 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
} }
EXPORT_SYMBOL_GPL(mlx4_unregister_mac); EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{ {
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table; struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
int index = qpn - info->base_qpn; int index = qpn - info->base_qpn;
int err = 0; int err = 0;
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
mlx4_uc_steer_release(dev, port, entry->mac,
qpn, entry->reg_id);
mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
entry->reg_id = 0;
mlx4_register_mac(dev, port, new_mac);
err = mlx4_uc_steer_add(dev, port, entry->mac,
&qpn, &entry->reg_id);
return err;
}
/* CX1 doesn't support multi-functions */ /* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex); mutex_lock(&table->mutex);
...@@ -439,7 +262,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) ...@@ -439,7 +262,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
mutex_unlock(&table->mutex); mutex_unlock(&table->mutex);
return err; return err;
} }
EXPORT_SYMBOL_GPL(mlx4_replace_mac); EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port, static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries) __be32 *entries)
......
...@@ -956,9 +956,8 @@ int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mo ...@@ -956,9 +956,8 @@ int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mo
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac); int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac); void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac); int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn); int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap); void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu, int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment