Commit be35ffa3 authored by David S. Miller

Merge branch 'mlx4'

Or Gerlitz says:

====================
mlx4 driver fixes, June 24, 2015

Some fixes we made recently; all need to go into stable.

Patch #1 "net/mlx4_en: Release TX QP when destroying TX ring" and patch #3
"Fix wrong csum complete report when rxvlan offload is disabled" should go
to kernels >= 3.19.

Patch #2 "Wake TX queues only when there's enough room" addresses a bug
that has been there from day one; it should go to whatever kernels it still
applies to.

Patch #4 "mlx4: Disable HA for SRIOV PF RoCE devices" should go to >= 4.0.

The patches are marked for net but were made against net-next, as the net
tree doesn't yet contain all the net-next bits.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents aefbef10 7254acff
@@ -1977,10 +1977,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
                         mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
         }
 
-        if (priv->base_tx_qpn) {
-                mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
-                priv->base_tx_qpn = 0;
-        }
 }
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
...
@@ -718,7 +718,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 }
 #endif
 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
-                      int hwtstamp_rx_filter)
+                      netdev_features_t dev_features)
 {
         __wsum hw_checksum = 0;
@@ -726,14 +726,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
         hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
 
-        if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
-            hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
-                /* next protocol non IPv4 or IPv6 */
-                if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-                    != htons(ETH_P_IP) &&
-                    ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
-                    != htons(ETH_P_IPV6))
-                        return -1;
+        if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
+            !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
                 hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
                 hdr += sizeof(struct vlan_hdr);
         }
@@ -896,7 +890,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                         if (ip_summed == CHECKSUM_COMPLETE) {
                                 void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
-                                if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
+                                if (check_csum(cqe, gro_skb, va,
+                                               dev->features)) {
                                         ip_summed = CHECKSUM_NONE;
                                         ring->csum_none++;
                                         ring->csum_complete--;
@@ -951,7 +946,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                 }
 
                 if (ip_summed == CHECKSUM_COMPLETE) {
-                        if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
+                        if (check_csum(cqe, skb, skb->data, dev->features)) {
                                 ip_summed = CHECKSUM_NONE;
                                 ring->csum_complete--;
                                 ring->csum_none++;
...
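Patch #3 changes the trigger for the VLAN fixup in check_csum(): instead of keying on the timestamping filter, it now fires whenever the CQE reports a VLAN tag (MLX4_CQE_VLAN_PRESENT_MASK) that the driver will not strip because NETIF_F_HW_VLAN_CTAG_RX is off. In that case the hardware CHECKSUM_COMPLETE value still covers the 4-byte VLAN header left in the packet, so its contribution must be subtracted. Below is a small userspace model of that one's-complement adjustment; the helpers and values are illustrative, not the kernel's csum API.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit sum into a 16-bit one's-complement value. */
static uint16_t fold(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* One's-complement sum of a buffer (even length assumed). */
static uint32_t csum(const uint8_t *p, int len)
{
        uint32_t sum = 0;
        for (int i = 0; i < len; i += 2)
                sum += (uint32_t)(p[i] << 8 | p[i + 1]);
        return sum;
}

int main(void)
{
        /* A VLAN header left in the packet: TCI + encapsulated proto. */
        uint8_t vlan_hdr[4] = { 0x00, 0x05, 0x08, 0x00 };

        /* Hardware summed the whole frame, VLAN header included;
         * 0x1234 stands in for the rest of the packet's sum. */
        uint32_t hw_sum = 0x1234 + csum(vlan_hdr, 4);

        /* Subtract the header's contribution (one's-complement subtract),
         * analogous to what get_fixed_vlan_csum() does in the driver. */
        uint32_t fixed = hw_sum + (uint32_t)(uint16_t)~fold(csum(vlan_hdr, 4));
        printf("fixed sum: 0x%04x (expect 0x1234)\n", fold(fixed));
        return 0;
}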
@@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         ring->size = size;
         ring->size_mask = size - 1;
         ring->stride = stride;
+        ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
 
         tmp = size * sizeof(struct mlx4_en_tx_info);
         ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
@@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
         mlx4_bf_free(mdev->dev, &ring->bf);
         mlx4_qp_remove(mdev->dev, &ring->qp);
         mlx4_qp_free(mdev->dev, &ring->qp);
+        mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
         mlx4_en_unmap_buffer(&ring->wqres.buf);
         mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
         kfree(ring->bounce_buf);
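Patch #1 moves the QP-number release from mlx4_en_free_resources(), which freed one global range starting at the now-removed priv->base_tx_qpn, into mlx4_en_destroy_tx_ring() above, so each ring releases exactly the QPN it reserved at creation time. A minimal userspace sketch of that per-ring ownership rule follows; reserve_range()/release_range() are hypothetical stand-ins, not the mlx4 core API.

#include <stdio.h>

/* Hypothetical stand-ins for the mlx4 QPN range allocator. */
static int next_qpn = 0x100;

static int reserve_range(int cnt)
{
        int base = next_qpn;
        next_qpn += cnt;
        return base;
}

static void release_range(int base, int cnt)
{
        printf("released qpn range [%#x, %#x)\n", base, base + cnt);
}

struct tx_ring { int qpn; };

static void create_ring(struct tx_ring *ring)
{
        ring->qpn = reserve_range(1);   /* each ring reserves its own QPN... */
}

static void destroy_ring(struct tx_ring *ring)
{
        release_range(ring->qpn, 1);    /* ...and releases it when destroyed */
}

int main(void)
{
        struct tx_ring rings[2];
        for (int i = 0; i < 2; i++)
                create_ring(&rings[i]);
        for (int i = 0; i < 2; i++)
                destroy_ring(&rings[i]);
        return 0;
}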
@@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                        MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
 }
 
+static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
+{
+        return ring->prod - ring->cons > ring->full_size;
+}
+
 static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
                               struct mlx4_en_tx_ring *ring, int index,
                               u8 owner)
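The new helper works because ring->prod and ring->cons are free-running unsigned counters that are never masked to the ring size: in modulo-2^32 arithmetic, prod - cons is the number of outstanding TXBBs even after a counter wraps. A standalone C sketch of that property (the constants are illustrative, not the driver's real HEADROOM/MAX_DESC_TXBBS values):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Free-running producer/consumer counters, as in the TX ring. */
        uint32_t cons = 0xfffffff0u;    /* consumer just below the wrap point */
        uint32_t prod = cons + 100;     /* producer has wrapped past 2^32 */

        /* Unsigned subtraction still yields the outstanding entry count. */
        assert(prod - cons == 100);

        uint32_t full_size = 1024 - 4 - 8;      /* size - headroom - max descriptor
                                                 * TXBBs, with made-up numbers */
        printf("ring full: %s\n", (prod - cons > full_size) ? "yes" : "no");
        return 0;
}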
@@ -473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 
         netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
 
-        /*
-         * Wakeup Tx queue if this stopped, and at least 1 packet
-         * was completed
-         */
-        if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
+        /* Wakeup Tx queue if this stopped, and ring is not full.
+         */
+        if (netif_tx_queue_stopped(ring->tx_queue) &&
+            !mlx4_en_is_tx_ring_full(ring)) {
                 netif_tx_wake_queue(ring->tx_queue);
                 ring->wake_queue++;
         }
@@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         skb_tx_timestamp(skb);
 
         /* Check available TXBBs And 2K spare for prefetch */
-        stop_queue = (int)(ring->prod - ring_cons) >
-                     ring->size - HEADROOM - MAX_DESC_TXBBS;
+        stop_queue = mlx4_en_is_tx_ring_full(ring);
         if (unlikely(stop_queue)) {
                 netif_tx_stop_queue(ring->tx_queue);
                 ring->queue_stopped++;
@@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 smp_rmb();
 
                 ring_cons = ACCESS_ONCE(ring->cons);
-                if (unlikely(((int)(ring->prod - ring_cons)) <=
-                             ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+                if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
                         netif_tx_wake_queue(ring->tx_queue);
                         ring->wake_queue++;
                 }
...
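The xmit path above stops the queue when the ring looks full, then re-reads ring->cons after smp_rmb() and wakes the queue itself if completions drained the ring in the meantime; without that re-check, a completion landing between the fullness test and netif_tx_stop_queue() could leave the queue stopped forever. The following C11 sketch models the idiom with atomics standing in for the kernel's barriers; all names are hypothetical.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint cons;                /* advanced by the completion path */
static unsigned int prod;               /* owned by the transmit path */
static atomic_bool queue_stopped;
static const unsigned int full_size = 8;

static bool ring_full(void)
{
        /* acquire pairs with the release in the completion path */
        return prod - atomic_load_explicit(&cons, memory_order_acquire) > full_size;
}

/* Transmit path: stop, then re-check to close the race with completions. */
static void xmit_one(void)
{
        prod++;
        if (ring_full()) {
                atomic_store(&queue_stopped, true);
                /* Re-read cons: a completion may have drained the ring
                 * between the fullness test above and the stop. */
                if (!ring_full()) {
                        atomic_store(&queue_stopped, false);
                        puts("self-wake: completions raced with stop");
                }
        }
}

/* Completion path: free entries, wake the queue only if no longer full. */
static void complete_some(unsigned int n)
{
        atomic_fetch_add_explicit(&cons, n, memory_order_release);
        if (atomic_load(&queue_stopped) && !ring_full())
                atomic_store(&queue_stopped, false);
}

int main(void)
{
        for (int i = 0; i < 12; i++)
                xmit_one();
        complete_some(6);
        printf("stopped=%d outstanding=%u\n",
               atomic_load(&queue_stopped), prod - atomic_load(&cons));
        return 0;
}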
@@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf)
         mutex_lock(&intf_mutex);
 
         list_add_tail(&intf->list, &intf_list);
-        list_for_each_entry(priv, &dev_list, dev_list)
+        list_for_each_entry(priv, &dev_list, dev_list) {
+                if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
+                        mlx4_dbg(&priv->dev,
+                                 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
+                        intf->flags &= ~MLX4_INTFF_BONDING;
+                }
                 mlx4_add_device(intf, priv);
+        }
 
         mutex_unlock(&intf_mutex);
...
@@ -279,6 +279,7 @@ struct mlx4_en_tx_ring {
         u32 size; /* number of TXBBs */
         u32 size_mask;
         u16 stride;
+        u32 full_size;
         u16 cqn; /* index of port CQ associated with this ring */
         u32 buf_size;
         __be32 doorbell_qpn;
@@ -580,7 +581,6 @@ struct mlx4_en_priv {
         int vids[128];
         bool wol;
         struct device *ddev;
-        int base_tx_qpn;
         struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
         struct hwtstamp_config hwtstamp_config;
         u32 counter_index;
...