Commit d205ce5c authored by David S. Miller

Merge branch 'mlx5-next'

Or Gerlitz says:

====================
mlx5 Ethernet driver update - Jun 11 2015

This series from Saeed, Achiad and Gal contains a few fixes
to the recently introduced mlx5 Ethernet functionality.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7ec0bb22 3191e05f
@@ -446,15 +446,11 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
 	if (err)
 		goto out;

-	err = mlx5_query_port_max_mtu(mdev, &max_mtu, port);
-	if (err)
-		goto out;
+	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

 	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

-	err = mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
-	if (err)
-		goto out;
+	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

 	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
......
@@ -57,7 +57,6 @@
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
 #define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
-#define MLX5E_PARAMS_MIN_MTU 46

 #define MLX5E_TX_CQ_POLL_BUDGET 128
 #define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */

@@ -284,6 +283,8 @@ struct mlx5e_sq {
 	struct netdev_queue *txq;
 	u32 sqn;
 	u32 bf_buf_size;
+	u16 max_inline;
+	u16 edge;
 	struct device *pdev;
 	__be32 mkey_be;
 	unsigned long state;

@@ -388,6 +389,7 @@ struct mlx5e_priv {
 	struct mutex state_lock; /* Protects Interface state */
 	struct mlx5_uar cq_uar;
 	u32 pdn;
+	u32 tdn;
 	struct mlx5_core_mr mr;
 	struct mlx5e_channel **channel;

@@ -454,6 +456,7 @@ enum mlx5e_link_mode {
 #define MLX5E_PROT_MASK(link_mode) (1 << link_mode)

+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 		       void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
......
@@ -257,25 +257,8 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
 	spin_unlock_irq(&priv->async_events_spinlock);
 }

-static void mlx5e_send_nop(struct mlx5e_sq *sq)
-{
-	struct mlx5_wq_cyc *wq = &sq->wq;
-
-	u16 pi = sq->pc & wq->sz_m1;
-	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-
-	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
-	memset(cseg, 0, sizeof(*cseg));
-
-	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
-	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
-	cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-
-	sq->skb[pi] = NULL;
-	sq->pc++;
-
-	mlx5e_tx_notify_hw(sq, wqe);
-}
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

 static int mlx5e_create_rq(struct mlx5e_channel *c,
 			   struct mlx5e_rq_param *param,

@@ -305,13 +288,16 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
 	}

 	rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
-				priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+				MLX5E_SW2HW_MTU(priv->netdev->mtu);
+	rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);

 	for (i = 0; i < wq_sz; i++) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+		u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;

 		wqe->data.lkey = c->mkey_be;
-		wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+		wqe->data.byte_count =
+			cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
 	}

 	rq->pdev = c->pdev;

@@ -447,7 +433,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 		goto err_disable_rq;

 	set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-	mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+	mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */

 	return 0;

@@ -536,6 +522,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
 	sq->mkey_be = c->mkey_be;
 	sq->channel = c;
 	sq->tc = tc;
+	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

 	return 0;

@@ -689,7 +676,7 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
 	/* ensure hw is notified of all pending wqes */
 	if (mlx5e_sq_has_room_for(sq, 1))
-		mlx5e_send_nop(sq);
+		mlx5e_send_nop(sq, true);

 	mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
 	while (sq->cc != sq->pc) /* wait till sq is empty */

@@ -1115,6 +1102,7 @@ static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
 	memset(in, 0, sizeof(in));

 	MLX5_SET(tisc, tisc, prio, tc);
+	MLX5_SET(tisc, tisc, transport_domain, priv->tdn);

 	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
 }

@@ -1213,6 +1201,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 {
 	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

+	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
 #define ROUGH_MAX_L2_L3_HDR_SZ 256

 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\

@@ -1367,11 +1357,30 @@ static void mlx5e_close_tirs(struct mlx5e_priv *priv)
 		mlx5e_close_tir(priv, i);
 }

+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int hw_mtu;
+	int err;
+
+	err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+	if (err)
+		return err;
+
+	mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+	if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+		netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+			    __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+	netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+
+	return 0;
+}
+
 int mlx5e_open_locked(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int actual_mtu;
 	int num_txqs;
 	int err;

@@ -1380,25 +1389,9 @@ int mlx5e_open_locked(struct net_device *netdev)
 	netif_set_real_num_tx_queues(netdev, num_txqs);
 	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);

-	err = mlx5_set_port_mtu(mdev, netdev->mtu);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
-			   __func__, err);
-		return err;
-	}
-
-	err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
-			   __func__, err);
+	err = mlx5e_set_dev_port_mtu(netdev);
+	if (err)
 		return err;
-	}
-
-	if (actual_mtu != netdev->mtu)
-		netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
-			    __func__, netdev->mtu);
-
-	netdev->mtu = actual_mtu;

 	err = mlx5e_open_tises(priv);
 	if (err) {

@@ -1613,15 +1606,14 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int max_mtu;
-	int err = 0;
+	int err;

-	err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-	if (err)
-		return err;
+	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);

-	if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
-		netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
-			   __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+	if (new_mtu > max_mtu) {
+		netdev_err(netdev,
+			   "%s: Bad MTU (%d) > (%d) Max\n",
+			   __func__, new_mtu, max_mtu);
 		return -EINVAL;
 	}

@@ -1655,7 +1647,10 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	    !MLX5_CAP_ETH(mdev, csum_cap) ||
 	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
 	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
-	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+	    MLX5_CAP_FLOWTABLE(mdev,
+			       flow_table_properties_nic_receive.max_ft_level)
+			       < 3) {
 		mlx5_core_warn(mdev,
 			       "Not creating net device, some required device capabilities are missing\n");
 		return -ENOTSUPP;

@@ -1736,6 +1731,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 	netdev->ethtool_ops = &mlx5e_ethtool_ops;

+	netdev->vlan_features |= NETIF_F_SG;
 	netdev->vlan_features |= NETIF_F_IP_CSUM;
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_GRO;

@@ -1748,7 +1744,6 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 		netdev->vlan_features |= NETIF_F_LRO;

 	netdev->hw_features = netdev->vlan_features;
-	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

@@ -1827,11 +1822,18 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 		goto err_unmap_free_uar;
 	}

+	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+			   __func__, err);
+		goto err_dealloc_pd;
+	}
+
 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
 	if (err) {
 		netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
 			   __func__, err);
-		goto err_dealloc_pd;
+		goto err_dealloc_transport_domain;
 	}

 	err = register_netdev(netdev);

@@ -1848,6 +1850,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 err_destroy_mkey:
 	mlx5_core_destroy_mkey(mdev, &priv->mr);

+err_dealloc_transport_domain:
+	mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
 err_dealloc_pd:
 	mlx5_core_dealloc_pd(mdev, priv->pdn);

@@ -1867,6 +1872,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 	unregister_netdev(netdev);
 	mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+	mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
 	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
 	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
 	mlx5e_disable_async_events(priv);
......
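For reference, the MLX5E_SW2HW_MTU()/MLX5E_HW2SW_MTU() macros added above translate between the netdev MTU (the L3 payload size) and the MTU programmed into the port, which also counts the Ethernet header, one VLAN tag and the FCS. A minimal standalone sketch of the arithmetic, with the kernel's header lengths written out as plain constants:

	#include <stdio.h>

	#define ETH_HLEN    14	/* dest MAC + src MAC + ethertype */
	#define VLAN_HLEN    4	/* one 802.1Q tag */
	#define ETH_FCS_LEN  4	/* frame check sequence */

	#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
	#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

	int main(void)
	{
		/* a standard 1500-byte netdev MTU maps to a 1522-byte HW frame */
		printf("sw 1500 -> hw %d\n", MLX5E_SW2HW_MTU(1500)); /* 1522 */
		printf("hw 1522 -> sw %d\n", MLX5E_HW2SW_MTU(1522)); /* 1500 */
		return 0;
	}

This conversion is also why mlx5e_set_dev_port_mtu() compares MLX5E_HW2SW_MTU(hw_mtu) against netdev->mtu: the two values are only commensurable after translation.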
@@ -45,18 +45,18 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
 	if (unlikely(!skb))
 		return -ENOMEM;

-	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
 	dma_addr = dma_map_single(rq->pdev,
 				  /* hw start padding */
-				  skb->data - MLX5E_NET_IP_ALIGN,
+				  skb->data,
 				  /* hw end padding */
 				  rq->wqe_sz,
 				  DMA_FROM_DEVICE);

 	if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
 		goto err_free_skb;

+	skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
 	*((dma_addr_t *)skb->cb) = dma_addr;
 	wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

@@ -217,7 +217,7 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 		dma_unmap_single(rq->pdev,
 				 *((dma_addr_t *)skb->cb),
-				 skb_end_offset(skb),
+				 rq->wqe_sz,
 				 DMA_FROM_DEVICE);

 		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
......
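The reordering above makes the DMA mapping and unmapping symmetric: the buffer is mapped for the full rq->wqe_sz starting at skb->data before skb_reserve() moves the data pointer, and the completion path now unmaps that same rq->wqe_sz rather than skb_end_offset(), which did not necessarily match the mapped length. A small standalone model of the offsets involved (MLX5E_NET_IP_ALIGN is 2 in the driver; the buffer address and size here are illustrative):

	#include <stdio.h>

	#define MLX5E_NET_IP_ALIGN 2

	int main(void)
	{
		unsigned long buf = 0x1000;	/* stands in for skb->data at map time */
		unsigned int wqe_sz = 1536;	/* illustrative mapped length */

		/* map [buf, buf + wqe_sz); post buf + 2 to HW so the 14-byte
		 * Ethernet header leaves the IP header 4-byte aligned */
		unsigned long hw_addr = buf + MLX5E_NET_IP_ALIGN;

		printf("map   %#lx len %u\n", buf, wqe_sz);
		printf("post  %#lx len %u\n", hw_addr, wqe_sz - MLX5E_NET_IP_ALIGN);
		printf("unmap %#lx len %u\n", buf, wqe_sz); /* symmetric with map */
		return 0;
	}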
@@ -34,6 +34,33 @@
 #include <linux/if_vlan.h>
 #include "en.h"

+#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+			    MLX5E_SQ_NOPS_ROOM)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+	struct mlx5_wq_cyc *wq = &sq->wq;
+
+	u16 pi = sq->pc & wq->sz_m1;
+	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+	memset(cseg, 0, sizeof(*cseg));
+
+	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+	sq->skb[pi] = NULL;
+	sq->pc++;
+
+	if (notify_hw) {
+		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+		mlx5e_tx_notify_hw(sq, wqe);
+	}
+}
+
 static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
 				      u32 *size)
 {

@@ -89,21 +116,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
 	return MLX5E_MIN_INLINE;
 }

-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
-{
-	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
-	int cpy1_sz = 2 * ETH_ALEN;
-	int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
-
-	skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
-	skb_pull_inline(skb, cpy1_sz);
-	vhdr->h_vlan_proto = skb->vlan_proto;
-	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
-	skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
-				  cpy2_sz);
-	skb_pull_inline(skb, cpy2_sz);
-}
-
 static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 {
 	struct mlx5_wq_cyc *wq = &sq->wq;

@@ -149,12 +161,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 			       ETH_ZLEN);
 	}

-	if (skb_vlan_tag_present(skb)) {
-		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
-	} else {
-		skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
-		skb_pull_inline(skb, ihs);
-	}
+	skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+	skb_pull_inline(skb, ihs);

 	eseg->inline_hdr_sz = cpu_to_be16(ihs);

@@ -215,7 +223,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);

-	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
 		netif_tx_stop_queue(sq->txq);
 		sq->stats.stopped++;
 	}

@@ -223,6 +231,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 	if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
 		mlx5e_tx_notify_hw(sq, wqe);

+	/* fill sq edge with nops to avoid wqe wrap around */
+	while ((sq->pc & wq->sz_m1) > sq->edge)
+		mlx5e_send_nop(sq, false);
+
 	sq->stats.packets++;
 	return NETDEV_TX_OK;

@@ -330,7 +342,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
 	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

 	if (netif_tx_queue_stopped(sq->txq) &&
-	    mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
 	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
 		netif_tx_wake_queue(sq->txq);
 		sq->stats.wake++;
......
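The sq->edge logic added here pairs with the MLX5E_SQ_STOP_ROOM threshold: a send WQE can span up to MLX5_SEND_WQE_MAX_WQEBBS basic blocks, and those blocks must be contiguous in the cyclic queue. After each xmit the producer index is pushed past the queue end with un-notified NOPs whenever it lands beyond sq->edge, and the queue is stopped while fewer than MLX5E_SQ_STOP_ROOM slots remain, reserving room for one maximal WQE plus the NOP fill. A self-contained model of the index arithmetic (the queue and WQE sizes are illustrative, not the driver's values):

	#include <stdio.h>

	#define WQ_SZ      8			/* illustrative: entries, power of two */
	#define SZ_M1      (WQ_SZ - 1)
	#define MAX_WQEBBS 4			/* stand-in for MLX5_SEND_WQE_MAX_WQEBBS */
	#define EDGE       (WQ_SZ - MAX_WQEBBS)	/* mirrors sq->edge */

	int main(void)
	{
		unsigned int pc = 6;	/* producer counter after posting a WQE */

		/* fill the queue edge with NOPs so the next (possibly maximal)
		 * WQE starts at slot 0 and never straddles the wrap point */
		while ((pc & SZ_M1) > EDGE) {
			printf("nop at slot %u\n", pc & SZ_M1);
			pc++;
		}
		printf("next wqe starts at slot %u\n", pc & SZ_M1); /* 0 */
		return 0;
	}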
@@ -248,22 +248,18 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
 	return err;
 }

-static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
-			       int *admin_mtu, int *max_mtu, int *oper_mtu,
-			       u8 local_port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+				int *max_mtu, int *oper_mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
-	int err;

 	memset(in, 0, sizeof(in));

-	MLX5_SET(pmtu_reg, in, local_port, local_port);
+	MLX5_SET(pmtu_reg, in, local_port, port);

-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PMTU, 0, 0);
-	if (err)
-		return err;
+	mlx5_core_access_reg(dev, in, sizeof(in), out,
+			     sizeof(out), MLX5_REG_PMTU, 0, 0);

 	if (max_mtu)
 		*max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);

@@ -271,11 +267,9 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
 		*oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
 	if (admin_mtu)
 		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
-
-	return 0;
 }

-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 {
 	u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
 	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];

@@ -283,24 +277,24 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 	memset(in, 0, sizeof(in));

 	MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
-	MLX5_SET(pmtu_reg, in, local_port, 1);
+	MLX5_SET(pmtu_reg, in, local_port, port);

-	return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
-				    MLX5_REG_PMTU, 0, 1);
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PMTU, 0, 1);
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);

-int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
-			    u8 local_port)
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+			     u8 port)
 {
-	return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port);
+	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);

-int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
-			     u8 local_port)
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+			      u8 port)
 {
-	return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port);
+	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
......
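With this change the PMTU queries can no longer report failure: the mlx5_core_access_reg() result is deliberately ignored and callers consume the out-parameters unconditionally, which is what lets the error paths in main.c and en_main.c above disappear. A sketch of the resulting call pattern (mdev, port, new_mtu and err stand for whatever the caller has in scope):

	int max_mtu, oper_mtu, err;

	/* the queries are now void: nothing to check or propagate */
	mlx5_query_port_max_mtu(mdev, &max_mtu, port);
	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	/* setting the MTU is still a register write that can fail,
	 * so mlx5_set_port_mtu() keeps its int return */
	err = mlx5_set_port_mtu(mdev, new_mtu, port);
	if (err)
		return err;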
@@ -34,6 +34,41 @@
 #include "mlx5_core.h"
 #include "transobj.h"

+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(alloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*tdn = MLX5_GET(alloc_transport_domain_out, out,
+				transport_domain);
+
+	return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(dealloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+	mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
 int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
 {
 	u32 out[MLX5_ST_SZ_DW(create_rq_out)];
......
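The transport domain allocated here is a containing object: en_main.c above allocates one per netdev (priv->tdn) and stamps it into every TIS and TIR context via MLX5_SET(..., transport_domain, ...). A sketch of the lifecycle as the driver uses it, with the unrelated setup elided:

	u32 tdn;
	int err;

	err = mlx5_alloc_transport_domain(mdev, &tdn);
	if (err)
		return err;

	/* ... MLX5_SET(tisc, tisc, transport_domain, tdn) before
	 * mlx5_core_create_tis(), and likewise for each TIR ... */

	/* teardown runs in reverse, once the TISes/TIRs are destroyed */
	mlx5_dealloc_transport_domain(mdev, tdn);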
@@ -33,6 +33,8 @@
 #ifndef __TRANSOBJ_H__
 #define __TRANSOBJ_H__

+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
 int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
 			u32 *rqn);
 int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
......
@@ -131,6 +131,10 @@ enum {
 	MLX5_INLINE_SEG = 0x80000000,
 };

+enum {
+	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
 enum {
 	MLX5_MIN_PKEY_TABLE_SIZE = 128,
 	MLX5_MAX_LOG_PKEY_TABLE = 5,
......
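MLX5_HW_START_PADDING reuses the MLX5_INLINE_SEG bit (bit 31). In the en_main.c hunk above it is ORed into the RX WQE's 32-bit byte_count, so the buffer length and the request for hardware start padding travel in one field; the flag sits far above any realistic buffer length, so the two never collide. A standalone sketch of the encoding (the 1536-byte length is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define MLX5_INLINE_SEG       0x80000000u
	#define MLX5_HW_START_PADDING MLX5_INLINE_SEG	/* alias, as in device.h */

	int main(void)
	{
		uint32_t byte_count = 1536;	/* illustrative RX buffer length */
		uint32_t wqe_byte_count = byte_count | MLX5_HW_START_PADDING;

		/* disjoint bits: both the length and the flag survive the OR */
		printf("len=%u pad=%u\n",
		       wqe_byte_count & ~MLX5_HW_START_PADDING,
		       (wqe_byte_count & MLX5_HW_START_PADDING) ? 1u : 0u);
		return 0;
	}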
@@ -756,11 +756,11 @@ int mlx5_set_port_status(struct mlx5_core_dev *dev,
 			 enum mlx5_port_status status);
 int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);

-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
-int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
-			    u8 local_port);
-int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
-			     u8 local_port);
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+			      u8 port);

 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
 			      u8 *vl_hw_cap, u8 local_port);
......