Commit 15192a8c authored by Barak Witkowski, committed by David S. Miller

bnx2x: Split the FP structure

This patch moves some fields out of the FP structure into separate structures, in
order to minimize the size of the contiguous memory allocated.
Signed-off-by: Barak Witkowski <barak@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 37ae41a9
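
In short, the patch shrinks struct bnx2x_fastpath by turning the embedded TPA aggregation array into a pointer and by moving the per-queue statistics and slow-path objects into two new arrays hung off struct bnx2x, allocated in parallel with bp->fp[]. A minimal, illustrative sketch of the resulting layout (field names are taken from the diff below; all unrelated members are omitted, so this is not the full driver code):

	/* illustrative sketch only -- condensed from the diff below */
	struct bnx2x_fastpath {
		struct bnx2x_agg_info *tpa_info;  /* was a fixed array of
						   * ETH_MAX_AGGREGATION_QUEUES_E1H_E2 entries */
		/* ... per-queue stats and SP objects no longer live here ... */
	};

	struct bnx2x_sp_objs {                    /* new: slow-path objects, one per queue */
		struct bnx2x_vlan_mac_obj mac_obj;
		struct bnx2x_queue_sp_obj q_obj;
	};

	struct bnx2x_fp_stats {                   /* new: statistics, one per queue */
		struct tstorm_per_queue_stats old_tclient;
		struct ustorm_per_queue_stats old_uclient;
		struct xstorm_per_queue_stats old_xclient;
		struct bnx2x_eth_q_stats eth_q_stats;
		struct bnx2x_eth_q_stats_old eth_q_stats_old;
	};

	struct bnx2x {
		struct bnx2x_fastpath	*fp;      /* fp[i], sp_objs[i] and fp_stats[i] */
		struct bnx2x_sp_objs	*sp_objs; /* all describe the same queue i     */
		struct bnx2x_fp_stats	*fp_stats;
		/* ... */
	};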
@@ -549,34 +549,23 @@ struct bnx2x_fastpath {
 		rx_calls;
 
 	/* TPA related */
-	struct bnx2x_agg_info	tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+	struct bnx2x_agg_info	*tpa_info;
 	u8			disable_tpa;
 #ifdef BNX2X_STOP_ON_ERROR
 	u64			tpa_queue_used;
 #endif
-	struct tstorm_per_queue_stats old_tclient;
-	struct ustorm_per_queue_stats old_uclient;
-	struct xstorm_per_queue_stats old_xclient;
-	struct bnx2x_eth_q_stats eth_q_stats;
-	struct bnx2x_eth_q_stats_old eth_q_stats_old;
 
 	/* The size is calculated using the following:
 	     sizeof name field from netdev structure +
 	     4 ('-Xx-' string) +
 	     4 (for the digits and to make it DWORD aligned) */
 #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
 	char			name[FP_NAME_SIZE];
-
-	/* MACs object */
-	struct bnx2x_vlan_mac_obj mac_obj;
-
-	/* Queue State object */
-	struct bnx2x_queue_sp_obj q_obj;
 };
 
-#define bnx2x_fp(bp, nr, var)	(bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var)	((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp)	((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp)	(&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))
 
 /* Use 2500 as a mini-jumbo MTU for FCoE */
 #define BNX2X_FCOE_MINI_JUMBO_MTU	2500

@@ -587,6 +576,8 @@ struct bnx2x_fastpath {
 					FCOE_IDX_OFFSET)
 #define bnx2x_fcoe_fp(bp)	(&bp->fp[FCOE_IDX(bp)])
 #define bnx2x_fcoe(bp, var)	(bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp)	(&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var)	(bnx2x_fcoe_inner_sp_obj(bp)->var)
 #define bnx2x_fcoe_tx(bp, var)	(bnx2x_fcoe_fp(bp)-> \
 						txdata_ptr[FIRST_TX_COS_INDEX] \
 						->var)

@@ -1187,11 +1178,29 @@ struct bnx2x_prev_path_list {
 	struct list_head list;
 };
 
+struct bnx2x_sp_objs {
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj mac_obj;
+
+	/* Queue State object */
+	struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+	struct tstorm_per_queue_stats old_tclient;
+	struct ustorm_per_queue_stats old_uclient;
+	struct xstorm_per_queue_stats old_xclient;
+	struct bnx2x_eth_q_stats eth_q_stats;
+	struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
 struct bnx2x {
 	/* Fields used in the tx and intr/napi performance paths
 	 * are grouped together in the beginning of the structure
 	 */
 	struct bnx2x_fastpath	*fp;
+	struct bnx2x_sp_objs	*sp_objs;
+	struct bnx2x_fp_stats	*fp_stats;
 	struct bnx2x_fp_txdata	*bnx2x_txq;
 	int			bnx2x_txq_size;
 	void __iomem		*regview;
......
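The accessor macros added above keep the call sites short: driver code now indexes the new parallel arrays by fp->index instead of dereferencing the fastpath structure directly. As a quick illustration (both lines are lifted from the changes further down, not new code), a per-queue counter update changes from

	fp->eth_q_stats.rx_skb_alloc_failed++;		/* before: stats embedded in the fastpath */

to

	bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;	/* after: stats live in bp->fp_stats[]    */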
@@ -47,6 +47,10 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 {
 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
+	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
 	int old_max_eth_txqs, new_max_eth_txqs;
 	int old_txdata_index = 0, new_txdata_index = 0;

@@ -57,6 +61,12 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;
 
+	/* move sp_objs contents as well, as their indices match fp ones */
+	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+	/* move fp_stats contents as well, as their indices match fp ones */
+	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
 	/* Update txdata pointers in fp and move txdata content accordingly:
 	 * Each fp consumes 'max_cos' txdata structures, so the index should be
 	 * decremented by max_cos x delta.

@@ -500,7 +510,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   where we are and drop the whole packet */
 			err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
 			if (unlikely(err)) {
-				fp->eth_q_stats.rx_skb_alloc_failed++;
+				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 				return err;
 			}

@@ -605,7 +615,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* drop the packet and keep the buffer in the bin */
 	DP(NETIF_MSG_RX_STATUS,
 	   "Failed to allocate or map a new skb - dropping packet!\n");
-	fp->eth_q_stats.rx_skb_alloc_failed++;
+	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
 }
 
 static int bnx2x_alloc_rx_data(struct bnx2x *bp,

@@ -638,8 +648,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 	return 0;
 }
 
-static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
-				struct bnx2x_fastpath *fp)
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+			 struct bnx2x_fastpath *fp,
+			 struct bnx2x_eth_q_stats *qstats)
 {
 	/* Do nothing if no IP/L4 csum validation was done */

@@ -653,7 +665,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
 	if (cqe->fast_path_cqe.type_error_flags &
 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-		fp->eth_q_stats.hw_csum_err++;
+		qstats->hw_csum_err++;
 	else
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 }

@@ -797,7 +809,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 				   "ERROR flags %x rx packet %u\n",
 				   cqe_fp_flags, sw_comp_cons);
-				fp->eth_q_stats.rx_err_discard_pkt++;
+				bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
 				goto reuse_rx;
 			}

@@ -810,7 +822,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 				if (skb == NULL) {
 					DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 					   "ERROR packet dropped because of alloc failure\n");
-					fp->eth_q_stats.rx_skb_alloc_failed++;
+					bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 					goto reuse_rx;
 				}
 				memcpy(skb->data, data + pad, len);

@@ -824,14 +836,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 					skb = build_skb(data, 0);
 					if (unlikely(!skb)) {
 						kfree(data);
-						fp->eth_q_stats.rx_skb_alloc_failed++;
+						bnx2x_fp_qstats(bp, fp)->
+							rx_skb_alloc_failed++;
 						goto next_rx;
 					}
 					skb_reserve(skb, pad);
 				} else {
 					DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
 					   "ERROR packet dropped because of alloc failure\n");
-					fp->eth_q_stats.rx_skb_alloc_failed++;
+					bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
 reuse_rx:
 					bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
 					goto next_rx;

@@ -847,8 +860,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 		skb_checksum_none_assert(skb);
 
 		if (bp->dev->features & NETIF_F_RXCSUM)
-			bnx2x_csum_validate(skb, cqe, fp);
+			bnx2x_csum_validate(skb, cqe, fp,
+					    bnx2x_fp_qstats(bp, fp));
 
 		skb_record_rx_queue(skb, fp->rx_queue);

@@ -1780,7 +1793,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
 	int rc;
 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
-	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
 
 	/***************** Cleanup MACs' object first *************************/

@@ -1791,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
 	/* Clean ETH primary MAC */
 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
-	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
 				 &ramrod_flags);
 	if (rc != 0)
 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);

@@ -1877,12 +1890,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 {
 	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
 	int cos;
 	struct napi_struct orig_napi = fp->napi;
+	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
 	/* bzero bnx2x_fastpath contents */
-	if (bp->stats_init)
+	if (bp->stats_init) {
+		memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
 		memset(fp, 0, sizeof(*fp));
-	else {
+	} else {
 		/* Keep Queue statistics */
 		struct bnx2x_eth_q_stats *tmp_eth_q_stats;
 		struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;

@@ -1890,26 +1907,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 		tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
 					  GFP_KERNEL);
 		if (tmp_eth_q_stats)
-			memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+			memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
 			       sizeof(struct bnx2x_eth_q_stats));
 
 		tmp_eth_q_stats_old =
 			kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
 				GFP_KERNEL);
 		if (tmp_eth_q_stats_old)
-			memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+			memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
 			       sizeof(struct bnx2x_eth_q_stats_old));
 
+		memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
 		memset(fp, 0, sizeof(*fp));
 
 		if (tmp_eth_q_stats) {
-			memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+			memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
 			       sizeof(struct bnx2x_eth_q_stats));
 			kfree(tmp_eth_q_stats);
 		}
 
 		if (tmp_eth_q_stats_old) {
-			memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+			memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
 			       sizeof(struct bnx2x_eth_q_stats_old));
 			kfree(tmp_eth_q_stats_old);
 		}

@@ -1918,7 +1936,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
 	/* Restore the NAPI object as it has been already initialized */
 	fp->napi = orig_napi;
+	fp->tpa_info = orig_tpa_info;
 	fp->bp = bp;
 	fp->index = index;
 	if (IS_ETH_FP(fp))

@@ -2918,7 +2936,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
 		     (skb_shinfo(skb)->nr_frags + 3))) {
-		txdata->parent_fp->eth_q_stats.driver_xoff++;
+		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
 		netif_tx_stop_queue(txq);
 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;

@@ -3200,7 +3218,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		 * fp->bd_tx_cons */
 		smp_mb();
 
-		txdata->parent_fp->eth_q_stats.driver_xoff++;
+		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
 		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
 			netif_tx_wake_queue(txq);
 	}

@@ -3437,7 +3455,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 					     cqe_ring_prod);
 	fp->rx_pkt = fp->rx_calls = 0;
 
-	fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
 
 	return i - failure_cnt;
 }

@@ -3642,7 +3660,10 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
 
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
+	kfree(bp->fp->tpa_info);
 	kfree(bp->fp);
+	kfree(bp->sp_objs);
+	kfree(bp->fp_stats);
 	kfree(bp->bnx2x_txq);
 	kfree(bp->msix_table);
 	kfree(bp->ilt);

@@ -3654,6 +3675,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	struct msix_entry *tbl;
 	struct bnx2x_ilt *ilt;
 	int msix_table_size = 0;
+	int fp_array_size;
+	int i;
 
 	/*
 	 * The biggest MSI-X table we might need is as a maximum number of fast

@@ -3662,12 +3685,34 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
 	msix_table_size = bp->igu_sb_cnt + 1;
 
 	/* fp array: RSS plus CNIC related L2 queues */
-	fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
-		     sizeof(*fp), GFP_KERNEL);
+	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+	BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+
+	fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
 	if (!fp)
 		goto alloc_err;
+	for (i = 0; i < fp_array_size; i++) {
+		fp[i].tpa_info =
+			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+		if (!(fp[i].tpa_info))
+			goto alloc_err;
+	}
+
 	bp->fp = fp;
 
+	/* allocate sp objs */
+	bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+			      GFP_KERNEL);
+	if (!bp->sp_objs)
+		goto alloc_err;
+
+	/* allocate fp_stats */
+	bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+			       GFP_KERNEL);
+	if (!bp->fp_stats)
+		goto alloc_err;
+
 	/* Allocate memory for the transmission queues array */
 	bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
 #ifdef BCM_CNIC
......
@@ -981,8 +981,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
 	struct bnx2x *bp = fp->bp;
 
 	/* Configure classification DBs */
-	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
-			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
 			   bnx2x_sp_mapping(bp, mac_rdata),
 			   BNX2X_FILTER_MAC_PENDING,
 			   &bp->sp_state, obj_type,

@@ -1138,8 +1138,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
 	/* No multi-CoS for FCoE L2 client */
 	BUG_ON(fp->max_cos != 1);
 
-	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
-			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+			     &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 			     bnx2x_sp_mapping(bp, q_rdata), q_type);
 
 	DP(NETIF_MSG_IFUP,
......
@@ -2292,7 +2292,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
 		return -ENODEV;
 	}
 
-	params.q_obj = &bp->fp->q_obj;
+	params.q_obj = &bp->sp_objs->q_obj;
 	params.cmd = BNX2X_Q_CMD_EMPTY;
 
 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

@@ -2516,7 +2516,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 
 	if (is_multi(bp)) {
 		for_each_eth_queue(bp, i) {
-			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+			hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
 			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
 				if (bnx2x_q_stats_arr[j].size == 0) {
 					/* skip this counter */
......
@@ -1583,7 +1583,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
 	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
-	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
 	DP(BNX2X_MSG_SP,
 	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",

@@ -3035,9 +3035,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
 	memcpy(ether_stat->version, DRV_MODULE_VERSION,
 	       ETH_STAT_INFO_VERSION_LEN - 1);
 
-	bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+	bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
 					DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
 					ether_stat->mac_local);
 
 	ether_stat->mtu_size = bp->dev->mtu;

@@ -4632,7 +4632,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
 			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
 		else
 #endif
-			vlan_mac_obj = &bp->fp[cid].mac_obj;
+			vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
 
 		break;
 	case BNX2X_FILTER_MCAST_PENDING:

@@ -4730,7 +4730,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 	for_each_eth_queue(bp, q) {
 		/* Set the appropriate Queue object */
 		fp = &bp->fp[q];
-		queue_params.q_obj = &fp->q_obj;
+		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
 		/* send the ramrod */
 		rc = bnx2x_queue_state_change(bp, &queue_params);

@@ -4742,7 +4742,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
 #ifdef BCM_CNIC
 	if (!NO_FCOE(bp)) {
 		fp = &bp->fp[FCOE_IDX(bp)];
-		queue_params.q_obj = &fp->q_obj;
+		queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 
 		/* clear pending completion bit */
 		__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);

@@ -4775,10 +4775,10 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
 	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
 
 #ifdef BCM_CNIC
 	if (cid == BNX2X_FCOE_ETH_CID(bp))
-		return &bnx2x_fcoe(bp, q_obj);
+		return &bnx2x_fcoe_sp_obj(bp, q_obj);
 	else
 #endif
-		return &bnx2x_fp(bp, CID_TO_FP(cid, bp), q_obj);
+		return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
 }
 
 static void bnx2x_eq_int(struct bnx2x *bp)

@@ -5667,8 +5667,8 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
 		cids[cos] = fp->txdata_ptr[cos]->cid;
 	}
-	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
-			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+	bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+			     fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
 			     bnx2x_sp_mapping(bp, q_rdata), q_type);
 
 	/**

@@ -7596,8 +7596,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 
 	/* Eth MAC is set on RSS leading client (fp[0]) */
-	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
-				 BNX2X_ETH_MAC, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+				 set, BNX2X_ETH_MAC, &ramrod_flags);
 }
 
 int bnx2x_setup_leading(struct bnx2x *bp)

@@ -7877,7 +7877,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
 		     IGU_INT_ENABLE, 0);
 
-	q_params.q_obj = &fp->q_obj;
+	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 	/* We want to wait for completion in this context */
 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

@@ -7950,7 +7950,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
 	DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
 
-	q_params.q_obj = &fp->q_obj;
+	q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
 	/* We want to wait for completion in this context */
 	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

@@ -8339,12 +8339,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
 	usleep_range(1000, 1000);
 
 	/* Clean all ETH MACs */
-	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+				false);
 	if (rc < 0)
 		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
 
 	/* Clean up UC list */
-	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+	rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
 				true);
 	if (rc < 0)
 		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",

@@ -11049,7 +11050,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
 	int rc;
 	struct net_device *dev = bp->dev;
 	struct netdev_hw_addr *ha;
-	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
 	unsigned long ramrod_flags = 0;
 
 	/* First schedule a cleanup up of old configuration */
......
@@ -859,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 		struct tstorm_per_queue_stats *tclient =
 			&bp->fw_stats_data->queue_stats[i].
 			tstorm_queue_statistics;
-		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+		struct tstorm_per_queue_stats *old_tclient =
+			&bnx2x_fp_stats(bp, fp)->old_tclient;
 		struct ustorm_per_queue_stats *uclient =
 			&bp->fw_stats_data->queue_stats[i].
 			ustorm_queue_statistics;
-		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+		struct ustorm_per_queue_stats *old_uclient =
+			&bnx2x_fp_stats(bp, fp)->old_uclient;
 		struct xstorm_per_queue_stats *xclient =
 			&bp->fw_stats_data->queue_stats[i].
 			xstorm_queue_statistics;
-		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+		struct xstorm_per_queue_stats *old_xclient =
+			&bnx2x_fp_stats(bp, fp)->old_xclient;
+		struct bnx2x_eth_q_stats *qstats =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
 
 		u32 diff;

@@ -1052,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
 	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
 
 	tmp = estats->mac_discard;
-	for_each_rx_queue(bp, i)
-		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+	for_each_rx_queue(bp, i) {
+		struct tstorm_per_queue_stats *old_tclient =
+			&bp->fp_stats[i].old_tclient;
+		tmp += le32_to_cpu(old_tclient->checksum_discard);
+	}
 	nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
 	nstats->tx_dropped = 0;

@@ -1103,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
 	int i;
 
 	for_each_queue(bp, i) {
-		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
 		struct bnx2x_eth_q_stats_old *qstats_old =
-			&bp->fp[i].eth_q_stats_old;
+			&bp->fp_stats[i].eth_q_stats_old;
 
 		UPDATE_ESTAT_QSTAT(driver_xoff);
 		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);

@@ -1483,15 +1491,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
 
 	/* function stats */
 	for_each_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
 
-		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
-		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
-		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+		memset(&fp_stats->old_tclient, 0,
+		       sizeof(fp_stats->old_tclient));
+		memset(&fp_stats->old_uclient, 0,
+		       sizeof(fp_stats->old_uclient));
+		memset(&fp_stats->old_xclient, 0,
+		       sizeof(fp_stats->old_xclient));
 		if (bp->stats_init) {
-			memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
-			memset(&fp->eth_q_stats_old, 0,
-			       sizeof(fp->eth_q_stats_old));
+			memset(&fp_stats->eth_q_stats, 0,
+			       sizeof(fp_stats->eth_q_stats));
+			memset(&fp_stats->eth_q_stats_old, 0,
+			       sizeof(fp_stats->eth_q_stats_old));
 		}
 	}

@@ -1533,8 +1545,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
 	/* save queue statistics */
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-		struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+		struct bnx2x_eth_q_stats *qstats =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats;
+		struct bnx2x_eth_q_stats_old *qstats_old =
+			&bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
 
 		UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
 		UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);

@@ -1590,8 +1604,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 	memset(afex_stats, 0, sizeof(struct afex_stats));
 
 	for_each_eth_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+		struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
 
 		ADD_64(afex_stats->rx_unicast_bytes_hi,
 		       qstats->total_unicast_bytes_received_hi,
......