Commit 924d75ab authored by Yuval Mintz, committed by David S. Miller

bnx2x: reorganization and beautification

Slightly changes the bnx2x code without any true functional change.
Changes include:
 1. Gathering macros into a single macro when the combination is used
    multiple times (a sketch of the new macros follows the commit header).
 2. Exporting parts of functions into their own functions.
 3. Returning values after an if-else instead of only on the else condition
    (where the current flow would simply return the same value later in the
    code).
 4. Removing some unnecessary code (either dead code or incorrect conditions).
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 2de67439
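A minimal sketch of the macro consolidation from change (1), assuming the new helpers sit next to the existing SGE macros in bnx2x.h (that header hunk is not part of this excerpt); the definitions below are inferred from how the diff uses SGE_PAGES and TPA_AGG_SIZE:

/* One SGE buffer spans PAGES_PER_SGE pages of SGE_PAGE_SIZE bytes each;
 * the repeated SGE_PAGE_SIZE * PAGES_PER_SGE product is gathered here.
 */
#define SGE_PAGES	(SGE_PAGE_SIZE * PAGES_PER_SGE)

/* Largest TPA aggregation: at most 8 SGEs (bounded by MAX_SKB_FRAGS),
 * clamped to 0xffff, presumably because the corresponding fields
 * (e.g. the u16 sge_sz below) are 16 bits wide.
 */
#define TPA_AGG_SIZE	min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
				    SGE_PAGES), 0xffff)

With these in place, call sites such as dma_map_page(..., SGE_PAGES, DMA_FROM_DEVICE) and tpa_agg_size = TPA_AGG_SIZE in the hunks below replace the spelled-out products.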
@@ -417,8 +417,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
 	if (fp->mode == TPA_MODE_GRO) {
 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
-		tpa_info->full_page =
-			SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
 		tpa_info->gro_size = gro_size;
 	}
@@ -499,7 +498,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	}
 	mapping = dma_map_page(&bp->pdev->dev, page, 0,
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+			       SGE_PAGES, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		BNX2X_ERR("Can't map sge\n");
@@ -541,7 +540,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		  le16_to_cpu(cqe->pkt_len));
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
@@ -559,8 +558,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		if (fp->mode == TPA_MODE_GRO)
 			frag_len = min_t(u32, frag_size, (u32)full_page);
 		else /* LRO */
-			frag_len = min_t(u32, frag_size,
-					 (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
 		rx_pg = &fp->rx_page_ring[sge_idx];
 		old_rx_pg = *rx_pg;
@@ -576,7 +574,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		/* Unmap the page as we r going to pass it to the stack */
 		dma_unmap_page(&bp->pdev->dev,
 			       dma_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+			       SGE_PAGES, DMA_FROM_DEVICE);
 		/* Add one frag and update the appropriate fields in the skb */
 		if (fp->mode == TPA_MODE_LRO)
 			skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -594,7 +592,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 		skb->data_len += frag_len;
-		skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
+		skb->truesize += SGE_PAGES;
 		skb->len += frag_len;
 		frag_size -= frag_len;
@@ -2500,12 +2498,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

-	/* Set the initial link reported state to link down */
-	bnx2x_acquire_phy_lock(bp);
 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
 		  &bp->last_reported_link.link_report_flags);
-	bnx2x_release_phy_lock(bp);

 	if (IS_PF(bp))
 		/* must be called before memory allocation and HW init */
@@ -3346,12 +3341,11 @@ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
-	} else
-		/* We support checksum offload for TCP and UDP only.
-		 * No need to pass the UDP header length - it's a constant.
-		 */
-		return skb_transport_header(skb) +
-			sizeof(struct udphdr) - skb->data;
+	}
+	/* We support checksum offload for TCP and UDP only.
+	 * No need to pass the UDP header length - it's a constant.
+	 */
+	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
 }

 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
...
@@ -403,7 +403,7 @@ void bnx2x_set_rx_mode(struct net_device *dev);
  * If bp->state is OPEN, should be called with
  * netif_addr_lock_bh().
  */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp);

 /**
  * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
@@ -415,11 +415,11 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
  * @tx_accept_flags: tx accept configuration (tx switch)
  * @ramrod_flags: ramrod configuration
  */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
-			 unsigned long rx_mode_flags,
-			 unsigned long rx_accept_flags,
-			 unsigned long tx_accept_flags,
-			 unsigned long ramrod_flags);
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			unsigned long rx_mode_flags,
+			unsigned long rx_accept_flags,
+			unsigned long tx_accept_flags,
+			unsigned long ramrod_flags);

 /* Parity errors related */
 void bnx2x_set_pf_load(struct bnx2x *bp);
@@ -821,7 +821,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 		return;
 	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
-		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+		       SGE_PAGES, DMA_FROM_DEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 	sw_buf->page = NULL;
...
@@ -3002,9 +3002,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 			   "rss re-configured, UDP 4-tupple %s\n",
 			   udp_rss_requested ? "enabled" : "disabled");
 			return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
-		} else {
-			return 0;
 		}
+		return 0;
 	case IPV4_FLOW:
 	case IPV6_FLOW:
 		/* For IP only 2-tupple hash is supported */
@@ -3012,9 +3012,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 			DP(BNX2X_MSG_ETHTOOL,
 			   "Command parameters not supported\n");
 			return -EINVAL;
-		} else {
-			return 0;
 		}
+		return 0;
 	case SCTP_V4_FLOW:
 	case AH_ESP_V4_FLOW:
 	case AH_V4_FLOW:
@@ -3030,9 +3030,9 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
 			DP(BNX2X_MSG_ETHTOOL,
 			   "Command parameters not supported\n");
 			return -EINVAL;
-		} else {
-			return 0;
 		}
+		return 0;
 	default:
 		return -EINVAL;
 	}
...
@@ -3034,15 +3034,12 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
 			pause->sge_th_hi + FW_PREFETCH_CNT >
 			MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
-		tpa_agg_size = min_t(u32,
-			(min_t(u32, 8, MAX_SKB_FRAGS) *
-			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
+		tpa_agg_size = TPA_AGG_SIZE;
 		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
 			SGE_PAGE_SHIFT;
 		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
 			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
-		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
-				    0xffff);
+		sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
 	}

 	/* pause - not for e1 */
@@ -5673,13 +5670,12 @@ static void bnx2x_init_eq_ring(struct bnx2x *bp)
 		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
 }

 /* called with netif_addr_lock_bh() */
-void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
-			 unsigned long rx_mode_flags,
-			 unsigned long rx_accept_flags,
-			 unsigned long tx_accept_flags,
-			 unsigned long ramrod_flags)
+int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			unsigned long rx_mode_flags,
+			unsigned long rx_accept_flags,
+			unsigned long tx_accept_flags,
+			unsigned long ramrod_flags)
 {
 	struct bnx2x_rx_mode_ramrod_params ramrod_param;
 	int rc;
@@ -5709,22 +5705,21 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
 	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
 	if (rc < 0) {
 		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
-		return;
+		return rc;
 	}
+
+	return 0;
 }

-/* called with netif_addr_lock_bh() */
-void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
+			    unsigned long *rx_accept_flags,
+			    unsigned long *tx_accept_flags)
 {
-	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
-	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
-
-	if (!NO_FCOE(bp))
-		/* Configure rx_mode of FCoE Queue */
-		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+	/* Clear the flags first */
+	*rx_accept_flags = 0;
+	*tx_accept_flags = 0;

-	switch (bp->rx_mode) {
+	switch (rx_mode) {
 	case BNX2X_RX_MODE_NONE:
 		/*
 		 * 'drop all' supersedes any accept flags that may have been
@@ -5732,25 +5727,25 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 		 */
 		break;
 	case BNX2X_RX_MODE_NORMAL:
-		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

 		/* internal switching mode */
-		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

 		break;
 	case BNX2X_RX_MODE_ALLMULTI:
-		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

 		/* internal switching mode */
-		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

 		break;
 	case BNX2X_RX_MODE_PROMISC:
@@ -5758,36 +5753,57 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
 		 * should receive matched and unmatched (in resolution of port)
 		 * unicast packets.
 		 */
-		__set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);

 		/* internal switching mode */
-		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);

 		if (IS_MF_SI(bp))
-			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
 		else
-			__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+			__set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);

 		break;
 	default:
-		BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
-		return;
+		BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
+		return -EINVAL;
 	}

 	/* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
 	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
-		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
 	}

+	return 0;
+}
+
+/* called with netif_addr_lock_bh() */
+int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+	int rc;
+
+	if (!NO_FCOE(bp))
+		/* Configure rx_mode of FCoE Queue */
+		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+
+	rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
+				     &tx_accept_flags);
+	if (rc)
+		return rc;
+
 	__set_bit(RAMROD_RX, &ramrod_flags);
 	__set_bit(RAMROD_TX, &ramrod_flags);

-	bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
-			    tx_accept_flags, ramrod_flags);
+	return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
+				   rx_accept_flags, tx_accept_flags,
+				   ramrod_flags);
 }

 static void bnx2x_init_internal_common(struct bnx2x *bp)
@@ -9539,36 +9555,6 @@ u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
 	return base + (BP_ABS_FUNC(bp)) * stride;
 }

-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
-{
-	u32 reg = bnx2x_get_pretend_reg(bp);
-
-	/* Flush all outstanding writes */
-	mmiowb();
-
-	/* Pretend to be function 0 */
-	REG_WR(bp, reg, 0);
-	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */
-
-	/* From now we are in the "like-E1" mode */
-	bnx2x_int_disable(bp);
-
-	/* Flush all outstanding writes */
-	mmiowb();
-
-	/* Restore the original function */
-	REG_WR(bp, reg, BP_ABS_FUNC(bp));
-	REG_RD(bp, reg);
-}
-
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
-{
-	if (CHIP_IS_E1(bp))
-		bnx2x_int_disable(bp);
-	else
-		bnx2x_undi_int_disable_e1h(bp);
-}
-
 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 					struct bnx2x_mac_vals *vals)
 {
@@ -9856,7 +9842,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	/* Check if the UNDI driver was previously loaded
 	 * UNDI driver initializes CID offset for normal bell to 0x7
 	 */
-	reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
 	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
 		tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
 		if (tmp_reg == 0x7) {
...
@@ -2191,7 +2191,7 @@ static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
 }

 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
-						  unsigned long accept_flags,
+						  unsigned long *accept_flags,
 						  struct eth_filter_rules_cmd *cmd,
 						  bool clear_accept_all)
 {
@@ -2201,33 +2201,33 @@ static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
 	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
 		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

-	if (accept_flags) {
-		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
-			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+	if (test_bit(BNX2X_ACCEPT_UNICAST, accept_flags))
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

-		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
-			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, accept_flags))
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

-		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
-			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
-			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
-		}
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, accept_flags)) {
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+	}

-		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
-			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
-			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
-		}
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, accept_flags)) {
+		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+	}

-		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
-			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, accept_flags))
+		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

-		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
-			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
-			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
-		}
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, accept_flags)) {
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}

-		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
-			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
-	}
+	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, accept_flags))
+		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

 	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
 	if (clear_accept_all) {
 		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
@@ -2260,8 +2260,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
 		data->rules[rule_idx].cmd_general_data =
 			ETH_FILTER_RULES_CMD_TX_CMD;

-		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
-					       &(data->rules[rule_idx++]), false);
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       false);
 	}

 	/* Rx */
@@ -2272,8 +2273,9 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
 		data->rules[rule_idx].cmd_general_data =
 			ETH_FILTER_RULES_CMD_RX_CMD;

-		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
-					       &(data->rules[rule_idx++]), false);
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+					       &(data->rules[rule_idx++]),
+					       false);
 	}
@@ -2293,9 +2295,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
 		data->rules[rule_idx].cmd_general_data =
 			ETH_FILTER_RULES_CMD_TX_CMD;

-		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
-					       &(data->rules[rule_idx++]),
-					       true);
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->tx_accept_flags,
+					       &(data->rules[rule_idx]),
+					       true);
+		rule_idx++;
 	}

 	/* Rx */
@@ -2306,9 +2309,10 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
 		data->rules[rule_idx].cmd_general_data =
 			ETH_FILTER_RULES_CMD_RX_CMD;

-		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
-					       &(data->rules[rule_idx++]),
-					       true);
+		bnx2x_rx_mode_set_cmd_state_e2(bp, &p->rx_accept_flags,
+					       &(data->rules[rule_idx]),
+					       true);
+		rule_idx++;
 	}
 }
...