Commit fa0d25f9 authored by Arend van Spriel, committed by Kleber Sacilotto de Souza

brcmfmac: revise handling events in receive path

BugLink: https://bugs.launchpad.net/bugs/1832661

commit 9c349892 upstream.

Move event handling out of brcmf_netif_rx(), avoiding the need
to pass a flag. This flag is only ever true for USB hosts, as the
other interfaces use the separate brcmf_rx_event() function.
Reviewed-by: Hante Meuleman <hante.meuleman@broadcom.com>
Reviewed-by: Pieter-Paul Giesberts <pieter-paul.giesberts@broadcom.com>
Reviewed-by: Franky Lin <franky.lin@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
[bwh: Backported to 4.4 as dependency of commit a4176ec3
 "brcmfmac: add subtype check for event handling in data path"
 - Adjust filenames, context]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent 08bafd94
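
The hunks below touch the bus header (bus.h), the common core code (core.c, core.h) and the msgbuf path (msgbuf.c). As a reading aid, here is a condensed sketch of the resulting brcmf_rx_frame(), pieced together from those hunks; it is illustrative only, not the verbatim backported 4.4 source, and the interface lookup is elided.

/* Condensed view of the receive path after this change (see hunks below).
 * brcmf_netif_rx() loses the handle_event flag; firmware event processing
 * is now done only here, and only when the bus passes handle_event.
 */
void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(dev);
	struct brcmf_if *ifp;
	struct brcmf_skb_reorder_data *rd;

	/* ... resolve ifp via bus_if; free the skb and return on failure ... */

	skb->protocol = eth_type_trans(skb, ifp->ndev);

	rd = (struct brcmf_skb_reorder_data *)skb->cb;
	if (rd->reorder) {
		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
	} else {
		/* Process special event packets */
		if (handle_event)
			brcmf_fweh_process_skb(ifp->drvr, skb);

		brcmf_netif_rx(ifp, skb);
	}
}

With this restructuring, brcmf_netif_rx() is left with only the multicast accounting and netif delivery, so the reorder and msgbuf callers no longer need to pass a dummy false flag.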
@@ -214,7 +214,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
 		      int prec);
 
 /* Receive frame for delivery to OS. Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_evnt);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
 /* Receive async event packet from firmware. Callee disposes of rxp. */
 void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);

@@ -301,18 +301,11 @@ void brcmf_txflowblock(struct device *dev, bool state)
 	brcmf_fws_bus_blocked(drvr, state);
 }
 
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb,
-		    bool handle_event)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
 {
-	skb->protocol = eth_type_trans(skb, ifp->ndev);
-
 	if (skb->pkt_type == PACKET_MULTICAST)
 		ifp->stats.multicast++;
 
-	/* Process special event packets */
-	if (handle_event)
-		brcmf_fweh_process_skb(ifp->drvr, skb);
-
 	if (!(ifp->ndev->flags & IFF_UP)) {
 		brcmu_pkt_buf_free_skb(skb);
 		return;

@@ -372,7 +365,7 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
 	/* validate flags and flow id */
 	if (flags == 0xFF) {
 		brcmf_err("invalid flags...so ignore this packet\n");
-		brcmf_netif_rx(ifp, pkt, false);
+		brcmf_netif_rx(ifp, pkt);
 		return;
 	}

@@ -384,7 +377,7 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
 		if (rfi == NULL) {
 			brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
 				  flow_id);
-			brcmf_netif_rx(ifp, pkt, false);
+			brcmf_netif_rx(ifp, pkt);
 			return;
 		}

@@ -409,7 +402,7 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
 		rfi = kzalloc(buf_size, GFP_ATOMIC);
 		if (rfi == NULL) {
 			brcmf_err("failed to alloc buffer\n");
-			brcmf_netif_rx(ifp, pkt, false);
+			brcmf_netif_rx(ifp, pkt);
 			return;
 		}

@@ -523,11 +516,11 @@ static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
 netif_rx:
 	skb_queue_walk_safe(&reorder_list, pkt, pnext) {
 		__skb_unlink(pkt, &reorder_list);
-		brcmf_netif_rx(ifp, pkt, false);
+		brcmf_netif_rx(ifp, pkt);
 	}
 }
 
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
 {
 	struct brcmf_if *ifp;
 	struct brcmf_bus *bus_if = dev_get_drvdata(dev);

@@ -547,11 +540,18 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_evnt)
 		return;
 	}
 
+	skb->protocol = eth_type_trans(skb, ifp->ndev);
+
 	rd = (struct brcmf_skb_reorder_data *)skb->cb;
-	if (rd->reorder)
+	if (rd->reorder) {
 		brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
-	else
-		brcmf_netif_rx(ifp, skb, handle_evnt);
+	} else {
+		/* Process special event packets */
+		if (handle_event)
+			brcmf_fweh_process_skb(ifp->drvr, skb);
+
+		brcmf_netif_rx(ifp, skb);
+	}
 }
 
 void brcmf_rx_event(struct device *dev, struct sk_buff *skb)

@@ -215,8 +215,7 @@ int brcmf_get_next_free_bsscfgidx(struct brcmf_pub *drvr);
 void brcmf_txflowblock_if(struct brcmf_if *ifp,
 			  enum brcmf_netif_stop_reason reason, bool state);
 void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb,
-		    bool handle_event);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
 void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
 
 #endif /* BRCMFMAC_CORE_H */

@@ -1155,7 +1155,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 		brcmu_pkt_buf_free_skb(skb);
 		return;
 	}
-	brcmf_netif_rx(ifp, skb, false);
+	brcmf_netif_rx(ifp, skb);
 }
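
For context (not part of this diff): per the commit message, the only bus that passes handle_event as true is USB; SDIO and PCIe hand firmware events to the separate brcmf_rx_event() path instead. A rough sketch of what that USB call site looks like, assuming the usual brcmf_usb_rx_complete() flow in usb.c:

/* Illustrative sketch only -- assumed from the USB receive-completion flow,
 * not shown in the hunks above. */
static void brcmf_usb_rx_complete(struct urb *urb)
{
	struct brcmf_usbreq *req = urb->context;
	struct brcmf_usbdev_info *devinfo = req->devinfo;
	struct sk_buff *skb = req->skb;

	/* ... status checks and bookkeeping elided ... */
	skb_put(skb, urb->actual_length);
	brcmf_rx_frame(devinfo->dev, skb, true);	/* USB: handle events inline */
	/* ... re-queue the request for further receive ... */
}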