Commit d71a756a authored by David S. Miller

Merge branch 'dsa-ACB-for-bcm_sf2-and-bcmsysport'

Florian Fainelli says:

====================
Enable ACB for bcm_sf2 and bcmsysport

This patch series enables Broadcom's Advanced Congestion Buffering mechanism
which requires cooperation between the CPU/Management Ethernet MAC controller
and the switch.

I took the notifier approach because ultimately the information we need to
carry to the master network device is DSA specific and I saw little room for
generalizing beyond what DSA requires. Chances are that this is highly specific
to the Broadcom HW, as I don't know of any other HW out there that supports
something even remotely similar for similar or identical needs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3f7832c2 723934fb
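
At its core, the series adds a DSA notifier chain that lets the master (CPU)
network device learn about slave devices as they register. A minimal sketch of
the consumer side, assuming a master driver with a private context; my_priv,
my_map_queues and my_probe are illustrative placeholders, the real consumer is
the bcmsysport diff below:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/dsa.h>

struct my_priv {
	struct notifier_block dsa_notifier;	/* registered at probe time */
};

/* Placeholder: remember the info->port_number -> TX queue mapping here */
static int my_map_queues(struct net_device *master,
			 struct dsa_notifier_register_info *info)
{
	return 0;
}

static int my_dsa_notifier(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct dsa_notifier_register_info *info = ptr;

	if (event != DSA_PORT_REGISTER)
		return NOTIFY_DONE;

	/* info->master is the CPU-facing net_device; info->port_number is
	 * the switch port behind the freshly created slave device.
	 */
	return notifier_from_errno(my_map_queues(info->master, info));
}

static int my_probe(struct my_priv *priv)
{
	priv->dsa_notifier.notifier_call = my_dsa_notifier;
	return register_dsa_notifier(&priv->dsa_notifier);
}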
@@ -205,6 +205,19 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
 	if (port == priv->moca_port)
 		bcm_sf2_port_intr_enable(priv, port);
 
+	/* Set per-queue pause threshold to 32 */
+	core_writel(priv, 32, CORE_TXQ_THD_PAUSE_QN_PORT(port));
+
+	/* Set ACB threshold to 24 */
+	for (i = 0; i < SF2_NUM_EGRESS_QUEUES; i++) {
+		reg = acb_readl(priv, ACB_QUEUE_CFG(port *
+						    SF2_NUM_EGRESS_QUEUES + i));
+		reg &= ~XOFF_THRESHOLD_MASK;
+		reg |= 24;
+		acb_writel(priv, reg, ACB_QUEUE_CFG(port *
+						    SF2_NUM_EGRESS_QUEUES + i));
+	}
+
 	return b53_enable_port(ds, port, phy);
 }
 
@@ -613,6 +626,20 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
 		status->pause = 1;
 }
 
+static void bcm_sf2_enable_acb(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+	u32 reg;
+
+	/* Enable ACB globally */
+	reg = acb_readl(priv, ACB_CONTROL);
+	reg |= (ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+	acb_writel(priv, reg, ACB_CONTROL);
+	reg &= ~(ACB_FLUSH_MASK << ACB_FLUSH_SHIFT);
+	reg |= ACB_EN | ACB_ALGORITHM;
+	acb_writel(priv, reg, ACB_CONTROL);
+}
+
 static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
 {
 	struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -655,6 +682,8 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
 			bcm_sf2_imp_setup(ds, port);
 	}
 
+	bcm_sf2_enable_acb(ds);
+
 	return 0;
 }
@@ -766,6 +795,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 	}
 
 	bcm_sf2_sw_configure_vlan(ds);
+	bcm_sf2_enable_acb(ds);
 
 	return 0;
 }
......
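The per-port loop above programs one ACB_QUEUE_CFG register per egress queue;
the registers form a single flat, port-major array. A small sketch of the
offset arithmetic, assuming SF2_NUM_EGRESS_QUEUES is 8 (its value in
bcm_sf2.h) and using the macros from the header diff below:

#define SF2_NUM_EGRESS_QUEUES	8	/* assumed, see bcm_sf2.h */
#define ACB_QUEUE_0_CFG		0x08
#define ACB_QUEUE_CFG(x)	(ACB_QUEUE_0_CFG + ((x) * 0x4))

/* Flat, port-major index: port 5, queue 2 -> index 42 -> offset 0xb0 */
static unsigned int sf2_acb_queue_off(unsigned int port, unsigned int queue)
{
	return ACB_QUEUE_CFG(port * SF2_NUM_EGRESS_QUEUES + queue);
}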
@@ -115,6 +115,24 @@ enum bcm_sf2_reg_offs {
 #define P7_IRQ_OFF			0
 #define P_IRQ_OFF(x)			((6 - (x)) * P_NUM_IRQ)
 
+/* Register set relative to 'ACB' */
+#define ACB_CONTROL			0x00
+#define  ACB_EN				(1 << 0)
+#define  ACB_ALGORITHM			(1 << 1)
+#define  ACB_FLUSH_SHIFT		2
+#define  ACB_FLUSH_MASK			0x3
+
+#define ACB_QUEUE_0_CFG			0x08
+#define  XOFF_THRESHOLD_MASK		0x7ff
+#define  XON_EN				(1 << 11)
+#define  TOTAL_XOFF_THRESHOLD_SHIFT	12
+#define  TOTAL_XOFF_THRESHOLD_MASK	0x7ff
+#define  TOTAL_XOFF_EN			(1 << 23)
+#define  TOTAL_XON_EN			(1 << 24)
+#define  PKTLEN_SHIFT			25
+#define  PKTLEN_MASK			0x3f
+#define ACB_QUEUE_CFG(x)		(ACB_QUEUE_0_CFG + ((x) * 0x4))
+
 /* Register set relative to 'CORE' */
 #define CORE_G_PCTL_PORT0		0x00000
 #define CORE_G_PCTL_PORT(x)		(CORE_G_PCTL_PORT0 + (x * 0x4))
@@ -237,6 +255,11 @@ enum bcm_sf2_reg_offs {
 #define CORE_PORT_VLAN_CTL_PORT(x)	(0xc400 + ((x) * 0x8))
 #define  PORT_VLAN_CTRL_MASK		0x1ff
 
+#define CORE_TXQ_THD_PAUSE_QN_PORT_0	0x2c80
+#define  TXQ_PAUSE_THD_MASK		0x7ff
+#define CORE_TXQ_THD_PAUSE_QN_PORT(x)	(CORE_TXQ_THD_PAUSE_QN_PORT_0 + \
+					(x) * 0x8)
+
 #define CORE_DEFAULT_1Q_TAG_P(x)	(0xd040 + ((x) * 8))
 #define  CFI_SHIFT			12
 #define  PRI_SHIFT			13
......
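Reading the masks and shifts above together, the per-queue ACB config word
breaks down as: bits 10:0 XOFF threshold, bit 11 XON enable, bits 22:12 total
XOFF threshold, bit 23 total XOFF enable, bit 24 total XON enable, bits 30:25
packet length. A hypothetical helper composing such a word from those fields
(the patch itself only rewrites the XOFF threshold; XON and the total
enables are set here purely for illustration):

static u32 acb_queue_cfg_word(u32 xoff_thd, u32 total_xoff_thd, u32 pktlen)
{
	u32 reg = 0;

	reg |= xoff_thd & XOFF_THRESHOLD_MASK;		/* bits 10:0 */
	reg |= XON_EN;					/* bit 11 */
	reg |= (total_xoff_thd & TOTAL_XOFF_THRESHOLD_MASK) <<
	       TOTAL_XOFF_THRESHOLD_SHIFT;		/* bits 22:12 */
	reg |= TOTAL_XOFF_EN | TOTAL_XON_EN;		/* bits 23, 24 */
	reg |= (pktlen & PKTLEN_MASK) << PKTLEN_SHIFT;	/* bits 30:25 */

	return reg;
}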
@@ -1416,9 +1416,20 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
 	tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
-	tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+
+	/* Configure QID and port mapping */
+	reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
+	reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
+	reg |= ring->switch_queue & RING_QID_MASK;
+	reg |= ring->switch_port << RING_PORT_ID_SHIFT;
+	tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
 	tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
 
+	/* Enable ACB algorithm 2 */
+	reg = tdma_readl(priv, TDMA_CONTROL);
+	reg |= tdma_control_bit(priv, ACB_ALGO);
+	tdma_writel(priv, reg, TDMA_CONTROL);
+
 	/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
 	 * with the original definition of ACB_ALGO
 	 */
@@ -1447,8 +1458,9 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	napi_enable(&ring->napi);
 
 	netif_dbg(priv, hw, priv->netdev,
-		  "TDMA cfg, size=%d, desc_cpu=%p\n",
-		  ring->size, ring->desc_cpu);
+		  "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
+		  ring->size, ring->desc_cpu, ring->switch_queue,
+		  ring->switch_port);
 
 	return 0;
 }
@@ -2011,6 +2023,92 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 };
 
+static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
+				    void *accel_priv,
+				    select_queue_fallback_t fallback)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	u16 queue = skb_get_queue_mapping(skb);
+	struct bcm_sysport_tx_ring *tx_ring;
+	unsigned int q, port;
+
+	if (!netdev_uses_dsa(dev))
+		return fallback(dev, skb);
+
+	/* DSA tagging layer will have configured the correct queue */
+	q = BRCM_TAG_GET_QUEUE(queue);
+	port = BRCM_TAG_GET_PORT(queue);
+	tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
+
+	return tx_ring->index;
+}
+
+static int bcm_sysport_map_queues(struct net_device *dev,
+				  struct dsa_notifier_register_info *info)
+{
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
+	struct bcm_sysport_tx_ring *ring;
+	struct net_device *slave_dev;
+	unsigned int num_tx_queues;
+	unsigned int q, start, port;
+
+	/* We can't be setting up queue inspection for switches that are not
+	 * directly attached
+	 */
+	if (info->switch_number)
+		return 0;
+
+	port = info->port_number;
+	slave_dev = info->info.dev;
+
+	/* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
+	 * 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
+	 * per-port (slave_dev) network device queues, we achieve just that.
+	 * This needs to happen now, before any slave network device is used,
+	 * such that it accurately reflects the number of real TX queues.
+	 */
+	if (priv->is_lite)
+		netif_set_real_num_tx_queues(slave_dev,
+					     slave_dev->num_tx_queues / 2);
+	num_tx_queues = slave_dev->real_num_tx_queues;
+
+	if (priv->per_port_num_tx_queues &&
+	    priv->per_port_num_tx_queues != num_tx_queues)
+		netdev_warn(slave_dev, "asymmetric number of per-port queues\n");
+
+	priv->per_port_num_tx_queues = num_tx_queues;
+
+	start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
+	for (q = 0; q < num_tx_queues; q++) {
+		ring = &priv->tx_rings[q + start];
+
+		/* Just remember the mapping; the actual programming is done
+		 * during bcm_sysport_init_tx_ring()
+		 */
+		ring->switch_queue = q;
+		ring->switch_port = port;
+		priv->ring_map[q + port * num_tx_queues] = ring;
+
+		/* Set all queues as being used now */
+		set_bit(q + start, &priv->queue_bitmap);
+	}
+
+	return 0;
+}
+
+static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
+				    unsigned long event, void *ptr)
+{
+	struct dsa_notifier_register_info *info;
+
+	if (event != DSA_PORT_REGISTER)
+		return NOTIFY_DONE;
+
+	info = ptr;
+
+	return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
+}
+
 static const struct net_device_ops bcm_sysport_netdev_ops = {
 	.ndo_start_xmit		= bcm_sysport_xmit,
 	.ndo_tx_timeout		= bcm_sysport_tx_timeout,
@@ -2023,6 +2121,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
 	.ndo_poll_controller	= bcm_sysport_poll_controller,
 #endif
 	.ndo_get_stats64	= bcm_sysport_get_stats64,
+	.ndo_select_queue	= bcm_sysport_select_queue,
 };
 
 #define REV_FMT	"v%2x.%02x"
@@ -2172,10 +2271,18 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
 	u64_stats_init(&priv->syncp);
 
+	priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+
+	ret = register_dsa_notifier(&priv->dsa_notifier);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register DSA notifier\n");
+		goto err_deregister_fixed_link;
+	}
+
 	ret = register_netdev(dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register net_device\n");
-		goto err_deregister_fixed_link;
+		goto err_deregister_notifier;
 	}
 
 	priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
@@ -2188,6 +2295,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
 	return 0;
 
+err_deregister_notifier:
+	unregister_dsa_notifier(&priv->dsa_notifier);
 err_deregister_fixed_link:
 	if (of_phy_is_fixed_link(dn))
 		of_phy_deregister_fixed_link(dn);
@@ -2199,11 +2308,13 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 static int bcm_sysport_remove(struct platform_device *pdev)
 {
 	struct net_device *dev = dev_get_drvdata(&pdev->dev);
+	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct device_node *dn = pdev->dev.of_node;
 
 	/* Not much to do, ndo_close has been called
 	 * and we use managed allocations
 	 */
+	unregister_dsa_notifier(&priv->dsa_notifier);
 	unregister_netdev(dev);
 	if (of_phy_is_fixed_link(dn))
 		of_phy_deregister_fixed_link(dn);
......
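To see how the pieces connect: the Broadcom tagger (tag_brcm.c, below) packs
(port, queue) into skb->queue_mapping, and bcm_sysport_select_queue() unpacks
it to index ring_map[]. A standalone arithmetic check of that round trip,
with illustrative values (switch port 2, queue 1, 4 queues per port):

#include <assert.h>

#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)

int main(void)
{
	unsigned int per_port_num_tx_queues = 4;		/* assumed */
	unsigned int mapping = BRCM_TAG_SET_PORT_QUEUE(2, 1);	/* 0x201 */
	unsigned int q = BRCM_TAG_GET_QUEUE(mapping);		/* 1 */
	unsigned int port = BRCM_TAG_GET_PORT(mapping);		/* 2 */

	/* Same index expression as priv->ring_map[...] in the driver */
	assert(q + port * per_port_num_tx_queues == 9);
	return 0;
}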
@@ -404,7 +404,7 @@ struct bcm_rsb {
 #define  RING_CONS_INDEX_MASK		0xffff
 
 #define RING_MAPPING			0x14
-#define  RING_QID_MASK			0x3
+#define  RING_QID_MASK			0x7
 #define  RING_PORT_ID_SHIFT		3
 #define  RING_PORT_ID_MASK		0x7
 #define  RING_IGNORE_STATUS		(1 << 6)
@@ -712,6 +712,8 @@ struct bcm_sysport_tx_ring {
 	struct bcm_sysport_priv *priv;	/* private context backpointer */
 	unsigned long	packets;	/* packets statistics */
 	unsigned long	bytes;		/* bytes statistics */
+	unsigned int	switch_queue;	/* switch port queue number */
+	unsigned int	switch_port;	/* switch port number */
 };
 
 /* Driver private structure */
@@ -765,5 +767,12 @@ struct bcm_sysport_priv {
 
 	/* For atomic update generic 64bit value on 32bit Machine */
 	struct u64_stats_sync	syncp;
+
+	/* map information between switch port queues and local queues */
+	struct notifier_block	dsa_notifier;
+	unsigned int		per_port_num_tx_queues;
+	unsigned long		queue_bitmap;
+	struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
 };
 
 #endif /* __BCM_SYSPORT_H */
@@ -471,4 +471,54 @@ static inline int dsa_switch_resume(struct dsa_switch *ds)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+enum dsa_notifier_type {
+	DSA_PORT_REGISTER,
+	DSA_PORT_UNREGISTER,
+};
+
+struct dsa_notifier_info {
+	struct net_device *dev;
+};
+
+struct dsa_notifier_register_info {
+	struct dsa_notifier_info info;	/* must be first */
+	struct net_device *master;
+	unsigned int port_number;
+	unsigned int switch_number;
+};
+
+static inline struct net_device *
+dsa_notifier_info_to_dev(const struct dsa_notifier_info *info)
+{
+	return info->dev;
+}
+
+#if IS_ENABLED(CONFIG_NET_DSA)
+int register_dsa_notifier(struct notifier_block *nb);
+int unregister_dsa_notifier(struct notifier_block *nb);
+int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+		       struct dsa_notifier_info *info);
+#else
+static inline int register_dsa_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int unregister_dsa_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+				     struct dsa_notifier_info *info)
+{
+	return NOTIFY_DONE;
+}
+#endif
+
+/* Broadcom tag specific helpers to insert and extract queue/port number */
+#define BRCM_TAG_SET_PORT_QUEUE(p, q)	((p) << 8 | q)
+#define BRCM_TAG_GET_PORT(v)		((v) >> 8)
+#define BRCM_TAG_GET_QUEUE(v)		((v) & 0xff)
+
 #endif
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/notifier.h>
 #include <linux/of.h>
 #include <linux/of_mdio.h>
 #include <linux/of_platform.h>
@@ -261,6 +262,28 @@ bool dsa_schedule_work(struct work_struct *work)
 	return queue_work(dsa_owq, work);
 }
 
+static ATOMIC_NOTIFIER_HEAD(dsa_notif_chain);
+
+int register_dsa_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&dsa_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(register_dsa_notifier);
+
+int unregister_dsa_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&dsa_notif_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_dsa_notifier);
+
+int call_dsa_notifiers(unsigned long val, struct net_device *dev,
+		       struct dsa_notifier_info *info)
+{
+	info->dev = dev;
+	return atomic_notifier_call_chain(&dsa_notif_chain, val, info);
+}
+EXPORT_SYMBOL_GPL(call_dsa_notifiers);
+
 static int __init dsa_init_module(void)
 {
 	int rc;
......
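The chain above is an atomic notifier, so callbacks run under RCU and must
not sleep. The general pattern, reduced to its kernel-API skeleton (the
example_* names are illustrative, not part of the patch):

static ATOMIC_NOTIFIER_HEAD(example_chain);

static int example_cb(struct notifier_block *nb, unsigned long event,
		      void *ptr)
{
	return NOTIFY_OK;	/* must not sleep on an atomic chain */
}

static struct notifier_block example_nb = {
	.notifier_call = example_cb,
};

/* Registration and dispatch mirror register_dsa_notifier() and
 * call_dsa_notifiers() above:
 *
 *	atomic_notifier_chain_register(&example_chain, &example_nb);
 *	atomic_notifier_call_chain(&example_chain, SOME_EVENT, NULL);
 */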
@@ -1116,6 +1116,7 @@ int dsa_slave_resume(struct net_device *slave_dev)
 
 int dsa_slave_create(struct dsa_port *port, const char *name)
 {
+	struct dsa_notifier_register_info rinfo = { };
 	struct dsa_switch *ds = port->ds;
 	struct net_device *master;
 	struct net_device *slave_dev;
@@ -1177,6 +1178,12 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
 		goto out_free;
 	}
 
+	rinfo.info.dev = slave_dev;
+	rinfo.master = master;
+	rinfo.port_number = p->dp->index;
+	rinfo.switch_number = p->dp->ds->index;
+	call_dsa_notifiers(DSA_PORT_REGISTER, slave_dev, &rinfo.info);
+
 	ret = register_netdev(slave_dev);
 	if (ret) {
 		netdev_err(master, "error %d registering interface %s\n",
@@ -1200,6 +1207,7 @@ int dsa_slave_create(struct dsa_port *port, const char *name)
 void dsa_slave_destroy(struct net_device *slave_dev)
 {
 	struct dsa_slave_priv *p = netdev_priv(slave_dev);
+	struct dsa_notifier_register_info rinfo = { };
 	struct device_node *port_dn;
 
 	port_dn = p->dp->dn;
@@ -1211,6 +1219,11 @@ void dsa_slave_destroy(struct net_device *slave_dev)
 		if (of_phy_is_fixed_link(port_dn))
 			of_phy_deregister_fixed_link(port_dn);
 	}
+	rinfo.info.dev = slave_dev;
+	rinfo.master = p->dp->cpu_dp->netdev;
+	rinfo.port_number = p->dp->index;
+	rinfo.switch_number = p->dp->ds->index;
+	call_dsa_notifiers(DSA_PORT_UNREGISTER, slave_dev, &rinfo.info);
 	unregister_netdev(slave_dev);
 	free_percpu(p->stats64);
 	free_netdev(slave_dev);
......
@@ -86,6 +86,12 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev
 	brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
 	brcm_tag[3] = (1 << p->dp->index) & BRCM_IG_DSTMAP1_MASK;
 
+	/* Now tell the master network device about the desired output queue
+	 * as well
+	 */
+	skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(p->dp->index,
+							   queue));
+
 	return skb;
 }
......