Commit a477605f authored by David S. Miller

Merge branch 'dpaa2-eth-add-PFC-support'

Ioana Ciornei says:

====================
dpaa2-eth: add PFC support

This patch set adds support for Priority Flow Control in DPAA2 Ethernet
devices.

The first patch makes the necessary changes so that multiple
traffic classes are configured. The dequeue priority
of the maximum 8 traffic classes is configured to be equal.
The second patch adds a static distribution to said traffic
classes based on the VLAN PCP field. In the future, this could be
extended through the .setapp() DCB callback for dynamic configuration.

Also, add support for the congestion group taildrop mechanism that
allows us to control the number of frames that can accumulate on a group
of Rx frame queues belonging to the same traffic class.

The basic subset of the DCB ops is implemented so that the user can
query the number of PFC capable traffic classes, their state and
reconfigure them if necessary.

Changes in v3:
 - add patches 6-7 which add the PFC functionality
 - patch 2/7: revert to explicitly cast mask to u16 * to not get into
   sparse warnings
Changes in v4:
 - really fix the sparse warnings in 2/7
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3190ca3b 07beb165
...@@ -9,6 +9,16 @@ config FSL_DPAA2_ETH ...@@ -9,6 +9,16 @@ config FSL_DPAA2_ETH
The driver manages network objects discovered on the Freescale The driver manages network objects discovered on the Freescale
MC bus. MC bus.
if FSL_DPAA2_ETH
config FSL_DPAA2_ETH_DCB
bool "Data Center Bridging (DCB) Support"
default n
depends on DCB
help
Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
devices.
endif
config FSL_DPAA2_PTP_CLOCK config FSL_DPAA2_PTP_CLOCK
tristate "Freescale DPAA2 PTP Clock" tristate "Freescale DPAA2 PTP Clock"
depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
......
...@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o ...@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
......
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2020 NXP */
#include "dpaa2-eth.h"
/* Report the current PFC configuration to the dcbnl core. When PFC pause
 * is not enabled on the link, @pfc is left untouched and we report success.
 */
static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
				       struct ieee_pfc *pfc)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE) {
		memcpy(pfc, &priv->pfc, sizeof(priv->pfc));
		pfc->pfc_cap = dpaa2_eth_tc_count(priv);
	}

	return 0;
}
/* True if traffic class @tc is enabled in the PFC priority mask @pfc_en */
static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
{
	return (pfc_en >> tc) & 1;
}
/* Program per-TC congestion notification: traffic classes enabled in
 * @pfc_en get the entry/exit thresholds that trigger PFC frame generation,
 * the rest get zero thresholds, which disables PFC frames for them.
 */
static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
{
	struct dpni_congestion_notification_cfg cfg = {0};
	int err, tc;

	cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
	cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
	cfg.message_iova = 0ULL;
	cfg.message_ctx = 0ULL;

	for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
		bool enabled = is_prio_enabled(pfc_en, tc);

		/* Zero thresholds effectively disable generation of PFC
		 * frames for the priorities not set in the pfc_en mask
		 */
		cfg.threshold_entry = enabled ?
				      DPAA2_ETH_CN_THRESH_ENTRY(priv) : 0;
		cfg.threshold_exit = enabled ?
				     DPAA2_ETH_CN_THRESH_EXIT(priv) : 0;

		err = dpni_set_congestion_notification(priv->mc_io, 0,
						       priv->mc_token,
						       DPNI_QUEUE_RX, tc, &cfg);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_congestion_notification failed\n");
			return err;
		}
	}

	return 0;
}
/* Apply a new PFC configuration requested through dcbnl: toggle the
 * PFC_PAUSE link option, program per-TC congestion notifications for the
 * enabled priorities and refresh the Rx taildrop setup.
 */
static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
				       struct ieee_pfc *pfc)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_link_cfg link_cfg = {0};
	bool tx_pause;
	int err;

	/* The mbc and delay knobs of ieee_pfc are not supported */
	if (pfc->mbc || pfc->delay)
		return -EOPNOTSUPP;

	/* If same PFC enabled mask, nothing to do */
	if (priv->pfc.pfc_en == pfc->pfc_en)
		return 0;

	/* We allow PFC configuration even if it won't have any effect until
	 * general pause frames are enabled
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
	if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
		netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");

	/* Set or clear the PFC_PAUSE link option depending on whether any
	 * priority is enabled in the requested mask
	 */
	link_cfg.rate = priv->link_state.rate;
	link_cfg.options = priv->link_state.options;
	if (pfc->pfc_en)
		link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
	else
		link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		netdev_err(net_dev, "dpni_set_link_cfg failed\n");
		return err;
	}

	/* Configure congestion notifications for the enabled priorities */
	err = set_pfc_cn(priv, pfc->pfc_en);
	if (err)
		return err;

	/* Remember the accepted configuration, then update taildrop, which
	 * depends on whether PFC is now enabled
	 */
	memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
	priv->pfc_enabled = !!pfc->pfc_en;

	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	return 0;
}
/* Return the DCBX mode currently configured on this interface */
static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv;

	priv = netdev_priv(net_dev);

	return priv->dcbx_mode;
}
/* Accept only the DCBX mode already in use; any other request fails
 * (non-zero return signals failure to the dcbnl core)
 */
static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (mode == priv->dcbx_mode)
		return 0;

	return 1;
}
/* Report DCB capabilities queried through dcbnl; always returns success,
 * unknown capability ids report false
 */
static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 val;

	switch (capid) {
	case DCB_CAP_ATTR_PFC:
		val = true;
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		/* bitmap encoding: bit (n - 1) set means n TCs supported */
		val = 1 << (dpaa2_eth_tc_count(priv) - 1);
		break;
	case DCB_CAP_ATTR_DCBX:
		val = priv->dcbx_mode;
		break;
	default:
		val = false;
		break;
	}

	*cap = val;

	return 0;
}
/* DCB netlink operations implemented by the dpaa2-eth driver: PFC get/set
 * plus the minimal DCBX mode and capability queries
 */
const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
	.ieee_getpfc	= dpaa2_eth_dcbnl_ieee_getpfc,
	.ieee_setpfc	= dpaa2_eth_dcbnl_ieee_setpfc,
	.getdcbx	= dpaa2_eth_dcbnl_getdcbx,
	.setdcbx	= dpaa2_eth_dcbnl_setdcbx,
	.getcap		= dpaa2_eth_dcbnl_getcap,
};
...@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) ...@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
int i, err; int i, err;
seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
seq_printf(file, "%s%16s%16s%16s%16s\n", seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
"VFQID", "CPU", "Type", "Frames", "Pending frames"); "VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
for (i = 0; i < priv->num_fqs; i++) { for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i]; fq = &priv->fq[i];
...@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) ...@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
if (err) if (err)
fcnt = 0; fcnt = 0;
seq_printf(file, "%5d%16d%16s%16llu%16u\n", seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
fq->fqid, fq->fqid,
fq->target_cpu, fq->target_cpu,
fq->tc,
fq_type_to_str(fq), fq_type_to_str(fq),
fq->stats.frames, fq->stats.frames,
fcnt); fcnt);
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#ifndef __DPAA2_ETH_H #ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H #define __DPAA2_ETH_H
#include <linux/dcbnl.h>
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/fsl/mc.h> #include <linux/fsl/mc.h>
...@@ -36,27 +37,46 @@ ...@@ -36,27 +37,46 @@
/* Convert L3 MTU to L2 MFL */ /* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN) #define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo /* Set the taildrop threshold (in bytes) to allow the enqueue of a large
* frames in the Rx queues (length of the current frame is not * enough number of jumbo frames in the Rx queues (length of the current
* taken into account when making the taildrop decision) * frame is not taken into account when making the taildrop decision)
*/ */
#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) #define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)
/* Maximum number of Tx confirmation frames to be processed /* Maximum number of Tx confirmation frames to be processed
* in a single NAPI call * in a single NAPI call
*/ */
#define DPAA2_ETH_TXCONF_PER_NAPI 256 #define DPAA2_ETH_TXCONF_PER_NAPI 256
/* Buffer quota per queue. Must be large enough such that for minimum sized /* Buffer quota per channel. We want to keep in check number of ingress frames
* frames taildrop kicks in before the bpool gets depleted, so we compute * in flight: for small sized frames, congestion group taildrop may kick in
* how many 64B frames fit inside the taildrop threshold and add a margin * first; for large sizes, Rx FQ taildrop threshold will ensure only a
* to accommodate the buffer refill delay. * reasonable number of frames will be pending at any given time.
* Ingress frame drop due to buffer pool depletion should be a corner case only
*/ */
#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) #define DPAA2_ETH_NUM_BUFS 1280
#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
#define DPAA2_ETH_REFILL_THRESH \ #define DPAA2_ETH_REFILL_THRESH \
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD) (DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
/* Congestion group taildrop threshold: number of frames allowed to accumulate
* at any moment in a group of Rx queues belonging to the same traffic class.
* Choose value such that we don't risk depleting the buffer pool before the
* taildrop kicks in
*/
#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
/* Congestion group notification threshold: when this many frames accumulate
* on the Rx queues belonging to the same TC, the MAC is instructed to send
* PFC frames for that TC.
* When number of pending frames drops below exit threshold transmission of
* PFC frames is stopped.
*/
#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
(DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
(DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)
/* Maximum number of buffers that can be acquired/released through a single /* Maximum number of buffers that can be acquired/released through a single
* QBMan command * QBMan command
*/ */
...@@ -294,7 +314,9 @@ struct dpaa2_eth_ch_stats { ...@@ -294,7 +314,9 @@ struct dpaa2_eth_ch_stats {
/* Maximum number of queues associated with a DPNI */ /* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8 #define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES 16 #define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
#define DPAA2_ETH_MAX_RX_QUEUES \
(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16 #define DPAA2_ETH_MAX_TX_QUEUES 16
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ #define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES) DPAA2_ETH_MAX_TX_QUEUES)
...@@ -414,7 +436,8 @@ struct dpaa2_eth_priv { ...@@ -414,7 +436,8 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras; struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token; u16 mc_token;
u8 rx_td_enabled; u8 rx_fqtd_enabled;
u8 rx_cgtd_enabled;
struct dpni_link_state link_state; struct dpni_link_state link_state;
bool do_link_poll; bool do_link_poll;
...@@ -425,6 +448,12 @@ struct dpaa2_eth_priv { ...@@ -425,6 +448,12 @@ struct dpaa2_eth_priv {
u64 rx_cls_fields; u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules; struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled; u8 rx_cls_enabled;
u8 vlan_cls_enabled;
u8 pfc_enabled;
#ifdef CONFIG_FSL_DPAA2_ETH_DCB
u8 dcbx_mode;
struct ieee_pfc pfc;
#endif
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg; struct dpaa2_debugfs dbg;
...@@ -507,6 +536,17 @@ enum dpaa2_eth_rx_dist { ...@@ -507,6 +536,17 @@ enum dpaa2_eth_rx_dist {
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \ (dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
DPNI_PAUSE_VER_MINOR) >= 0) DPNI_PAUSE_VER_MINOR) >= 0)
/* Tx pause is enabled when exactly one of the PAUSE / ASYM_PAUSE link
 * options is set (symmetric pause, or asymmetric pause in the Tx direction)
 */
static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
{
	bool pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
	bool asym_pause = !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);

	return pause != asym_pause;
}
/* Rx pause is enabled whenever the PAUSE link option is set */
static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
{
	return (link_options & DPNI_LINK_OPT_PAUSE) != 0;
}
static inline static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv, unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb) struct sk_buff *skb)
...@@ -546,4 +586,9 @@ int dpaa2_eth_cls_key_size(u64 key); ...@@ -546,4 +586,9 @@ int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field); int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields); void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
bool tx_pause, bool pfc);
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
#endif /* __DPAA2_H */ #endif /* __DPAA2_H */
...@@ -130,9 +130,8 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev, ...@@ -130,9 +130,8 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
return; return;
} }
pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE); pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
pause->tx_pause = pause->rx_pause ^ pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
!!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
pause->autoneg = AUTONEG_DISABLE; pause->autoneg = AUTONEG_DISABLE;
} }
...@@ -547,7 +546,7 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -547,7 +546,7 @@ static int do_cls_rule(struct net_device *net_dev,
dma_addr_t key_iova; dma_addr_t key_iova;
u64 fields = 0; u64 fields = 0;
void *key_buf; void *key_buf;
int err; int i, err;
if (fs->ring_cookie != RX_CLS_FLOW_DISC && if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
fs->ring_cookie >= dpaa2_eth_queue_count(priv)) fs->ring_cookie >= dpaa2_eth_queue_count(priv))
...@@ -607,11 +606,18 @@ static int do_cls_rule(struct net_device *net_dev, ...@@ -607,11 +606,18 @@ static int do_cls_rule(struct net_device *net_dev,
fs_act.options |= DPNI_FS_OPT_DISCARD; fs_act.options |= DPNI_FS_OPT_DISCARD;
else else
fs_act.flow_id = fs->ring_cookie; fs_act.flow_id = fs->ring_cookie;
err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, }
fs->location, &rule_cfg, &fs_act); for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
} else { if (add)
err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
&rule_cfg); i, fs->location, &rule_cfg,
&fs_act);
else
err = dpni_remove_fs_entry(priv->mc_io, 0,
priv->mc_token, i,
&rule_cfg);
if (err)
break;
} }
dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE); dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
......
...@@ -59,6 +59,10 @@ ...@@ -59,6 +59,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235) #define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244) #define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245) #define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246) #define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
...@@ -567,4 +571,59 @@ struct dpni_cmd_remove_fs_entry { ...@@ -567,4 +571,59 @@ struct dpni_cmd_remove_fs_entry {
__le64 mask_iova; __le64 mask_iova;
}; };
/* Bit position/width of discard_on_miss inside its command byte */
#define DPNI_DISCARD_ON_MISS_SHIFT	0
#define DPNI_DISCARD_ON_MISS_SIZE	1

/* Payload of DPNI_CMDID_SET_QOS_TBL; the layout mirrors the MC firmware
 * command format, so fields and padding must not be reordered
 */
struct dpni_cmd_set_qos_table {
	__le32 pad;
	u8 default_tc;		/* TC used on a QoS table miss */
	/* only the LSB */
	u8 discard_on_miss;
	__le16 pad1[21];
	__le64 key_cfg_iova;	/* IOVA of the prepared key extraction cfg */
};
/* Payload of DPNI_CMDID_ADD_QOS_ENT (layout fixed by the MC firmware) */
struct dpni_cmd_add_qos_entry {
	__le16 pad;
	u8 tc_id;	/* traffic class selected when the rule matches */
	u8 key_size;
	__le16 index;	/* table location; relevant only with masking */
	__le16 pad1;
	__le64 key_iova;
	__le64 mask_iova;
};
/* Payload of DPNI_CMDID_REMOVE_QOS_ENT (layout fixed by the MC firmware) */
struct dpni_cmd_remove_qos_entry {
	u8 pad[3];
	u8 key_size;
	__le32 pad1;
	__le64 key_iova;
	__le64 mask_iova;
};
/* Bit layout of the type_units field below */
#define DPNI_DEST_TYPE_SHIFT		0
#define DPNI_DEST_TYPE_SIZE		4
#define DPNI_CONG_UNITS_SHIFT		4
#define DPNI_CONG_UNITS_SIZE		2

/* Payload of DPNI_CMDID_SET_CONGESTION_NOTIFICATION (layout fixed by the
 * MC firmware; cmd word boundaries noted below)
 */
struct dpni_cmd_set_congestion_notification {
	/* cmd word 0 */
	u8 qtype;
	u8 tc;
	u8 pad[6];
	/* cmd word 1 */
	__le32 dest_id;
	__le16 notification_mode;
	u8 dest_priority;
	/* from LSB: dest_type: 4 units:2 */
	u8 type_units;
	/* cmd word 2 */
	__le64 message_iova;
	/* cmd word 3 */
	__le64 message_ctx;
	/* cmd word 4 */
	__le32 threshold_entry;
	__le32 threshold_exit;
};
#endif /* _FSL_DPNI_CMD_H */ #endif /* _FSL_DPNI_CMD_H */
...@@ -1354,6 +1354,52 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, ...@@ -1354,6 +1354,52 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd); return mc_send_command(mc_io, &cmd);
} }
/**
 * dpni_set_congestion_notification() - Set traffic class congestion
 *	notification configuration
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @qtype:	Type of queue - Rx, Tx and Tx confirm types are supported
 * @tc_id:	Traffic class selection (0-7)
 * @cfg:	Congestion notification configuration
 *
 * Return: '0' on Success; error code otherwise.
 */
int dpni_set_congestion_notification(
			struct fsl_mc_io *mc_io,
			u32 cmd_flags,
			u16 token,
			enum dpni_queue_type qtype,
			u8 tc_id,
			const struct dpni_congestion_notification_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_set_congestion_notification *params;

	/* prepare command */
	cmd.header =
		mc_encode_cmd_header(DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
				     cmd_flags, token);
	params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
	params->qtype = qtype;
	params->tc = tc_id;
	params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
	params->dest_priority = cfg->dest_cfg.priority;
	params->notification_mode = cpu_to_le16(cfg->notification_mode);
	dpni_set_field(params->type_units, DEST_TYPE, cfg->dest_cfg.dest_type);
	dpni_set_field(params->type_units, CONG_UNITS, cfg->units);
	params->message_iova = cpu_to_le64(cfg->message_iova);
	params->message_ctx = cpu_to_le64(cfg->message_ctx);
	params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
	params->threshold_exit = cpu_to_le32(cfg->threshold_exit);

	/* send command to mc */
	return mc_send_command(mc_io, &cmd);
}
/** /**
* dpni_set_queue() - Set queue parameters * dpni_set_queue() - Set queue parameters
* @mc_io: Pointer to MC portal's I/O object * @mc_io: Pointer to MC portal's I/O object
...@@ -1786,3 +1832,134 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ...@@ -1786,3 +1832,134 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
/* send command to mc*/ /* send command to mc*/
return mc_send_command(mc_io, &cmd); return mc_send_command(mc_io, &cmd);
} }
/**
 * dpni_set_qos_table() - Set QoS mapping table
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	QoS table configuration
 *
 * This function and all QoS-related functions require that
 * 'max_tcs > 1' was set at DPNI creation.
 *
 * warning: Before calling this function, call dpkg_prepare_key_cfg() to
 * prepare the key_cfg_iova parameter
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_set_qos_table(struct fsl_mc_io *mc_io,
		       u32 cmd_flags,
		       u16 token,
		       const struct dpni_qos_tbl_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_set_qos_table *params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, cmd_flags,
					  token);
	params = (struct dpni_cmd_set_qos_table *)cmd.params;
	params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
	params->default_tc = cfg->default_tc;
	dpni_set_field(params->discard_on_miss, DISCARD_ON_MISS,
		       cfg->discard_on_miss);

	/* send command to mc */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	QoS rule to add
 * @tc_id:	Traffic class selection (0-7)
 * @index:	Location in the QoS table where to insert the entry.
 *		Only relevant if MASKING is enabled for QoS classification on
 *		this DPNI, it is ignored for exact match.
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
		       u32 cmd_flags,
		       u16 token,
		       const struct dpni_rule_cfg *cfg,
		       u8 tc_id,
		       u16 index)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_add_qos_entry *params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, cmd_flags,
					  token);
	params = (struct dpni_cmd_add_qos_entry *)cmd.params;
	params->index = cpu_to_le16(index);
	params->tc_id = tc_id;
	params->key_size = cfg->key_size;
	params->key_iova = cpu_to_le64(cfg->key_iova);
	params->mask_iova = cpu_to_le64(cfg->mask_iova);

	/* send command to mc */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_remove_qos_entry() - Remove QoS mapping entry
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	QoS rule to remove
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
			  u32 cmd_flags,
			  u16 token,
			  const struct dpni_rule_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_remove_qos_entry *params;

	/* prepare command */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, cmd_flags,
					  token);
	params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
	params->key_size = cfg->key_size;
	params->key_iova = cpu_to_le64(cfg->key_iova);
	params->mask_iova = cpu_to_le64(cfg->mask_iova);

	/* send command to mc */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_clear_qos_table() - Clear all QoS mapping entries
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 *
 * Following this function call, all frames are directed to
 * the default traffic class (0)
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_clear_qos_table(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
{
	struct fsl_mc_command cmd = { 0 };

	/* prepare command; this command carries no payload */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, cmd_flags,
					  token);

	/* send command to mc */
	return mc_send_command(mc_io, &cmd);
}
...@@ -513,6 +513,11 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io, ...@@ -513,6 +513,11 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
*/ */
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL #define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
/**
* Enable priority flow control pause frames
*/
#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
/** /**
* struct - Structure representing DPNI link configuration * struct - Structure representing DPNI link configuration
* @rate: Rate * @rate: Rate
...@@ -715,6 +720,26 @@ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io, ...@@ -715,6 +720,26 @@ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
u16 token, u16 token,
const struct dpni_rx_dist_cfg *cfg); const struct dpni_rx_dist_cfg *cfg);
/**
* struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
* @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
* key extractions to be used as the QoS criteria by calling
* dpkg_prepare_key_cfg()
* @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
* '0' to use the 'default_tc' in such cases
* @default_tc: Used in case of no-match and 'discard_on_miss'= 0
*/
struct dpni_qos_tbl_cfg {
u64 key_cfg_iova;
int discard_on_miss;
u8 default_tc;
};
int dpni_set_qos_table(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_qos_tbl_cfg *cfg);
/** /**
* enum dpni_dest - DPNI destination types * enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
...@@ -857,6 +882,62 @@ enum dpni_congestion_point { ...@@ -857,6 +882,62 @@ enum dpni_congestion_point {
DPNI_CP_GROUP, DPNI_CP_GROUP,
}; };
/**
* struct dpni_dest_cfg - Structure representing DPNI destination parameters
* @dest_type: Destination type
* @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
* @priority: Priority selection within the DPIO or DPCON channel; valid
* values are 0-1 or 0-7, depending on the number of priorities
* in that channel; not relevant for 'DPNI_DEST_NONE' option
*/
struct dpni_dest_cfg {
enum dpni_dest dest_type;
int dest_id;
u8 priority;
};
/* DPNI congestion options */
/**
* This congestion will trigger flow control or priority flow control.
* This will have effect only if flow control is enabled with
* dpni_set_link_cfg().
*/
#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
/**
* struct dpni_congestion_notification_cfg - congestion notification
* configuration
* @units: Units type
* @threshold_entry: Above this threshold we enter a congestion state.
* set it to '0' to disable it
* @threshold_exit: Below this threshold we exit the congestion state.
* @message_ctx: The context that will be part of the CSCN message
* @message_iova: I/O virtual address (must be in DMA-able memory),
* must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
* is contained in 'options'
* @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
* @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
*/
struct dpni_congestion_notification_cfg {
enum dpni_congestion_unit units;
u32 threshold_entry;
u32 threshold_exit;
u64 message_ctx;
u64 message_iova;
struct dpni_dest_cfg dest_cfg;
u16 notification_mode;
};
int dpni_set_congestion_notification(
struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
enum dpni_queue_type qtype,
u8 tc_id,
const struct dpni_congestion_notification_cfg *cfg);
/** /**
* struct dpni_taildrop - Structure representing the taildrop * struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not. * @enable: Indicates whether the taildrop is active or not.
...@@ -961,6 +1042,22 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, ...@@ -961,6 +1042,22 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u8 tc_id, u8 tc_id,
const struct dpni_rule_cfg *cfg); const struct dpni_rule_cfg *cfg);
int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rule_cfg *cfg,
u8 tc_id,
u16 index);
int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rule_cfg *cfg);
int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
int dpni_get_api_version(struct fsl_mc_io *mc_io, int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags, u32 cmd_flags,
u16 *major_ver, u16 *major_ver,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment