Commit db8ba1e8 authored by David S. Miller

Merge branch 'nfp-abm-move-code-and-improve-parameter-validation'

Jakub Kicinski says:

====================
nfp: abm: move code and improve parameter validation

This set starts by separating the Qdisc handling code into a new file.
The next two patches allow early access to TLV-based capabilities during
probe; previously the capabilities were parsed just before netdevs
were registered, but it's cleaner to do some basic validation earlier
and avoid cleanup work.

The next three patches improve RED's parameter validation.  First we provide
a more precise message about why offload failed (and move the parameter
validation to a helper).  Next we make sure we don't set the top bit
in the 32-bit max RED threshold value.  Because the FW treats the value
as signed, setting the top bit reportedly causes slowdowns (unnecessary
queuing and marking) with recent firmware.  Last (and perhaps
least importantly) we offload the harddrop parameter of the Qdisc.
We don't plan to offload harddrop RED, but it seems prudent to make
sure the user didn't set that flag, as the device behaviour would have differed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1106a5ad 6e5a716f
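
Why the top bit matters: the firmware reads the 32-bit threshold as a signed
value, so a level with bit 31 set compares as negative.  A minimal standalone
illustration of that reinterpretation (plain userspace C, not driver code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A threshold with the top bit set wraps to a large negative
	 * number when reinterpreted as a signed 32-bit value (two's
	 * complement on the kernel's targets), so a signed comparison
	 * sees every queue level as over the threshold - hence the
	 * queuing/marking slowdowns described in the cover letter.
	 */
	uint32_t thresh = UINT32_C(1) << 31;
	int32_t fw_view = (int32_t)thresh;

	printf("unsigned threshold: %" PRIu32 "\n", thresh);   /* 2147483648 */
	printf("signed FW view:     %" PRId32 "\n", fw_view);  /* -2147483648 */
	return 0;
}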
drivers/net/ethernet/netronome/nfp/Makefile
@@ -57,6 +57,7 @@ endif
 ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
 nfp-objs += \
	    abm/ctrl.o \
+	    abm/qdisc.o \
	    abm/main.o
 endif
drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -7,9 +7,6 @@
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
 
 #include "../nfpcore/nfp.h"
 #include "../nfpcore/nfp_cpp.h"
@@ -27,269 +24,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
FIELD_PREP(NFP_ABM_PORTID_ID, id);
}
static int
__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
u32 handle, unsigned int qs, u32 init_val)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
int ret;
ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
alink->parent = handle;
alink->num_qdiscs = qs;
port->tc_offload_cnt = qs;
return ret;
}
static void
nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
u32 handle, unsigned int qs)
{
__nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
}
static int
nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
unsigned int i = TC_H_MIN(opt->parent) - 1;
if (opt->parent == TC_H_ROOT)
i = 0;
else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
i = TC_H_MIN(opt->parent) - 1;
else
return -EOPNOTSUPP;
if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
return -EOPNOTSUPP;
return i;
}
static void
nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
u32 handle)
{
unsigned int i;
for (i = 0; i < alink->num_qdiscs; i++)
if (handle == alink->qdiscs[i].handle)
break;
if (i == alink->num_qdiscs)
return;
if (alink->parent == TC_H_ROOT) {
nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
} else {
nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
}
}
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt)
{
bool existing;
int i, err;
i = nfp_abm_red_find(alink, opt);
existing = i >= 0;
if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
nfp_warn(alink->abm->app->cpp,
"RED offload failed - unsupported parameters\n");
err = -EINVAL;
goto err_destroy;
}
if (existing) {
if (alink->parent == TC_H_ROOT)
err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
else
err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
if (err)
goto err_destroy;
return 0;
}
if (opt->parent == TC_H_ROOT) {
i = 0;
err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
opt->set.min);
} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
i = TC_H_MIN(opt->parent) - 1;
err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
} else {
return -EINVAL;
}
/* Set the handle to try full clean up, in case IO failed */
alink->qdiscs[i].handle = opt->handle;
if (err)
goto err_destroy;
if (opt->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
else
err = nfp_abm_ctrl_read_q_stats(alink, i,
&alink->qdiscs[i].stats);
if (err)
goto err_destroy;
if (opt->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_xstats(alink,
&alink->qdiscs[i].xstats);
else
err = nfp_abm_ctrl_read_q_xstats(alink, i,
&alink->qdiscs[i].xstats);
if (err)
goto err_destroy;
alink->qdiscs[i].stats.backlog_pkts = 0;
alink->qdiscs[i].stats.backlog_bytes = 0;
return 0;
err_destroy:
/* If the qdisc keeps on living, but we can't offload undo changes */
if (existing) {
opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
opt->set.qstats->backlog -=
alink->qdiscs[i].stats.backlog_bytes;
}
nfp_abm_red_destroy(netdev, alink, opt->handle);
return err;
}
static void
nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
struct tc_qopt_offload_stats *stats)
{
_bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
new->tx_pkts - old->tx_pkts);
stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
stats->qstats->overlimits += new->overlimits - old->overlimits;
stats->qstats->drops += new->drops - old->drops;
}
static int
nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
struct nfp_alink_stats *prev_stats;
struct nfp_alink_stats stats;
int i, err;
i = nfp_abm_red_find(alink, opt);
if (i < 0)
return i;
prev_stats = &alink->qdiscs[i].stats;
if (alink->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_stats(alink, &stats);
else
err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
if (err)
return err;
nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
*prev_stats = stats;
return 0;
}
static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
struct nfp_alink_xstats *prev_xstats;
struct nfp_alink_xstats xstats;
int i, err;
i = nfp_abm_red_find(alink, opt);
if (i < 0)
return i;
prev_xstats = &alink->qdiscs[i].xstats;
if (alink->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_xstats(alink, &xstats);
else
err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
if (err)
return err;
opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
*prev_xstats = xstats;
return 0;
}
static int
nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt)
{
switch (opt->command) {
case TC_RED_REPLACE:
return nfp_abm_red_replace(netdev, alink, opt);
case TC_RED_DESTROY:
nfp_abm_red_destroy(netdev, alink, opt->handle);
return 0;
case TC_RED_STATS:
return nfp_abm_red_stats(alink, opt);
case TC_RED_XSTATS:
return nfp_abm_red_xstats(alink, opt);
default:
return -EOPNOTSUPP;
}
}
static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
{
struct nfp_alink_stats stats;
unsigned int i;
int err;
for (i = 0; i < alink->num_qdiscs; i++) {
if (alink->qdiscs[i].handle == TC_H_UNSPEC)
continue;
err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
if (err)
return err;
nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
&opt->stats);
}
return 0;
}
static int
nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_mq_qopt_offload *opt)
{
switch (opt->command) {
case TC_MQ_CREATE:
nfp_abm_reset_root(netdev, alink, opt->handle,
alink->total_queues);
return 0;
case TC_MQ_DESTROY:
if (opt->handle == alink->parent)
nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
return 0;
case TC_MQ_STATS:
return nfp_abm_mq_stats(alink, opt);
default:
return -EOPNOTSUPP;
}
}
static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,11 @@
 #ifndef __NFP_ABM_H__
 #define __NFP_ABM_H__ 1
 
 #include <linux/bits.h>
 #include <net/devlink.h>
+#include <net/pkt_cls.h>
+
+#define NFP_ABM_LVL_INFINITY	S32_MAX
 
 struct nfp_app;
 struct nfp_net;
@@ -91,6 +95,11 @@ struct nfp_abm_link {
 	struct nfp_red_qdisc *qdiscs;
 };
 
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+			 struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+			struct tc_mq_qopt_offload *opt);
+
 void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
 int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
 int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
drivers/net/ethernet/netronome/nfp/abm/qdisc.c (new file)
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_port.h"
#include "main.h"
static int
__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
u32 handle, unsigned int qs, u32 init_val)
{
struct nfp_port *port = nfp_port_from_netdev(netdev);
int ret;
ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
alink->parent = handle;
alink->num_qdiscs = qs;
port->tc_offload_cnt = qs;
return ret;
}
static void
nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
u32 handle, unsigned int qs)
{
__nfp_abm_reset_root(netdev, alink, handle, qs, NFP_ABM_LVL_INFINITY);
}
static int
nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
unsigned int i = TC_H_MIN(opt->parent) - 1;
if (opt->parent == TC_H_ROOT)
i = 0;
else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
i = TC_H_MIN(opt->parent) - 1;
else
return -EOPNOTSUPP;
if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
return -EOPNOTSUPP;
return i;
}
static void
nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
u32 handle)
{
unsigned int i;
for (i = 0; i < alink->num_qdiscs; i++)
if (handle == alink->qdiscs[i].handle)
break;
if (i == alink->num_qdiscs)
return;
if (alink->parent == TC_H_ROOT) {
nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
} else {
nfp_abm_ctrl_set_q_lvl(alink, i, NFP_ABM_LVL_INFINITY);
memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
}
}
static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt)
{
struct nfp_cpp *cpp = alink->abm->app->cpp;
if (!opt->set.is_ecn) {
nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
opt->parent, opt->handle);
return false;
}
if (opt->set.is_harddrop) {
nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
opt->parent, opt->handle);
return false;
}
if (opt->set.min != opt->set.max) {
nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
opt->parent, opt->handle);
return false;
}
if (opt->set.min > NFP_ABM_LVL_INFINITY) {
nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
opt->handle);
return false;
}
return true;
}
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt)
{
bool existing;
int i, err;
i = nfp_abm_red_find(alink, opt);
existing = i >= 0;
if (!nfp_abm_red_check_params(alink, opt)) {
err = -EINVAL;
goto err_destroy;
}
if (existing) {
if (alink->parent == TC_H_ROOT)
err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
else
err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
if (err)
goto err_destroy;
return 0;
}
if (opt->parent == TC_H_ROOT) {
i = 0;
err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
opt->set.min);
} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
i = TC_H_MIN(opt->parent) - 1;
err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
} else {
return -EINVAL;
}
/* Set the handle to try full clean up, in case IO failed */
alink->qdiscs[i].handle = opt->handle;
if (err)
goto err_destroy;
if (opt->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
else
err = nfp_abm_ctrl_read_q_stats(alink, i,
&alink->qdiscs[i].stats);
if (err)
goto err_destroy;
if (opt->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_xstats(alink,
&alink->qdiscs[i].xstats);
else
err = nfp_abm_ctrl_read_q_xstats(alink, i,
&alink->qdiscs[i].xstats);
if (err)
goto err_destroy;
alink->qdiscs[i].stats.backlog_pkts = 0;
alink->qdiscs[i].stats.backlog_bytes = 0;
return 0;
err_destroy:
/* If the qdisc keeps on living, but we can't offload undo changes */
if (existing) {
opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
opt->set.qstats->backlog -=
alink->qdiscs[i].stats.backlog_bytes;
}
nfp_abm_red_destroy(netdev, alink, opt->handle);
return err;
}
static void
nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
struct tc_qopt_offload_stats *stats)
{
_bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
new->tx_pkts - old->tx_pkts);
stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
stats->qstats->overlimits += new->overlimits - old->overlimits;
stats->qstats->drops += new->drops - old->drops;
}
static int
nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
struct nfp_alink_stats *prev_stats;
struct nfp_alink_stats stats;
int i, err;
i = nfp_abm_red_find(alink, opt);
if (i < 0)
return i;
prev_stats = &alink->qdiscs[i].stats;
if (alink->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_stats(alink, &stats);
else
err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
if (err)
return err;
nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
*prev_stats = stats;
return 0;
}
static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
struct nfp_alink_xstats *prev_xstats;
struct nfp_alink_xstats xstats;
int i, err;
i = nfp_abm_red_find(alink, opt);
if (i < 0)
return i;
prev_xstats = &alink->qdiscs[i].xstats;
if (alink->parent == TC_H_ROOT)
err = nfp_abm_ctrl_read_xstats(alink, &xstats);
else
err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
if (err)
return err;
opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
*prev_xstats = xstats;
return 0;
}
int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_red_qopt_offload *opt)
{
switch (opt->command) {
case TC_RED_REPLACE:
return nfp_abm_red_replace(netdev, alink, opt);
case TC_RED_DESTROY:
nfp_abm_red_destroy(netdev, alink, opt->handle);
return 0;
case TC_RED_STATS:
return nfp_abm_red_stats(alink, opt);
case TC_RED_XSTATS:
return nfp_abm_red_xstats(alink, opt);
default:
return -EOPNOTSUPP;
}
}
static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
{
struct nfp_alink_stats stats;
unsigned int i;
int err;
for (i = 0; i < alink->num_qdiscs; i++) {
if (alink->qdiscs[i].handle == TC_H_UNSPEC)
continue;
err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
if (err)
return err;
nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
&opt->stats);
}
return 0;
}
int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
struct tc_mq_qopt_offload *opt)
{
switch (opt->command) {
case TC_MQ_CREATE:
nfp_abm_reset_root(netdev, alink, opt->handle,
alink->total_queues);
return 0;
case TC_MQ_DESTROY:
if (opt->handle == alink->parent)
nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
return 0;
case TC_MQ_STATS:
return nfp_abm_mq_stats(alink, opt);
default:
return -EOPNOTSUPP;
}
}
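
Taken together, nfp_abm_red_check_params() above accepts only a narrow RED
configuration.  A standalone sketch (not driver code) of the same acceptance
rule, with INT32_MAX standing in for NFP_ABM_LVL_INFINITY (defined as
S32_MAX in main.h):

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the checks in nfp_abm_red_check_params(): ECN marking is
 * required, harddrop is rejected, min must equal max, and the
 * threshold must fit in signed 32 bits because the FW treats it as
 * a signed value.
 */
static bool red_params_offloadable(uint32_t min, uint32_t max,
				   bool is_ecn, bool is_harddrop)
{
	return is_ecn && !is_harddrop && min == max && min <= INT32_MAX;
}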
drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -851,7 +851,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
 			    void __iomem *ctrl_bar);
 
 struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
 	      unsigned int max_tx_rings, unsigned int max_rx_rings);
 void nfp_net_free(struct nfp_net *nn);
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3560,6 +3560,7 @@ void nfp_net_info(struct nfp_net *nn)
 /**
  * nfp_net_alloc() - Allocate netdev and related structure
  * @pdev:         PCI device
+ * @ctrl_bar:     PCI IOMEM with vNIC config memory
  * @needs_netdev: Whether to allocate a netdev for this vNIC
  * @max_tx_rings: Maximum number of TX rings supported by device
  * @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3571,12 @@ void nfp_net_info(struct nfp_net *nn)
  *
  * Return: NFP Net device structure, or ERR_PTR on error.
  */
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
-			      unsigned int max_tx_rings,
-			      unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+	      unsigned int max_tx_rings, unsigned int max_rx_rings)
 {
 	struct nfp_net *nn;
+	int err;
 
 	if (needs_netdev) {
 		struct net_device *netdev;
@@ -3594,6 +3596,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 	}
 
 	nn->dp.dev = &pdev->dev;
+	nn->dp.ctrl_bar = ctrl_bar;
 	nn->pdev = pdev;
 
 	nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3619,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
 	timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
 
+	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+				     &nn->tlv_caps);
+	if (err)
+		goto err_free_nn;
+
 	return nn;
+
+err_free_nn:
+	if (nn->dp.netdev)
+		free_netdev(nn->dp.netdev);
+	else
+		vfree(nn);
+	return ERR_PTR(err);
 }
/**
@@ -3889,11 +3904,6 @@ int nfp_net_init(struct nfp_net *nn)
 		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
 	}
 
-	err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
-				     &nn->tlv_caps);
-	if (err)
-		return err;
-
 	if (nn->dp.netdev)
 		nfp_net_netdev_init(nn);
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
 	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
 
 	/* Allocate and initialise the vNIC */
-	nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
+	nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+			   n_tx_rings, n_rx_rings);
 	if (IS_ERR(nn))
 		return nn;
 
 	nn->app = pf->app;
 	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
-	nn->dp.ctrl_bar = ctrl_bar;
 	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	nn->dp.is_vf = 0;
drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	rx_bar_off = NFP_PCIE_QUEUE(startq);
 
 	/* Allocate and initialise the netdev */
-	nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
+	nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
 	if (IS_ERR(nn)) {
 		err = PTR_ERR(nn);
 		goto err_ctrl_unmap;
@@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
 	vf->nn = nn;
 
 	nn->fw_ver = fw_ver;
-	nn->dp.ctrl_bar = ctrl_bar;
 	nn->dp.is_vf = 1;
 	nn->stride_tx = stride;
 	nn->stride_rx = stride;
include/net/pkt_cls.h
@@ -807,6 +807,7 @@ struct tc_red_qopt_offload_params {
 	u32 max;
 	u32 probability;
 	bool is_ecn;
+	bool is_harddrop;
 	struct gnet_stats_queue *qstats;
 };
net/sched/sch_red.c
@@ -167,6 +167,7 @@ static int red_offload(struct Qdisc *sch, bool enable)
 		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
 		opt.set.probability = q->parms.max_P;
 		opt.set.is_ecn = red_use_ecn(q);
+		opt.set.is_harddrop = red_use_harddrop(q);
 		opt.set.qstats = &sch->qstats;
 	} else {
 		opt.command = TC_RED_DESTROY;
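
For reference, the red_use_harddrop() consulted above is a one-line helper in
net/sched/sch_red.c (quoted as found in kernels of this era).  It reports
whether the user configured the qdisc to hard-drop rather than ECN-mark past
the max threshold, which is exactly the behaviour the NFP offload now refuses:

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}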