Commit e6ebe6c1 authored by David S. Miller

Merge branch 'taprio-auto-qmaxsdu-new-tx'

Vladimir Oltean says:

====================
taprio automatic queueMaxSDU and new TXQ selection procedure

This patch set addresses 2 design limitations in the taprio software scheduler:

1. Software scheduling fundamentally prioritizes traffic incorrectly,
   in a way that was inspired by the Intel igb/igc drivers and does not
   follow the inputs user space gives (traffic classes and the TC to TXQ
   mapping). Patch 05/15 handles this; patches 01/15 - 04/15 are
   preparations for that work.

2. Software scheduling assumes that the gate for a traffic class closes
   as soon as the next interval begins. But this isn't true: if
   consecutive schedule entries keep that traffic class's gate open,
   there is no "gate close" event and taprio should keep dequeuing from
   that TC without interruption (see the example invocation below).
   Patches 06/15 - 15/15 handle this. Patch 10/15 is a generic Qdisc
   change required for this to work.
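
As an illustration, here is a hypothetical tc-taprio invocation of the
kind this series targets (the interface, handle, size table overhead and
schedule values are made up for the example). The "queues" argument is
the TC to TXQ mapping that the new dequeue procedure respects, and the
gate for TC 2 (mask bit 0x4) is open in every entry, so it never sees a
"gate close" event during the cycle:

  tc qdisc replace dev eth0 parent root handle 100 stab overhead 24 taprio \
          num_tc 3 \
          map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
          queues 1@0 1@1 2@2 \
          base-time 0 \
          sched-entry S 0x5 300000 \
          sched-entry S 0x6 300000 \
          sched-entry S 0x4 400000 \
          flags 0x0 clockid CLOCK_TAI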

Future development directions which depend on this patch set are:

- Propagating the automatic queueMaxSDU calculation down to offloading
  device drivers, instead of letting them calculate it themselves, as
  vsc9959_tas_guard_bands_update() does today (a rough numeric sketch of
  that calculation is given after this list).

- A software data path for tc-taprio with preemptible traffic and
  Hold/Release events.
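
Purely as a back-of-the-envelope sketch of what the automatic
queueMaxSDU calculation amounts to (the helper below is a user space
model of the logic in taprio_update_queue_max_sdu() from this series,
not a kernel API; the link speed, gate duration and header values are
illustrative):

  #include <stdint.h>
  #include <stdio.h>

  /* Bytes that fit in an open-gate interval = duration / time-per-byte,
   * minus the size table overhead and the L2 header length.
   */
  static uint32_t max_sdu_for_gate(uint64_t gate_duration_ns,
                                   uint64_t picos_per_byte,
                                   uint32_t stab_overhead,
                                   uint32_t hard_header_len)
  {
          uint64_t max_frm_len = gate_duration_ns * 1000 / picos_per_byte;

          return max_frm_len - stab_overhead - hard_header_len;
  }

  int main(void)
  {
          /* 1 Gbps => 8000 ps/byte; 10 us open gate; Ethernet header */
          printf("%u\n", (unsigned int)max_sdu_for_gate(10000, 8000, 0, 14));
          return 0; /* prints 1236 */
  }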

v1 at:
https://patchwork.kernel.org/project/netdevbpf/cover/20230128010719.2182346-1-vladimir.oltean@nxp.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6da13bf9 39b02d6d
@@ -2810,6 +2810,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
 	return 0;
 }
 
+static int igb_tc_query_caps(struct igb_adapter *adapter,
+			     struct tc_query_caps_base *base)
+{
+	switch (base->type) {
+	case TC_SETUP_QDISC_TAPRIO: {
+		struct tc_taprio_caps *caps = base->caps;
+
+		caps->broken_mqprio = true;
+
+		return 0;
+	}
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static LIST_HEAD(igb_block_cb_list);
 
 static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -2818,6 +2834,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
 	struct igb_adapter *adapter = netdev_priv(dev);
 
 	switch (type) {
+	case TC_QUERY_CAPS:
+		return igb_tc_query_caps(adapter, type_data);
 	case TC_SETUP_QDISC_CBS:
 		return igb_offload_cbs(adapter, type_data);
 	case TC_SETUP_BLOCK:
...
@@ -6214,9 +6214,9 @@ static int igc_tc_query_caps(struct igc_adapter *adapter,
 	case TC_SETUP_QDISC_TAPRIO: {
 		struct tc_taprio_caps *caps = base->caps;
 
-		if (hw->mac.type != igc_i225)
-			return -EOPNOTSUPP;
+		caps->broken_mqprio = true;
 
-		caps->gate_mask_per_txq = true;
+		if (hw->mac.type == igc_i225)
+			caps->gate_mask_per_txq = true;
 
 		return 0;
...
@@ -177,6 +177,11 @@ struct tc_mqprio_qopt_offload {
 struct tc_taprio_caps {
 	bool supports_queue_max_sdu:1;
 	bool gate_mask_per_txq:1;
+	/* Device expects lower TXQ numbers to have higher priority over higher
+	 * TXQs, regardless of their TC mapping. DO NOT USE FOR NEW DRIVERS,
+	 * INSTEAD ENFORCE A PROPER TC:TXQ MAPPING COMING FROM USER SPACE.
+	 */
+	bool broken_mqprio:1;
 };
 
 struct tc_taprio_sched_entry {
...
@@ -1282,12 +1282,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 	if (err)
 		goto err_out3;
 
-	if (ops->init) {
-		err = ops->init(sch, tca[TCA_OPTIONS], extack);
-		if (err != 0)
-			goto err_out5;
-	}
-
 	if (tca[TCA_STAB]) {
 		stab = qdisc_get_stab(tca[TCA_STAB], extack);
 		if (IS_ERR(stab)) {
@@ -1296,11 +1290,18 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		}
 		rcu_assign_pointer(sch->stab, stab);
 	}
+
+	if (ops->init) {
+		err = ops->init(sch, tca[TCA_OPTIONS], extack);
+		if (err != 0)
+			goto err_out5;
+	}
+
 	if (tca[TCA_RATE]) {
 		err = -EOPNOTSUPP;
 		if (sch->flags & TCQ_F_MQROOT) {
 			NL_SET_ERR_MSG(extack, "Cannot attach rate estimator to a multi-queue root qdisc");
-			goto err_out4;
+			goto err_out5;
 		}
 
 		err = gen_new_estimator(&sch->bstats,
@@ -1311,7 +1312,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 					tca[TCA_RATE]);
 		if (err) {
 			NL_SET_ERR_MSG(extack, "Failed to generate new estimator");
-			goto err_out4;
+			goto err_out5;
 		}
 	}
 
@@ -1321,6 +1322,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 	return sch;
 
 err_out5:
+	qdisc_put_stab(rtnl_dereference(sch->stab));
+err_out4:
 	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
 	if (ops->destroy)
 		ops->destroy(sch);
@@ -1332,16 +1335,6 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 err_out:
 	*errp = err;
 	return NULL;
-
-err_out4:
-	/*
-	 * Any broken qdiscs that would require a ops->reset() here?
-	 * The qdisc was never in action so it shouldn't be necessary.
-	 */
-	qdisc_put_stab(rtnl_dereference(sch->stab));
-	if (ops->destroy)
-		ops->destroy(sch);
-	goto err_out3;
 }
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca,
...
@@ -29,6 +29,8 @@
 #include "sch_mqprio_lib.h"
 
 static LIST_HEAD(taprio_list);
+static struct static_key_false taprio_have_broken_mqprio;
+static struct static_key_false taprio_have_working_mqprio;
 
 #define TAPRIO_ALL_GATES_OPEN -1
 
@@ -37,15 +39,19 @@ static LIST_HEAD(taprio_list);
 #define TAPRIO_FLAGS_INVALID U32_MAX
 
 struct sched_entry {
-	struct list_head list;
-
-	/* The instant that this entry "closes" and the next one
-	 * should open, the qdisc will make some effort so that no
-	 * packet leaves after this time.
+	/* Durations between this GCL entry and the GCL entry where the
+	 * respective traffic class gate closes
+	 */
+	u64 gate_duration[TC_MAX_QUEUE];
+	atomic_t budget[TC_MAX_QUEUE];
+	/* The qdisc makes some effort so that no packet leaves
+	 * after this time
 	 */
-	ktime_t close_time;
+	ktime_t gate_close_time[TC_MAX_QUEUE];
+	struct list_head list;
+	/* Used to calculate when to advance the schedule */
+	ktime_t end_time;
 	ktime_t next_txtime;
-	atomic_t budget;
 	int index;
 	u32 gate_mask;
 	u32 interval;
@@ -53,10 +59,16 @@ struct sched_entry {
 };
 
 struct sched_gate_list {
+	/* Longest non-zero contiguous gate durations per traffic class,
+	 * or 0 if a traffic class gate never opens during the schedule.
+	 */
+	u64 max_open_gate_duration[TC_MAX_QUEUE];
+	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
+	u32 max_sdu[TC_MAX_QUEUE]; /* for dump */
 	struct rcu_head rcu;
 	struct list_head entries;
 	size_t num_entries;
-	ktime_t cycle_close_time;
+	ktime_t cycle_end_time;
 	s64 cycle_time;
 	s64 cycle_time_extension;
 	s64 base_time;
@@ -69,6 +81,8 @@ struct taprio_sched {
 	enum tk_offsets tk_offset;
 	int clockid;
 	bool offloaded;
+	bool detected_mqprio;
+	bool broken_mqprio;
 	atomic64_t picos_per_byte; /* Using picoseconds because for 10Gbps+
 				    * speeds it's sub-nanoseconds per byte
 				    */
@@ -80,8 +94,8 @@ struct taprio_sched {
 	struct sched_gate_list __rcu *admin_sched;
 	struct hrtimer advance_timer;
 	struct list_head taprio_list;
-	u32 max_frm_len[TC_MAX_QUEUE]; /* for the fast path */
-	u32 max_sdu[TC_MAX_QUEUE]; /* for dump and offloading */
+	int cur_txq[TC_MAX_QUEUE];
+	u32 max_sdu[TC_MAX_QUEUE]; /* save info from the user */
 	u32 txtime_delay;
 };
@@ -90,6 +104,57 @@ struct __tc_taprio_qopt_offload {
 	struct tc_taprio_qopt_offload offload;
 };
 
+static void taprio_calculate_gate_durations(struct taprio_sched *q,
+					    struct sched_gate_list *sched)
+{
+	struct net_device *dev = qdisc_dev(q->root);
+	int num_tc = netdev_get_num_tc(dev);
+	struct sched_entry *entry, *cur;
+	int tc;
+
+	list_for_each_entry(entry, &sched->entries, list) {
+		u32 gates_still_open = entry->gate_mask;
+
+		/* For each traffic class, calculate each open gate duration,
+		 * starting at this schedule entry and ending at the schedule
+		 * entry containing a gate close event for that TC.
+		 */
+		cur = entry;
+
+		do {
+			if (!gates_still_open)
+				break;
+
+			for (tc = 0; tc < num_tc; tc++) {
+				if (!(gates_still_open & BIT(tc)))
+					continue;
+
+				if (cur->gate_mask & BIT(tc))
+					entry->gate_duration[tc] += cur->interval;
+				else
+					gates_still_open &= ~BIT(tc);
+			}
+
+			cur = list_next_entry_circular(cur, &sched->entries, list);
+		} while (cur != entry);
+
+		/* Keep track of the maximum gate duration for each traffic
+		 * class, taking care to not confuse a traffic class which is
+		 * temporarily closed with one that is always closed.
+		 */
+		for (tc = 0; tc < num_tc; tc++)
+			if (entry->gate_duration[tc] &&
+			    sched->max_open_gate_duration[tc] < entry->gate_duration[tc])
+				sched->max_open_gate_duration[tc] = entry->gate_duration[tc];
+	}
+}
+
+static bool taprio_entry_allows_tx(ktime_t skb_end_time,
+				   struct sched_entry *entry, int tc)
+{
+	return ktime_before(skb_end_time, entry->gate_close_time[tc]);
+}
+
 static ktime_t sched_base_time(const struct sched_gate_list *sched)
 {
 	if (!sched)
@@ -182,6 +247,55 @@ static int length_to_duration(struct taprio_sched *q, int len)
 	return div_u64(len * atomic64_read(&q->picos_per_byte), PSEC_PER_NSEC);
 }
 
+static int duration_to_length(struct taprio_sched *q, u64 duration)
+{
+	return div_u64(duration * PSEC_PER_NSEC, atomic64_read(&q->picos_per_byte));
+}
+
+/* Sets sched->max_sdu[] and sched->max_frm_len[] to the minimum between the
+ * q->max_sdu[] requested by the user and the max_sdu dynamically determined by
+ * the maximum open gate durations at the given link speed.
+ */
+static void taprio_update_queue_max_sdu(struct taprio_sched *q,
+					struct sched_gate_list *sched,
+					struct qdisc_size_table *stab)
+{
+	struct net_device *dev = qdisc_dev(q->root);
+	int num_tc = netdev_get_num_tc(dev);
+	u32 max_sdu_from_user;
+	u32 max_sdu_dynamic;
+	u32 max_sdu;
+	int tc;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		max_sdu_from_user = q->max_sdu[tc] ?: U32_MAX;
+
+		/* TC gate never closes => keep the queueMaxSDU
+		 * selected by the user
+		 */
+		if (sched->max_open_gate_duration[tc] == sched->cycle_time) {
+			max_sdu_dynamic = U32_MAX;
+		} else {
+			u32 max_frm_len;
+
+			max_frm_len = duration_to_length(q, sched->max_open_gate_duration[tc]);
+			if (stab)
+				max_frm_len -= stab->szopts.overhead;
+			max_sdu_dynamic = max_frm_len - dev->hard_header_len;
+		}
+
+		max_sdu = min(max_sdu_dynamic, max_sdu_from_user);
+
+		if (max_sdu != U32_MAX) {
+			sched->max_frm_len[tc] = max_sdu + dev->hard_header_len;
+			sched->max_sdu[tc] = max_sdu;
+		} else {
+			sched->max_frm_len[tc] = U32_MAX; /* never oversized */
+			sched->max_sdu[tc] = 0;
+		}
+	}
+}
+
 /* Returns the entry corresponding to next available interval. If
  * validate_interval is set, it only validates whether the timestamp occurs
  * when the gate corresponding to the skb's traffic class is open.
@@ -415,14 +529,33 @@ static long get_packet_txtime(struct sk_buff *skb, struct Qdisc *sch)
 	return txtime;
 }
 
-static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
-			      struct Qdisc *child, struct sk_buff **to_free)
+/* Devices with full offload are expected to honor this in hardware */
+static bool taprio_skb_exceeds_queue_max_sdu(struct Qdisc *sch,
+					     struct sk_buff *skb)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
+	struct sched_gate_list *sched;
 	int prio = skb->priority;
+	bool exceeds = false;
 	u8 tc;
 
+	tc = netdev_get_prio_tc_map(dev, prio);
+
+	rcu_read_lock();
+	sched = rcu_dereference(q->oper_sched);
+	if (sched && skb->len > sched->max_frm_len[tc])
+		exceeds = true;
+	rcu_read_unlock();
+
+	return exceeds;
+}
+
+static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
+			      struct Qdisc *child, struct sk_buff **to_free)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+
 	/* sk_flags are only safe to use on full sockets. */
 	if (skb->sk && sk_fullsock(skb->sk) && sock_flag(skb->sk, SOCK_TXTIME)) {
 		if (!is_valid_interval(skb, sch))
@@ -433,39 +566,16 @@ static int taprio_enqueue_one(struct sk_buff *skb, struct Qdisc *sch,
 			return qdisc_drop(skb, sch, to_free);
 	}
 
-	/* Devices with full offload are expected to honor this in hardware */
-	tc = netdev_get_prio_tc_map(dev, prio);
-	if (skb->len > q->max_frm_len[tc])
-		return qdisc_drop(skb, sch, to_free);
-
 	qdisc_qstats_backlog_inc(sch, skb);
 	sch->q.qlen++;
 
 	return qdisc_enqueue(skb, child, to_free);
 }
 
-/* Will not be called in the full offload case, since the TX queues are
- * attached to the Qdisc created using qdisc_create_dflt()
- */
-static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
-			  struct sk_buff **to_free)
+static int taprio_enqueue_segmented(struct sk_buff *skb, struct Qdisc *sch,
				    struct Qdisc *child,
+				    struct sk_buff **to_free)
 {
-	struct taprio_sched *q = qdisc_priv(sch);
-	struct Qdisc *child;
-	int queue;
-
-	queue = skb_get_queue_mapping(skb);
-
-	child = q->qdiscs[queue];
-	if (unlikely(!child))
-		return qdisc_drop(skb, sch, to_free);
-
-	/* Large packets might not be transmitted when the transmission duration
-	 * exceeds any configured interval. Therefore, segment the skb into
-	 * smaller chunks. Drivers with full offload are expected to handle
-	 * this in hardware.
-	 */
-	if (skb_is_gso(skb)) {
-		unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
-		netdev_features_t features = netif_skb_features(skb);
-		struct sk_buff *segs, *nskb;
+	unsigned int slen = 0, numsegs = 0, len = qdisc_pkt_len(skb);
+	netdev_features_t features = netif_skb_features(skb);
+	struct sk_buff *segs, *nskb;
@@ -480,7 +590,14 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		qdisc_skb_cb(segs)->pkt_len = segs->len;
 		slen += segs->len;
 
-		ret = taprio_enqueue_one(segs, sch, child, to_free);
+		/* FIXME: we should be segmenting to a smaller size
+		 * rather than dropping these
+		 */
+		if (taprio_skb_exceeds_queue_max_sdu(sch, segs))
+			ret = qdisc_drop(segs, sch, to_free);
+		else
+			ret = taprio_enqueue_one(segs, sch, child, to_free);
+
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
 				qdisc_qstats_drop(sch);
@@ -494,148 +611,245 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	consume_skb(skb);
 	return numsegs > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
-	}
-
-	return taprio_enqueue_one(skb, sch, child, to_free);
 }
 
 /* Will not be called in the full offload case, since the TX queues are
  * attached to the Qdisc created using qdisc_create_dflt()
  */
-static struct sk_buff *taprio_peek(struct Qdisc *sch)
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+			  struct sk_buff **to_free)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
-	struct net_device *dev = qdisc_dev(sch);
-	struct sched_entry *entry;
-	struct sk_buff *skb;
-	u32 gate_mask;
-	int i;
+	struct Qdisc *child;
+	int queue;
 
-	rcu_read_lock();
-	entry = rcu_dereference(q->current_entry);
-	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
-	rcu_read_unlock();
+	queue = skb_get_queue_mapping(skb);
 
-	if (!gate_mask)
-		return NULL;
+	child = q->qdiscs[queue];
+	if (unlikely(!child))
+		return qdisc_drop(skb, sch, to_free);
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct Qdisc *child = q->qdiscs[i];
-		int prio;
-		u8 tc;
+	if (taprio_skb_exceeds_queue_max_sdu(sch, skb)) {
+		/* Large packets might not be transmitted when the transmission
+		 * duration exceeds any configured interval. Therefore, segment
+		 * the skb into smaller chunks. Drivers with full offload are
+		 * expected to handle this in hardware.
+		 */
+		if (skb_is_gso(skb))
+			return taprio_enqueue_segmented(skb, sch, child,
+							to_free);
 
-		if (unlikely(!child))
-			continue;
+		return qdisc_drop(skb, sch, to_free);
+	}
 
-		skb = child->ops->peek(child);
-		if (!skb)
-			continue;
+	return taprio_enqueue_one(skb, sch, child, to_free);
+}
 
-		if (TXTIME_ASSIST_IS_ENABLED(q->flags))
-			return skb;
+static struct sk_buff *taprio_peek(struct Qdisc *sch)
+{
+	WARN_ONCE(1, "taprio only supports operating as root qdisc, peek() not implemented");
+	return NULL;
+}
 
-		prio = skb->priority;
-		tc = netdev_get_prio_tc_map(dev, prio);
+static void taprio_set_budgets(struct taprio_sched *q,
+			       struct sched_gate_list *sched,
+			       struct sched_entry *entry)
+{
+	struct net_device *dev = qdisc_dev(q->root);
+	int num_tc = netdev_get_num_tc(dev);
+	int tc, budget;
 
-		if (!(gate_mask & BIT(tc)))
-			continue;
+	for (tc = 0; tc < num_tc; tc++) {
+		/* Traffic classes which never close have infinite budget */
+		if (entry->gate_duration[tc] == sched->cycle_time)
+			budget = INT_MAX;
+		else
+			budget = div64_u64((u64)entry->gate_duration[tc] * PSEC_PER_NSEC,
+					   atomic64_read(&q->picos_per_byte));
 
-		return skb;
+		atomic_set(&entry->budget[tc], budget);
 	}
-
-	return NULL;
 }
 
-static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry)
+/* When an skb is sent, it consumes from the budget of all traffic classes */
+static int taprio_update_budgets(struct sched_entry *entry, size_t len,
+				 int tc_consumed, int num_tc)
 {
-	atomic_set(&entry->budget,
-		   div64_u64((u64)entry->interval * PSEC_PER_NSEC,
-			     atomic64_read(&q->picos_per_byte)));
+	int tc, budget, new_budget = 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		budget = atomic_read(&entry->budget[tc]);
+		/* Don't consume from infinite budget */
+		if (budget == INT_MAX) {
+			if (tc == tc_consumed)
+				new_budget = budget;
+			continue;
+		}
+
+		if (tc == tc_consumed)
+			new_budget = atomic_sub_return(len, &entry->budget[tc]);
+		else
+			atomic_sub(len, &entry->budget[tc]);
+	}
+
+	return new_budget;
 }
 
-/* Will not be called in the full offload case, since the TX queues are
- * attached to the Qdisc created using qdisc_create_dflt()
- */
-static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+static struct sk_buff *taprio_dequeue_from_txq(struct Qdisc *sch, int txq,
+					       struct sched_entry *entry,
+					       u32 gate_mask)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	struct sk_buff *skb = NULL;
-	struct sched_entry *entry;
-	u32 gate_mask;
-	int i;
-
-	rcu_read_lock();
-	entry = rcu_dereference(q->current_entry);
-	/* if there's no entry, it means that the schedule didn't
-	 * start yet, so force all gates to be open, this is in
-	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
-	 * "AdminGateStates"
-	 */
-	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
-
-	if (!gate_mask)
-		goto done;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct Qdisc *child = q->qdiscs[i];
-		ktime_t guard;
-		int prio;
-		int len;
-		u8 tc;
+	struct Qdisc *child = q->qdiscs[txq];
+	int num_tc = netdev_get_num_tc(dev);
+	struct sk_buff *skb;
+	ktime_t guard;
+	int prio;
+	int len;
+	u8 tc;
 
-		if (unlikely(!child))
-			continue;
+	if (unlikely(!child))
+		return NULL;
 
-		if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
-			skb = child->ops->dequeue(child);
-			if (!skb)
-				continue;
-			goto skb_found;
-		}
+	if (TXTIME_ASSIST_IS_ENABLED(q->flags))
+		goto skip_peek_checks;
 
-		skb = child->ops->peek(child);
-		if (!skb)
-			continue;
+	skb = child->ops->peek(child);
+	if (!skb)
+		return NULL;
 
-		prio = skb->priority;
-		tc = netdev_get_prio_tc_map(dev, prio);
+	prio = skb->priority;
+	tc = netdev_get_prio_tc_map(dev, prio);
 
-		if (!(gate_mask & BIT(tc))) {
-			skb = NULL;
-			continue;
-		}
+	if (!(gate_mask & BIT(tc)))
+		return NULL;
 
-		len = qdisc_pkt_len(skb);
-		guard = ktime_add_ns(taprio_get_time(q),
-				     length_to_duration(q, len));
+	len = qdisc_pkt_len(skb);
+	guard = ktime_add_ns(taprio_get_time(q), length_to_duration(q, len));
 
-		/* In the case that there's no gate entry, there's no
-		 * guard band ...
-		 */
-		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-		    ktime_after(guard, entry->close_time)) {
-			skb = NULL;
-			continue;
-		}
+	/* In the case that there's no gate entry, there's no
+	 * guard band ...
+	 */
+	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+	    !taprio_entry_allows_tx(guard, entry, tc))
+		return NULL;
 
-		/* ... and no budget. */
-		if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-		    atomic_sub_return(len, &entry->budget) < 0) {
-			skb = NULL;
-			continue;
-		}
+	/* ... and no budget. */
+	if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
+	    taprio_update_budgets(entry, len, tc, num_tc) < 0)
+		return NULL;
 
-		skb = child->ops->dequeue(child);
-		if (unlikely(!skb))
-			goto done;
+skip_peek_checks:
+	skb = child->ops->dequeue(child);
+	if (unlikely(!skb))
+		return NULL;
 
-skb_found:
-		qdisc_bstats_update(sch, skb);
-		qdisc_qstats_backlog_dec(sch, skb);
-		sch->q.qlen--;
+	qdisc_bstats_update(sch, skb);
+	qdisc_qstats_backlog_dec(sch, skb);
+	sch->q.qlen--;
 
-		goto done;
+	return skb;
+}
+
+static void taprio_next_tc_txq(struct net_device *dev, int tc, int *txq)
+{
+	int offset = dev->tc_to_txq[tc].offset;
+	int count = dev->tc_to_txq[tc].count;
+
+	(*txq)++;
+	if (*txq == offset + count)
+		*txq = offset;
+}
+
+/* Prioritize higher traffic classes, and select among TXQs belonging to the
+ * same TC using round robin
+ */
+static struct sk_buff *taprio_dequeue_tc_priority(struct Qdisc *sch,
+						  struct sched_entry *entry,
+						  u32 gate_mask)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	int num_tc = netdev_get_num_tc(dev);
+	struct sk_buff *skb;
+	int tc;
+
+	for (tc = num_tc - 1; tc >= 0; tc--) {
+		int first_txq = q->cur_txq[tc];
+
+		if (!(gate_mask & BIT(tc)))
+			continue;
+
+		do {
+			skb = taprio_dequeue_from_txq(sch, q->cur_txq[tc],
+						      entry, gate_mask);
+
+			taprio_next_tc_txq(dev, tc, &q->cur_txq[tc]);
+
+			if (skb)
+				return skb;
+		} while (q->cur_txq[tc] != first_txq);
+	}
+
+	return NULL;
+}
+
+/* Broken way of prioritizing smaller TXQ indices and ignoring the traffic
+ * class other than to determine whether the gate is open or not
+ */
+static struct sk_buff *taprio_dequeue_txq_priority(struct Qdisc *sch,
+						   struct sched_entry *entry,
+						   u32 gate_mask)
+{
+	struct net_device *dev = qdisc_dev(sch);
+	struct sk_buff *skb;
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		skb = taprio_dequeue_from_txq(sch, i, entry, gate_mask);
+		if (skb)
+			return skb;
+	}
+
+	return NULL;
+}
+
+/* Will not be called in the full offload case, since the TX queues are
+ * attached to the Qdisc created using qdisc_create_dflt()
+ */
+static struct sk_buff *taprio_dequeue(struct Qdisc *sch)
+{
+	struct taprio_sched *q = qdisc_priv(sch);
+	struct sk_buff *skb = NULL;
+	struct sched_entry *entry;
+	u32 gate_mask;
+
+	rcu_read_lock();
+	entry = rcu_dereference(q->current_entry);
+	/* if there's no entry, it means that the schedule didn't
+	 * start yet, so force all gates to be open, this is in
+	 * accordance to IEEE 802.1Qbv-2015 Section 8.6.9.4.5
+	 * "AdminGateStates"
+	 */
+	gate_mask = entry ? entry->gate_mask : TAPRIO_ALL_GATES_OPEN;
+	if (!gate_mask)
+		goto done;
+
+	if (static_branch_unlikely(&taprio_have_broken_mqprio) &&
+	    !static_branch_likely(&taprio_have_working_mqprio)) {
+		/* Single NIC kind which is broken */
+		skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
+	} else if (static_branch_likely(&taprio_have_working_mqprio) &&
+		   !static_branch_unlikely(&taprio_have_broken_mqprio)) {
+		/* Single NIC kind which prioritizes properly */
+		skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
+	} else {
+		/* Mixed NIC kinds present in system, need dynamic testing */
+		if (q->broken_mqprio)
+			skb = taprio_dequeue_txq_priority(sch, entry, gate_mask);
+		else
+			skb = taprio_dequeue_tc_priority(sch, entry, gate_mask);
 	}
 
 done:
@@ -650,7 +864,7 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 	if (list_is_last(&entry->list, &oper->entries))
 		return true;
 
-	if (ktime_compare(entry->close_time, oper->cycle_close_time) == 0)
+	if (ktime_compare(entry->end_time, oper->cycle_end_time) == 0)
 		return true;
 
 	return false;
@@ -658,7 +872,7 @@ static bool should_restart_cycle(const struct sched_gate_list *oper,
 static bool should_change_schedules(const struct sched_gate_list *admin,
 				    const struct sched_gate_list *oper,
-				    ktime_t close_time)
+				    ktime_t end_time)
 {
 	ktime_t next_base_time, extension_time;
@@ -667,18 +881,18 @@ static bool should_change_schedules(const struct sched_gate_list *admin,
 	next_base_time = sched_base_time(admin);
 
-	/* This is the simple case, the close_time would fall after
+	/* This is the simple case, the end_time would fall after
 	 * the next schedule base_time.
 	 */
-	if (ktime_compare(next_base_time, close_time) <= 0)
+	if (ktime_compare(next_base_time, end_time) <= 0)
 		return true;
 
-	/* This is the cycle_time_extension case, if the close_time
+	/* This is the cycle_time_extension case, if the end_time
 	 * plus the amount that can be extended would fall after the
 	 * next schedule base_time, we can extend the current schedule
 	 * for that amount.
 	 */
-	extension_time = ktime_add_ns(close_time, oper->cycle_time_extension);
+	extension_time = ktime_add_ns(end_time, oper->cycle_time_extension);
 
 	/* FIXME: the IEEE 802.1Q-2018 Specification isn't clear about
 	 * how precisely the extension should be made. So after
@@ -694,10 +908,13 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 {
 	struct taprio_sched *q = container_of(timer, struct taprio_sched,
 					      advance_timer);
+	struct net_device *dev = qdisc_dev(q->root);
 	struct sched_gate_list *oper, *admin;
+	int num_tc = netdev_get_num_tc(dev);
 	struct sched_entry *entry, *next;
 	struct Qdisc *sch = q->root;
-	ktime_t close_time;
+	ktime_t end_time;
+	int tc;
 
 	spin_lock(&q->current_entry_lock);
 	entry = rcu_dereference_protected(q->current_entry,
@@ -716,41 +933,49 @@ static enum hrtimer_restart advance_sched(struct hrtimer *timer)
 	 * entry of all schedules are pre-calculated during the
 	 * schedule initialization.
 	 */
-	if (unlikely(!entry || entry->close_time == oper->base_time)) {
+	if (unlikely(!entry || entry->end_time == oper->base_time)) {
 		next = list_first_entry(&oper->entries, struct sched_entry,
 					list);
-		close_time = next->close_time;
+		end_time = next->end_time;
 		goto first_run;
 	}
 
 	if (should_restart_cycle(oper, entry)) {
 		next = list_first_entry(&oper->entries, struct sched_entry,
 					list);
-		oper->cycle_close_time = ktime_add_ns(oper->cycle_close_time,
-						      oper->cycle_time);
+		oper->cycle_end_time = ktime_add_ns(oper->cycle_end_time,
+						    oper->cycle_time);
 	} else {
 		next = list_next_entry(entry, list);
 	}
 
-	close_time = ktime_add_ns(entry->close_time, next->interval);
-	close_time = min_t(ktime_t, close_time, oper->cycle_close_time);
+	end_time = ktime_add_ns(entry->end_time, next->interval);
+	end_time = min_t(ktime_t, end_time, oper->cycle_end_time);
+
+	for (tc = 0; tc < num_tc; tc++) {
+		if (next->gate_duration[tc] == oper->cycle_time)
+			next->gate_close_time[tc] = KTIME_MAX;
+		else
+			next->gate_close_time[tc] = ktime_add_ns(entry->end_time,
+								 next->gate_duration[tc]);
+	}
 
-	if (should_change_schedules(admin, oper, close_time)) {
+	if (should_change_schedules(admin, oper, end_time)) {
 		/* Set things so the next time this runs, the new
 		 * schedule runs.
 		 */
-		close_time = sched_base_time(admin);
+		end_time = sched_base_time(admin);
 		switch_schedules(q, &admin, &oper);
 	}
 
-	next->close_time = close_time;
-	taprio_set_budget(q, next);
+	next->end_time = end_time;
+	taprio_set_budgets(q, oper, next);
 
 first_run:
 	rcu_assign_pointer(q->current_entry, next);
 	spin_unlock(&q->current_entry_lock);
-	hrtimer_set_expires(&q->advance_timer, close_time);
+	hrtimer_set_expires(&q->advance_timer, end_time);
 
 	rcu_read_lock();
 	__netif_schedule(sch);
@@ -918,6 +1143,8 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb,
 		new->cycle_time = cycle;
 	}
 
+	taprio_calculate_gate_durations(q, new);
+
 	return 0;
 }
@@ -986,11 +1213,14 @@ static int taprio_get_start_time(struct Qdisc *sch,
 	return 0;
 }
 
-static void setup_first_close_time(struct taprio_sched *q,
-				   struct sched_gate_list *sched, ktime_t base)
+static void setup_first_end_time(struct taprio_sched *q,
+				 struct sched_gate_list *sched, ktime_t base)
 {
+	struct net_device *dev = qdisc_dev(q->root);
+	int num_tc = netdev_get_num_tc(dev);
 	struct sched_entry *first;
 	ktime_t cycle;
+	int tc;
 
 	first = list_first_entry(&sched->entries,
 				 struct sched_entry, list);
@@ -998,10 +1228,18 @@ static void setup_first_close_time(struct taprio_sched *q,
 	cycle = sched->cycle_time;
 
 	/* FIXME: find a better place to do this */
-	sched->cycle_close_time = ktime_add_ns(base, cycle);
+	sched->cycle_end_time = ktime_add_ns(base, cycle);
+
+	first->end_time = ktime_add_ns(base, first->interval);
+	taprio_set_budgets(q, sched, first);
+
+	for (tc = 0; tc < num_tc; tc++) {
+		if (first->gate_duration[tc] == sched->cycle_time)
+			first->gate_close_time[tc] = KTIME_MAX;
+		else
+			first->gate_close_time[tc] = ktime_add_ns(base, first->gate_duration[tc]);
+	}
 
-	first->close_time = ktime_add_ns(base, first->interval);
-	taprio_set_budget(q, first);
 	rcu_assign_pointer(q->current_entry, NULL);
 }
@@ -1055,6 +1293,8 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
 			       void *ptr)
 {
 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct sched_gate_list *oper, *admin;
+	struct qdisc_size_table *stab;
 	struct taprio_sched *q;
 
 	ASSERT_RTNL();
@@ -1067,6 +1307,17 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
 			continue;
 
 		taprio_set_picos_per_byte(dev, q);
+
+		stab = rtnl_dereference(q->root->stab);
+
+		oper = rtnl_dereference(q->oper_sched);
+		if (oper)
+			taprio_update_queue_max_sdu(q, oper, stab);
+
+		admin = rtnl_dereference(q->admin_sched);
+		if (admin)
+			taprio_update_queue_max_sdu(q, admin, stab);
+
 		break;
 	}
@@ -1197,6 +1448,34 @@ static void taprio_sched_to_offload(struct net_device *dev,
 	offload->num_entries = i;
 }
 
+static void taprio_detect_broken_mqprio(struct taprio_sched *q)
+{
+	struct net_device *dev = qdisc_dev(q->root);
+	struct tc_taprio_caps caps;
+
+	qdisc_offload_query_caps(dev, TC_SETUP_QDISC_TAPRIO,
+				 &caps, sizeof(caps));
+
+	q->broken_mqprio = caps.broken_mqprio;
+	if (q->broken_mqprio)
+		static_branch_inc(&taprio_have_broken_mqprio);
+	else
+		static_branch_inc(&taprio_have_working_mqprio);
+
+	q->detected_mqprio = true;
+}
+
+static void taprio_cleanup_broken_mqprio(struct taprio_sched *q)
+{
+	if (!q->detected_mqprio)
+		return;
+
+	if (q->broken_mqprio)
+		static_branch_dec(&taprio_have_broken_mqprio);
+	else
+		static_branch_dec(&taprio_have_working_mqprio);
+}
+
 static int taprio_enable_offload(struct net_device *dev,
 				 struct taprio_sched *q,
 				 struct sched_gate_list *sched,
@@ -1425,7 +1704,6 @@ static int taprio_parse_tc_entries(struct Qdisc *sch,
 				   struct netlink_ext_ack *extack)
 {
 	struct taprio_sched *q = qdisc_priv(sch);
-	struct net_device *dev = qdisc_dev(sch);
 	u32 max_sdu[TC_QOPT_MAX_QUEUE];
 	unsigned long seen_tcs = 0;
 	struct nlattr *n;
@@ -1439,18 +1717,14 @@ static int taprio_parse_tc_entries(struct Qdisc *sch,
 		if (nla_type(n) != TCA_TAPRIO_ATTR_TC_ENTRY)
 			continue;
 
-		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs, extack);
+		err = taprio_parse_tc_entry(sch, n, max_sdu, &seen_tcs,
+					    extack);
 		if (err)
 			goto out;
 	}
 
-	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++) {
+	for (tc = 0; tc < TC_QOPT_MAX_QUEUE; tc++)
 		q->max_sdu[tc] = max_sdu[tc];
-		if (max_sdu[tc])
-			q->max_frm_len[tc] = max_sdu[tc] + dev->hard_header_len;
-		else
-			q->max_frm_len[tc] = U32_MAX; /* never oversized */
-	}
 
 out:
 	return err;
@@ -1506,6 +1780,7 @@ static int taprio_new_flags(const struct nlattr *attr, u32 old,
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			 struct netlink_ext_ack *extack)
 {
+	struct qdisc_size_table *stab = rtnl_dereference(sch->stab);
 	struct nlattr *tb[TCA_TAPRIO_ATTR_MAX + 1] = { };
 	struct sched_gate_list *oper, *admin, *new_admin;
 	struct taprio_sched *q = qdisc_priv(sch);
@@ -1573,15 +1848,18 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		goto free_sched;
 
 	taprio_set_picos_per_byte(dev, q);
+	taprio_update_queue_max_sdu(q, new_admin, stab);
 
 	if (mqprio) {
 		err = netdev_set_num_tc(dev, mqprio->num_tc);
 		if (err)
 			goto free_sched;
-		for (i = 0; i < mqprio->num_tc; i++)
+		for (i = 0; i < mqprio->num_tc; i++) {
 			netdev_set_tc_queue(dev, i,
 					    mqprio->count[i],
 					    mqprio->offset[i]);
+			q->cur_txq[i] = mqprio->offset[i];
+		}
 
 		/* Always use supplied priority mappings */
 		for (i = 0; i <= TC_BITMASK; i++)
@@ -1636,7 +1914,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 		if (admin)
 			call_rcu(&admin->rcu, taprio_free_sched_cb);
 	} else {
-		setup_first_close_time(q, new_admin, start);
+		setup_first_end_time(q, new_admin, start);
 
 		/* Protects against advance_sched() */
 		spin_lock_irqsave(&q->current_entry_lock, flags);
@@ -1656,6 +1934,10 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 	new_admin = NULL;
 	err = 0;
 
+	if (!stab)
+		NL_SET_ERR_MSG_MOD(extack,
+				   "Size table not specified, frame length estimations may be inaccurate");
+
 unlock:
 	spin_unlock_bh(qdisc_lock(sch));
@@ -1716,6 +1998,8 @@ static void taprio_destroy(struct Qdisc *sch)
 	if (admin)
 		call_rcu(&admin->rcu, taprio_free_sched_cb);
+
+	taprio_cleanup_broken_mqprio(q);
 }
 
 static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
@@ -1780,6 +2064,8 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
 		q->qdiscs[i] = qdisc;
 	}
 
+	taprio_detect_broken_mqprio(q);
+
 	return taprio_change(sch, opt, extack);
 }
@@ -1920,7 +2206,8 @@ static int dump_schedule(struct sk_buff *msg,
 	return -1;
 }
 
-static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
+static int taprio_dump_tc_entries(struct sk_buff *skb,
+				  struct sched_gate_list *sched)
 {
 	struct nlattr *n;
 	int tc;
@@ -1934,7 +2221,7 @@ static int taprio_dump_tc_entries(struct taprio_sched *q, struct sk_buff *skb)
 			goto nla_put_failure;
 
 		if (nla_put_u32(skb, TCA_TAPRIO_TC_ENTRY_MAX_SDU,
-				q->max_sdu[tc]))
+				sched->max_sdu[tc]))
 			goto nla_put_failure;
 
 		nla_nest_end(skb, n);
@@ -1978,7 +2265,7 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
 	    nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
 		goto options_error;
 
-	if (taprio_dump_tc_entries(q, skb))
+	if (oper && taprio_dump_tc_entries(skb, oper))
 		goto options_error;
 
 	if (oper && dump_schedule(skb, oper))
...