Commit b62c3624 authored by Dmitry Bogdanov, committed by David S. Miller

net: macsec: add support for getting offloaded stats

When HW offloading is enabled, offloaded stats should be used, because
s/w stats are wrong and out of sync with the HW in this case.
Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f428011b
...@@ -88,17 +88,6 @@ struct gcm_iv { ...@@ -88,17 +88,6 @@ struct gcm_iv {
__be32 pn; __be32 pn;
}; };
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
__u64 OutPktsTooLong;
__u64 InPktsNoTag;
__u64 InPktsBadTag;
__u64 InPktsUnknownSCI;
__u64 InPktsNoSCI;
__u64 InPktsOverrun;
};
#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
struct pcpu_secy_stats { struct pcpu_secy_stats {
...@@ -2653,207 +2642,309 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) ...@@ -2653,207 +2642,309 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
return ret; return ret;
} }
static int copy_tx_sa_stats(struct sk_buff *skb, static void get_tx_sa_stats(struct net_device *dev, int an,
struct macsec_tx_sa_stats __percpu *pstats) struct macsec_tx_sa *tx_sa,
struct macsec_tx_sa_stats *sum)
{ {
struct macsec_tx_sa_stats sum = {0, }; struct macsec_dev *macsec = macsec_priv(dev);
int cpu; int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.sa.assoc_num = an;
ctx.sa.tx_sa = tx_sa;
ctx.stats.tx_sa_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); const struct macsec_tx_sa_stats *stats =
per_cpu_ptr(tx_sa->stats, cpu);
sum.OutPktsProtected += stats->OutPktsProtected; sum->OutPktsProtected += stats->OutPktsProtected;
sum.OutPktsEncrypted += stats->OutPktsEncrypted; sum->OutPktsEncrypted += stats->OutPktsEncrypted;
} }
}
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) {
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
sum->OutPktsProtected) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
sum->OutPktsEncrypted))
return -EMSGSIZE; return -EMSGSIZE;
return 0; return 0;
} }
static noinline_for_stack int static void get_rx_sa_stats(struct net_device *dev,
copy_rx_sa_stats(struct sk_buff *skb, struct macsec_rx_sc *rx_sc, int an,
struct macsec_rx_sa_stats __percpu *pstats) struct macsec_rx_sa *rx_sa,
struct macsec_rx_sa_stats *sum)
{ {
struct macsec_rx_sa_stats sum = {0, }; struct macsec_dev *macsec = macsec_priv(dev);
int cpu; int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.sa.assoc_num = an;
ctx.sa.rx_sa = rx_sa;
ctx.stats.rx_sa_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
ctx.rx_sc = rx_sc;
macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); const struct macsec_rx_sa_stats *stats =
per_cpu_ptr(rx_sa->stats, cpu);
sum.InPktsOK += stats->InPktsOK; sum->InPktsOK += stats->InPktsOK;
sum.InPktsInvalid += stats->InPktsInvalid; sum->InPktsInvalid += stats->InPktsInvalid;
sum.InPktsNotValid += stats->InPktsNotValid; sum->InPktsNotValid += stats->InPktsNotValid;
sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
sum.InPktsUnusedSA += stats->InPktsUnusedSA; sum->InPktsUnusedSA += stats->InPktsUnusedSA;
} }
}
if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || static int copy_rx_sa_stats(struct sk_buff *skb,
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || struct macsec_rx_sa_stats *sum)
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || {
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
sum->InPktsInvalid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
sum->InPktsNotValid) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
sum->InPktsNotUsingSA) ||
nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
sum->InPktsUnusedSA))
return -EMSGSIZE; return -EMSGSIZE;
return 0; return 0;
} }
static noinline_for_stack int static void get_rx_sc_stats(struct net_device *dev,
copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats) struct macsec_rx_sc *rx_sc,
struct macsec_rx_sc_stats *sum)
{ {
struct macsec_rx_sc_stats sum = {0, }; struct macsec_dev *macsec = macsec_priv(dev);
int cpu; int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.stats.rx_sc_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
ctx.rx_sc = rx_sc;
macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const struct pcpu_rx_sc_stats *stats; const struct pcpu_rx_sc_stats *stats;
struct macsec_rx_sc_stats tmp; struct macsec_rx_sc_stats tmp;
unsigned int start; unsigned int start;
stats = per_cpu_ptr(pstats, cpu); stats = per_cpu_ptr(rx_sc->stats, cpu);
do { do {
start = u64_stats_fetch_begin_irq(&stats->syncp); start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp)); memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start)); } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.InOctetsValidated += tmp.InOctetsValidated; sum->InOctetsValidated += tmp.InOctetsValidated;
sum.InOctetsDecrypted += tmp.InOctetsDecrypted; sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
sum.InPktsUnchecked += tmp.InPktsUnchecked; sum->InPktsUnchecked += tmp.InPktsUnchecked;
sum.InPktsDelayed += tmp.InPktsDelayed; sum->InPktsDelayed += tmp.InPktsDelayed;
sum.InPktsOK += tmp.InPktsOK; sum->InPktsOK += tmp.InPktsOK;
sum.InPktsInvalid += tmp.InPktsInvalid; sum->InPktsInvalid += tmp.InPktsInvalid;
sum.InPktsLate += tmp.InPktsLate; sum->InPktsLate += tmp.InPktsLate;
sum.InPktsNotValid += tmp.InPktsNotValid; sum->InPktsNotValid += tmp.InPktsNotValid;
sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
sum.InPktsUnusedSA += tmp.InPktsUnusedSA; sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
} }
}
static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
sum.InOctetsValidated, sum->InOctetsValidated,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
sum.InOctetsDecrypted, sum->InOctetsDecrypted,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
sum.InPktsUnchecked, sum->InPktsUnchecked,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
sum.InPktsDelayed, sum->InPktsDelayed,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
sum.InPktsOK, sum->InPktsOK,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
sum.InPktsInvalid, sum->InPktsInvalid,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
sum.InPktsLate, sum->InPktsLate,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
sum.InPktsNotValid, sum->InPktsNotValid,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
sum.InPktsNotUsingSA, sum->InPktsNotUsingSA,
MACSEC_RXSC_STATS_ATTR_PAD) || MACSEC_RXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
sum.InPktsUnusedSA, sum->InPktsUnusedSA,
MACSEC_RXSC_STATS_ATTR_PAD)) MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
return 0; return 0;
} }
static noinline_for_stack int static void get_tx_sc_stats(struct net_device *dev,
copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats) struct macsec_tx_sc_stats *sum)
{ {
struct macsec_tx_sc_stats sum = {0, }; struct macsec_dev *macsec = macsec_priv(dev);
int cpu; int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.stats.tx_sc_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const struct pcpu_tx_sc_stats *stats; const struct pcpu_tx_sc_stats *stats;
struct macsec_tx_sc_stats tmp; struct macsec_tx_sc_stats tmp;
unsigned int start; unsigned int start;
stats = per_cpu_ptr(pstats, cpu); stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do { do {
start = u64_stats_fetch_begin_irq(&stats->syncp); start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp)); memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start)); } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.OutPktsProtected += tmp.OutPktsProtected; sum->OutPktsProtected += tmp.OutPktsProtected;
sum.OutPktsEncrypted += tmp.OutPktsEncrypted; sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
sum.OutOctetsProtected += tmp.OutOctetsProtected; sum->OutOctetsProtected += tmp.OutOctetsProtected;
sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
} }
}
static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
sum.OutPktsProtected, sum->OutPktsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) || MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
sum.OutPktsEncrypted, sum->OutPktsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD) || MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
sum.OutOctetsProtected, sum->OutOctetsProtected,
MACSEC_TXSC_STATS_ATTR_PAD) || MACSEC_TXSC_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
sum.OutOctetsEncrypted, sum->OutOctetsEncrypted,
MACSEC_TXSC_STATS_ATTR_PAD)) MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
return 0; return 0;
} }
static noinline_for_stack int static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{ {
struct macsec_dev_stats sum = {0, }; struct macsec_dev *macsec = macsec_priv(dev);
int cpu; int cpu;
/* If h/w offloading is available, propagate to the device */
if (macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
if (ops) {
ctx.stats.dev_stats = sum;
ctx.secy = &macsec_priv(dev)->secy;
macsec_offload(ops->mdo_get_dev_stats, &ctx);
}
return;
}
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const struct pcpu_secy_stats *stats; const struct pcpu_secy_stats *stats;
struct macsec_dev_stats tmp; struct macsec_dev_stats tmp;
unsigned int start; unsigned int start;
stats = per_cpu_ptr(pstats, cpu); stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do { do {
start = u64_stats_fetch_begin_irq(&stats->syncp); start = u64_stats_fetch_begin_irq(&stats->syncp);
memcpy(&tmp, &stats->stats, sizeof(tmp)); memcpy(&tmp, &stats->stats, sizeof(tmp));
} while (u64_stats_fetch_retry_irq(&stats->syncp, start)); } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
sum.OutPktsUntagged += tmp.OutPktsUntagged; sum->OutPktsUntagged += tmp.OutPktsUntagged;
sum.InPktsUntagged += tmp.InPktsUntagged; sum->InPktsUntagged += tmp.InPktsUntagged;
sum.OutPktsTooLong += tmp.OutPktsTooLong; sum->OutPktsTooLong += tmp.OutPktsTooLong;
sum.InPktsNoTag += tmp.InPktsNoTag; sum->InPktsNoTag += tmp.InPktsNoTag;
sum.InPktsBadTag += tmp.InPktsBadTag; sum->InPktsBadTag += tmp.InPktsBadTag;
sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
sum.InPktsNoSCI += tmp.InPktsNoSCI; sum->InPktsNoSCI += tmp.InPktsNoSCI;
sum.InPktsOverrun += tmp.InPktsOverrun; sum->InPktsOverrun += tmp.InPktsOverrun;
} }
}
static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
{
if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
sum.OutPktsUntagged, sum->OutPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
sum.InPktsUntagged, sum->InPktsUntagged,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
sum.OutPktsTooLong, sum->OutPktsTooLong,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
sum.InPktsNoTag, sum->InPktsNoTag,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
sum.InPktsBadTag, sum->InPktsBadTag,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
sum.InPktsUnknownSCI, sum->InPktsUnknownSCI,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
sum.InPktsNoSCI, sum->InPktsNoSCI,
MACSEC_SECY_STATS_ATTR_PAD) || MACSEC_SECY_STATS_ATTR_PAD) ||
nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
sum.InPktsOverrun, sum->InPktsOverrun,
MACSEC_SECY_STATS_ATTR_PAD)) MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE; return -EMSGSIZE;
...@@ -2914,7 +3005,12 @@ static noinline_for_stack int ...@@ -2914,7 +3005,12 @@ static noinline_for_stack int
dump_secy(struct macsec_secy *secy, struct net_device *dev, dump_secy(struct macsec_secy *secy, struct net_device *dev,
struct sk_buff *skb, struct netlink_callback *cb) struct sk_buff *skb, struct netlink_callback *cb)
{ {
struct macsec_tx_sc_stats tx_sc_stats = {0, };
struct macsec_tx_sa_stats tx_sa_stats = {0, };
struct macsec_rx_sc_stats rx_sc_stats = {0, };
struct macsec_rx_sa_stats rx_sa_stats = {0, };
struct macsec_dev *macsec = netdev_priv(dev); struct macsec_dev *macsec = netdev_priv(dev);
struct macsec_dev_stats dev_stats = {0, };
struct macsec_tx_sc *tx_sc = &secy->tx_sc; struct macsec_tx_sc *tx_sc = &secy->tx_sc;
struct nlattr *txsa_list, *rxsc_list; struct nlattr *txsa_list, *rxsc_list;
struct macsec_rx_sc *rx_sc; struct macsec_rx_sc *rx_sc;
...@@ -2945,7 +3041,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev, ...@@ -2945,7 +3041,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS); attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr) if (!attr)
goto nla_put_failure; goto nla_put_failure;
if (copy_tx_sc_stats(skb, tx_sc->stats)) {
get_tx_sc_stats(dev, &tx_sc_stats);
if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
nla_nest_cancel(skb, attr); nla_nest_cancel(skb, attr);
goto nla_put_failure; goto nla_put_failure;
} }
...@@ -2954,7 +3052,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev, ...@@ -2954,7 +3052,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS); attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
if (!attr) if (!attr)
goto nla_put_failure; goto nla_put_failure;
if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { get_secy_stats(dev, &dev_stats);
if (copy_secy_stats(skb, &dev_stats)) {
nla_nest_cancel(skb, attr); nla_nest_cancel(skb, attr);
goto nla_put_failure; goto nla_put_failure;
} }
...@@ -2978,6 +3077,22 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev, ...@@ -2978,6 +3077,22 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure; goto nla_put_failure;
} }
attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
nla_nest_end(skb, attr);
if (secy->xpn) { if (secy->xpn) {
pn = tx_sa->next_pn; pn = tx_sa->next_pn;
pn_len = MACSEC_XPN_PN_LEN; pn_len = MACSEC_XPN_PN_LEN;
...@@ -2996,20 +3111,6 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev, ...@@ -2996,20 +3111,6 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure; goto nla_put_failure;
} }
attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
if (copy_tx_sa_stats(skb, tx_sa->stats)) {
nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
}
nla_nest_end(skb, attr);
nla_nest_end(skb, txsa_nest); nla_nest_end(skb, txsa_nest);
} }
nla_nest_end(skb, txsa_list); nla_nest_end(skb, txsa_list);
...@@ -3043,7 +3144,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev, ...@@ -3043,7 +3144,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list); nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure; goto nla_put_failure;
} }
if (copy_rx_sc_stats(skb, rx_sc->stats)) { memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
nla_nest_cancel(skb, attr); nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list); nla_nest_cancel(skb, rxsc_list);
...@@ -3084,7 +3187,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev, ...@@ -3084,7 +3187,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_cancel(skb, rxsc_list); nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure; goto nla_put_failure;
} }
if (copy_rx_sa_stats(skb, rx_sa->stats)) { memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
nla_nest_cancel(skb, attr); nla_nest_cancel(skb, attr);
nla_nest_cancel(skb, rxsa_list); nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest); nla_nest_cancel(skb, rxsc_nest);
......
...@@ -88,6 +88,17 @@ struct macsec_tx_sc_stats { ...@@ -88,6 +88,17 @@ struct macsec_tx_sc_stats {
__u64 OutOctetsEncrypted; __u64 OutOctetsEncrypted;
}; };
struct macsec_dev_stats {
__u64 OutPktsUntagged;
__u64 InPktsUntagged;
__u64 OutPktsTooLong;
__u64 InPktsNoTag;
__u64 InPktsBadTag;
__u64 InPktsUnknownSCI;
__u64 InPktsNoSCI;
__u64 InPktsOverrun;
};
/** /**
* struct macsec_rx_sa - receive secure association * struct macsec_rx_sa - receive secure association
* @active: * @active:
...@@ -236,6 +247,13 @@ struct macsec_context { ...@@ -236,6 +247,13 @@ struct macsec_context {
struct macsec_tx_sa *tx_sa; struct macsec_tx_sa *tx_sa;
}; };
} sa; } sa;
union {
struct macsec_tx_sc_stats *tx_sc_stats;
struct macsec_tx_sa_stats *tx_sa_stats;
struct macsec_rx_sc_stats *rx_sc_stats;
struct macsec_rx_sa_stats *rx_sa_stats;
struct macsec_dev_stats *dev_stats;
} stats;
u8 prepare:1; u8 prepare:1;
}; };
...@@ -262,6 +280,12 @@ struct macsec_ops { ...@@ -262,6 +280,12 @@ struct macsec_ops {
int (*mdo_add_txsa)(struct macsec_context *ctx); int (*mdo_add_txsa)(struct macsec_context *ctx);
int (*mdo_upd_txsa)(struct macsec_context *ctx); int (*mdo_upd_txsa)(struct macsec_context *ctx);
int (*mdo_del_txsa)(struct macsec_context *ctx); int (*mdo_del_txsa)(struct macsec_context *ctx);
/* Statistics */
int (*mdo_get_dev_stats)(struct macsec_context *ctx);
int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
}; };
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa); void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment