Commit 2f7ccf1d authored by David S. Miller

Merge branch 'net-tja11xx-macsec-support'

Radu Pirea says:

====================
Add MACsec support for TJA11XX C45 PHYs

This adds MACsec support for the TJA11XX PHYs. The MACsec block
encrypts Ethernet frames on the fly and has no buffering; the
operation grows each frame by 32 bytes. If frames are sent back to
back, the MACsec block does not have enough room to insert the SecTAG
and the ICV, and the frames are dropped.

To mitigate this, the PHY can parse frames carrying a specific
ethertype plus some padding bytes and replace them with the SecTAG and
ICV. The padding bytes may be dummies, or may carry information about
the TX SC that must be used to encrypt the frame.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c2b2ee36 dc1a0038
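The "specific ethertype with some padding bytes" above is a 32-byte TLV that the driver pushes between the source MAC address and the original EtherType; on egress the PHY swaps those 32 bytes for the SecTAG and places the ICV at the end of the frame. Below is a minimal stand-alone sketch of that tag, mirroring struct tja11xx_tlv_header and ETH_P_TJA11XX_TLV from the driver diff further down; the struct and function names here are illustrative, not driver code.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h> /* htons() */

#define ETH_P_TJA11XX_TLV 0x4e58 /* ethertype the PHY parses */

/* 32 bytes in all, matching TJA11XX_TLV_TX_NEEDED_HEADROOM: the room
 * reserved in front of the payload so the PHY can replace the tag with
 * the SecTAG and ICV without growing the frame in flight.
 */
struct tja11xx_tx_tag_sketch {
	uint16_t h_proto;	/* htons(ETH_P_TJA11XX_TLV) */
	uint8_t  subtype;	/* always 1 in this driver */
	uint8_t  len;		/* payload length, 28 */
	uint8_t  payload[28];	/* dummy bytes, or TX SC selection info */
} __attribute__((packed));

/* Fill the tag the same way nxp_c45_mdo_insert_tx_tag() does. */
static void fill_tx_tag_sketch(struct tja11xx_tx_tag_sketch *tag)
{
	tag->h_proto = htons(ETH_P_TJA11XX_TLV);
	tag->subtype = 1;
	tag->len = sizeof(tag->payload);
	memset(tag->payload, 0, sizeof(tag->payload));
}

The tagged frame thus reads DA | SA | 0x4E58 tag | original EtherType | payload, and leaves the PHY as DA | SA | SecTAG | encrypted payload | ICV.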
......@@ -15444,7 +15444,7 @@ NXP C45 TJA11XX PHY DRIVER
M: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
L: netdev@vger.kernel.org
S: Maintained
-F:	drivers/net/phy/nxp-c45-tja11xx.c
+F:	drivers/net/phy/nxp-c45-tja11xx*
NXP FSPI DRIVER
M: Han Xu <han.xu@nxp.com>
......
......@@ -93,6 +93,8 @@ struct pcpu_secy_stats {
* @secys: linked list of SecY's on the underlying device
* @gro_cells: pointer to the Generic Receive Offload cell
* @offload: status of offloading on the MACsec device
* @insert_tx_tag: when offloading, the device requires the driver to insert
*	an additional tag
*/
struct macsec_dev {
struct macsec_secy secy;
......@@ -102,6 +104,7 @@ struct macsec_dev {
struct list_head secys;
struct gro_cells gro_cells;
enum macsec_offload offload;
bool insert_tx_tag;
};
/**
......@@ -604,26 +607,11 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
-	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
-		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
-		struct sk_buff *nskb = skb_copy_expand(skb,
-						       MACSEC_NEEDED_HEADROOM,
-						       MACSEC_NEEDED_TAILROOM,
-						       GFP_ATOMIC);
-		if (likely(nskb)) {
-			consume_skb(skb);
-			skb = nskb;
-		} else {
-			macsec_txsa_put(tx_sa);
-			kfree_skb(skb);
-			return ERR_PTR(-ENOMEM);
-		}
-	} else {
-		skb = skb_unshare(skb, GFP_ATOMIC);
-		if (!skb) {
-			macsec_txsa_put(tx_sa);
-			return ERR_PTR(-ENOMEM);
-		}
-	}
+	ret = skb_ensure_writable_head_tail(skb, dev);
+	if (unlikely(ret < 0)) {
+		macsec_txsa_put(tx_sa);
+		kfree_skb(skb);
+		return ERR_PTR(ret);
+	}
unprotected_len = skb->len;
......@@ -2583,6 +2571,33 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
return false;
}
static bool macsec_needs_tx_tag(struct macsec_dev *macsec,
const struct macsec_ops *ops)
{
return macsec->offload == MACSEC_OFFLOAD_PHY &&
ops->mdo_insert_tx_tag;
}
static void macsec_set_head_tail_room(struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
int needed_headroom, needed_tailroom;
const struct macsec_ops *ops;
ops = macsec_get_ops(macsec, NULL);
if (ops) {
needed_headroom = ops->needed_headroom;
needed_tailroom = ops->needed_tailroom;
} else {
needed_headroom = MACSEC_NEEDED_HEADROOM;
needed_tailroom = MACSEC_NEEDED_TAILROOM;
}
dev->needed_headroom = real_dev->needed_headroom + needed_headroom;
dev->needed_tailroom = real_dev->needed_tailroom + needed_tailroom;
}
static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
{
enum macsec_offload prev_offload;
......@@ -2620,8 +2635,13 @@ static int macsec_update_offload(struct net_device *dev, enum macsec_offload off
ctx.secy = &macsec->secy;
ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
: macsec_offload(ops->mdo_add_secy, &ctx);
-	if (ret)
+	if (ret) {
 		macsec->offload = prev_offload;
+		return ret;
+	}
+
+	macsec_set_head_tail_room(dev);
+	macsec->insert_tx_tag = macsec_needs_tx_tag(macsec, ops);
return ret;
}
......@@ -3379,6 +3399,40 @@ static struct genl_family macsec_fam __ro_after_init = {
.resv_start_op = MACSEC_CMD_UPD_OFFLOAD + 1,
};
static struct sk_buff *macsec_insert_tx_tag(struct sk_buff *skb,
struct net_device *dev)
{
struct macsec_dev *macsec = macsec_priv(dev);
const struct macsec_ops *ops;
struct phy_device *phydev;
struct macsec_context ctx;
int skb_final_len;
int err;
ops = macsec_get_ops(macsec, &ctx);
skb_final_len = skb->len - ETH_HLEN + ops->needed_headroom +
ops->needed_tailroom;
if (unlikely(skb_final_len > macsec->real_dev->mtu)) {
err = -EINVAL;
goto cleanup;
}
phydev = macsec->real_dev->phydev;
err = skb_ensure_writable_head_tail(skb, dev);
if (unlikely(err < 0))
goto cleanup;
err = ops->mdo_insert_tx_tag(phydev, skb);
if (unlikely(err))
goto cleanup;
return skb;
cleanup:
kfree_skb(skb);
return ERR_PTR(err);
}
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
......@@ -3393,6 +3447,15 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb_dst_drop(skb);
dst_hold(&md_dst->dst);
skb_dst_set(skb, &md_dst->dst);
if (macsec->insert_tx_tag) {
skb = macsec_insert_tx_tag(skb, dev);
if (IS_ERR(skb)) {
DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
}
skb->dev = macsec->real_dev;
return dev_queue_xmit(skb);
}
......@@ -3454,10 +3517,7 @@ static int macsec_dev_init(struct net_device *dev)
dev->features = real_dev->features & MACSEC_FEATURES;
dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
-	dev->needed_headroom = real_dev->needed_headroom +
-			       MACSEC_NEEDED_HEADROOM;
-	dev->needed_tailroom = real_dev->needed_tailroom +
-			       MACSEC_NEEDED_TAILROOM;
+	macsec_set_head_tail_room(dev);
if (is_zero_ether_addr(dev->dev_addr))
eth_hw_addr_inherit(dev, real_dev);
......@@ -3604,21 +3664,19 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
struct sockaddr *addr = p;
u8 old_addr[ETH_ALEN];
int err;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
-	if (!(dev->flags & IFF_UP))
-		goto out;
-
-	err = dev_uc_add(real_dev, addr->sa_data);
-	if (err < 0)
-		return err;
-
-	dev_uc_del(real_dev, dev->dev_addr);
-
-out:
+	if (dev->flags & IFF_UP) {
+		err = dev_uc_add(real_dev, addr->sa_data);
+		if (err < 0)
+			return err;
+	}
+
+	ether_addr_copy(old_addr, dev->dev_addr);
eth_hw_addr_set(dev, addr->sa_data);
/* If h/w offloading is available, propagate to the device */
......@@ -3627,13 +3685,29 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
struct macsec_context ctx;
ops = macsec_get_ops(macsec, &ctx);
-		if (ops) {
-			ctx.secy = &macsec->secy;
-			macsec_offload(ops->mdo_upd_secy, &ctx);
-		}
+		if (!ops) {
+			err = -EOPNOTSUPP;
+			goto restore_old_addr;
+		}
+
+		ctx.secy = &macsec->secy;
+		err = macsec_offload(ops->mdo_upd_secy, &ctx);
+		if (err)
+			goto restore_old_addr;
}
if (dev->flags & IFF_UP)
dev_uc_del(real_dev, old_addr);
return 0;
restore_old_addr:
if (dev->flags & IFF_UP)
dev_uc_del(real_dev, addr->sa_data);
eth_hw_addr_set(dev, old_addr);
return err;
}
static int macsec_change_mtu(struct net_device *dev, int new_mtu)
......@@ -4126,6 +4200,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
err = macsec_offload(ops->mdo_add_secy, &ctx);
if (err)
goto del_dev;
macsec->insert_tx_tag =
macsec_needs_tx_tag(macsec, ops);
}
}
......
......@@ -3,11 +3,6 @@
#include <net/macsec.h>
#include "netdevsim.h"
-static inline u64 sci_to_cpu(sci_t sci)
-{
-	return be64_to_cpu((__force __be64)sci);
-}
static int nsim_macsec_find_secy(struct netdevsim *ns, sci_t sci)
{
int i;
......
......@@ -317,9 +317,10 @@ config NXP_CBTX_PHY
config NXP_C45_TJA11XX_PHY
tristate "NXP C45 TJA11XX PHYs"
depends on PTP_1588_CLOCK_OPTIONAL
depends on MACSEC || !MACSEC
help
Enable support for NXP C45 TJA11XX PHYs.
-	  Currently supports the TJA1103 and TJA1120 PHYs.
+	  Currently supports the TJA1103, TJA1104 and TJA1120 PHYs.
config NXP_TJA11XX_PHY
tristate "NXP TJA11xx PHYs support"
......
......@@ -84,7 +84,11 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
-obj-$(CONFIG_NXP_C45_TJA11XX_PHY)	+= nxp-c45-tja11xx.o
+nxp-c45-tja-objs			+= nxp-c45-tja11xx.o
+ifdef CONFIG_MACSEC
+nxp-c45-tja-objs			+= nxp-c45-tja11xx-macsec.o
+endif
+obj-$(CONFIG_NXP_C45_TJA11XX_PHY)	+= nxp-c45-tja.o
obj-$(CONFIG_NXP_CBTX_PHY) += nxp-cbtx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
......
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PTP PHY driver interface
* Copyright 2023 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
#include <linux/delay.h>
#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/processor.h>
#include <net/dst_metadata.h>
#include <net/macsec.h>
#include "nxp-c45-tja11xx.h"
#define MACSEC_REG_SIZE 32
#define TX_SC_MAX 4
#define TX_SC_BIT(secy_id) BIT(MACSEC_REG_SIZE - (secy_id) - 1)
#define VEND1_MACSEC_BASE 0x9000
#define MACSEC_CFG 0x0000
#define MACSEC_CFG_BYPASS BIT(1)
#define MACSEC_CFG_S0I BIT(0)
#define MACSEC_TPNET 0x0044
#define PN_WRAP_THRESHOLD 0xffffffff
#define MACSEC_RXSCA 0x0080
#define MACSEC_RXSCKA 0x0084
#define MACSEC_TXSCA 0x00C0
#define MACSEC_TXSCKA 0x00C4
#define MACSEC_RXSC_SCI_1H 0x0100
#define MACSEC_RXSC_CFG 0x0128
#define MACSEC_RXSC_CFG_XPN BIT(25)
#define MACSEC_RXSC_CFG_AES_256 BIT(24)
#define MACSEC_RXSC_CFG_SCI_EN BIT(11)
#define MACSEC_RXSC_CFG_RP BIT(10)
#define MACSEC_RXSC_CFG_VF_MASK GENMASK(9, 8)
#define MACSEC_RXSC_CFG_VF_OFF 8
#define MACSEC_RPW 0x012C
#define MACSEC_RXSA_A_CS 0x0180
#define MACSEC_RXSA_A_NPN 0x0184
#define MACSEC_RXSA_A_XNPN 0x0188
#define MACSEC_RXSA_A_LNPN 0x018C
#define MACSEC_RXSA_A_LXNPN 0x0190
#define MACSEC_RXSA_B_CS 0x01C0
#define MACSEC_RXSA_B_NPN 0x01C4
#define MACSEC_RXSA_B_XNPN 0x01C8
#define MACSEC_RXSA_B_LNPN 0x01CC
#define MACSEC_RXSA_B_LXNPN 0x01D0
#define MACSEC_RXSA_CS_AN_OFF 1
#define MACSEC_RXSA_CS_EN BIT(0)
#define MACSEC_TXSC_SCI_1H 0x0200
#define MACSEC_TXSC_CFG 0x0228
#define MACSEC_TXSC_CFG_XPN BIT(25)
#define MACSEC_TXSC_CFG_AES_256 BIT(24)
#define MACSEC_TXSC_CFG_AN_MASK GENMASK(19, 18)
#define MACSEC_TXSC_CFG_AN_OFF 18
#define MACSEC_TXSC_CFG_ASA BIT(17)
#define MACSEC_TXSC_CFG_SCE BIT(16)
#define MACSEC_TXSC_CFG_ENCRYPT BIT(4)
#define MACSEC_TXSC_CFG_PROTECT BIT(3)
#define MACSEC_TXSC_CFG_SEND_SCI BIT(2)
#define MACSEC_TXSC_CFG_END_STATION BIT(1)
#define MACSEC_TXSC_CFG_SCB BIT(0)
#define MACSEC_TXSA_A_CS 0x0280
#define MACSEC_TXSA_A_NPN 0x0284
#define MACSEC_TXSA_A_XNPN 0x0288
#define MACSEC_TXSA_B_CS 0x02C0
#define MACSEC_TXSA_B_NPN 0x02C4
#define MACSEC_TXSA_B_XNPN 0x02C8
#define MACSEC_SA_CS_A BIT(31)
#define MACSEC_EVR 0x0400
#define MACSEC_EVER 0x0404
#define MACSEC_RXSA_A_KA 0x0700
#define MACSEC_RXSA_A_SSCI 0x0720
#define MACSEC_RXSA_A_SALT 0x0724
#define MACSEC_RXSA_B_KA 0x0740
#define MACSEC_RXSA_B_SSCI 0x0760
#define MACSEC_RXSA_B_SALT 0x0764
#define MACSEC_TXSA_A_KA 0x0780
#define MACSEC_TXSA_A_SSCI 0x07A0
#define MACSEC_TXSA_A_SALT 0x07A4
#define MACSEC_TXSA_B_KA 0x07C0
#define MACSEC_TXSA_B_SSCI 0x07E0
#define MACSEC_TXSA_B_SALT 0x07E4
#define MACSEC_UPFR0D2 0x0A08
#define MACSEC_UPFR0M1 0x0A10
#define MACSEC_OVP BIT(12)
#define MACSEC_UPFR0M2 0x0A14
#define ETYPE_MASK 0xffff
#define MACSEC_UPFR0R 0x0A18
#define MACSEC_UPFR_EN BIT(0)
#define ADPTR_CNTRL 0x0F00
#define ADPTR_CNTRL_CONFIG_EN BIT(14)
#define ADPTR_CNTRL_ADPTR_EN BIT(12)
#define ADPTR_TX_TAG_CNTRL 0x0F0C
#define ADPTR_TX_TAG_CNTRL_ENA BIT(31)
#define TX_SC_FLT_BASE 0x800
#define TX_SC_FLT_SIZE 0x10
#define TX_FLT_BASE(flt_id) (TX_SC_FLT_BASE + \
TX_SC_FLT_SIZE * (flt_id))
#define TX_SC_FLT_OFF_MAC_DA_SA 0x04
#define TX_SC_FLT_OFF_MAC_SA 0x08
#define TX_SC_FLT_OFF_MAC_CFG 0x0C
#define TX_SC_FLT_BY_SA BIT(14)
#define TX_SC_FLT_EN BIT(8)
#define TX_SC_FLT_MAC_DA_SA(base) ((base) + TX_SC_FLT_OFF_MAC_DA_SA)
#define TX_SC_FLT_MAC_SA(base) ((base) + TX_SC_FLT_OFF_MAC_SA)
#define TX_SC_FLT_MAC_CFG(base) ((base) + TX_SC_FLT_OFF_MAC_CFG)
#define ADAPTER_EN BIT(6)
#define MACSEC_EN BIT(5)
#define MACSEC_INOV1HS 0x0140
#define MACSEC_INOV2HS 0x0144
#define MACSEC_INOD1HS 0x0148
#define MACSEC_INOD2HS 0x014C
#define MACSEC_RXSCIPUS 0x0150
#define MACSEC_RXSCIPDS 0x0154
#define MACSEC_RXSCIPLS 0x0158
#define MACSEC_RXAN0INUSS 0x0160
#define MACSEC_RXAN0IPUSS 0x0170
#define MACSEC_RXSA_A_IPOS 0x0194
#define MACSEC_RXSA_A_IPIS 0x01B0
#define MACSEC_RXSA_A_IPNVS 0x01B4
#define MACSEC_RXSA_B_IPOS 0x01D4
#define MACSEC_RXSA_B_IPIS 0x01F0
#define MACSEC_RXSA_B_IPNVS 0x01F4
#define MACSEC_OPUS 0x021C
#define MACSEC_OPTLS 0x022C
#define MACSEC_OOP1HS 0x0240
#define MACSEC_OOP2HS 0x0244
#define MACSEC_OOE1HS 0x0248
#define MACSEC_OOE2HS 0x024C
#define MACSEC_TXSA_A_OPPS 0x028C
#define MACSEC_TXSA_A_OPES 0x0290
#define MACSEC_TXSA_B_OPPS 0x02CC
#define MACSEC_TXSA_B_OPES 0x02D0
#define MACSEC_INPWTS 0x0630
#define MACSEC_INPBTS 0x0638
#define MACSEC_IPSNFS 0x063C
#define TJA11XX_TLV_TX_NEEDED_HEADROOM (32)
#define TJA11XX_TLV_NEEDED_TAILROOM (0)
#define ETH_P_TJA11XX_TLV (0x4e58)
enum nxp_c45_sa_type {
TX_SA,
RX_SA,
};
struct nxp_c45_sa {
void *sa;
const struct nxp_c45_sa_regs *regs;
enum nxp_c45_sa_type type;
bool is_key_a;
u8 an;
struct list_head list;
};
struct nxp_c45_secy {
struct macsec_secy *secy;
struct macsec_rx_sc *rx_sc;
struct list_head sa_list;
int secy_id;
bool rx_sc0_impl;
struct list_head list;
};
struct nxp_c45_macsec {
struct list_head secy_list;
DECLARE_BITMAP(secy_bitmap, TX_SC_MAX);
DECLARE_BITMAP(tx_sc_bitmap, TX_SC_MAX);
};
struct nxp_c45_sa_regs {
u16 cs;
u16 npn;
u16 xnpn;
u16 lnpn;
u16 lxnpn;
u16 ka;
u16 ssci;
u16 salt;
u16 ipis;
u16 ipnvs;
u16 ipos;
u16 opps;
u16 opes;
};
static const struct nxp_c45_sa_regs rx_sa_a_regs = {
.cs = MACSEC_RXSA_A_CS,
.npn = MACSEC_RXSA_A_NPN,
.xnpn = MACSEC_RXSA_A_XNPN,
.lnpn = MACSEC_RXSA_A_LNPN,
.lxnpn = MACSEC_RXSA_A_LXNPN,
.ka = MACSEC_RXSA_A_KA,
.ssci = MACSEC_RXSA_A_SSCI,
.salt = MACSEC_RXSA_A_SALT,
.ipis = MACSEC_RXSA_A_IPIS,
.ipnvs = MACSEC_RXSA_A_IPNVS,
.ipos = MACSEC_RXSA_A_IPOS,
};
static const struct nxp_c45_sa_regs rx_sa_b_regs = {
.cs = MACSEC_RXSA_B_CS,
.npn = MACSEC_RXSA_B_NPN,
.xnpn = MACSEC_RXSA_B_XNPN,
.lnpn = MACSEC_RXSA_B_LNPN,
.lxnpn = MACSEC_RXSA_B_LXNPN,
.ka = MACSEC_RXSA_B_KA,
.ssci = MACSEC_RXSA_B_SSCI,
.salt = MACSEC_RXSA_B_SALT,
.ipis = MACSEC_RXSA_B_IPIS,
.ipnvs = MACSEC_RXSA_B_IPNVS,
.ipos = MACSEC_RXSA_B_IPOS,
};
static const struct nxp_c45_sa_regs tx_sa_a_regs = {
.cs = MACSEC_TXSA_A_CS,
.npn = MACSEC_TXSA_A_NPN,
.xnpn = MACSEC_TXSA_A_XNPN,
.ka = MACSEC_TXSA_A_KA,
.ssci = MACSEC_TXSA_A_SSCI,
.salt = MACSEC_TXSA_A_SALT,
.opps = MACSEC_TXSA_A_OPPS,
.opes = MACSEC_TXSA_A_OPES,
};
static const struct nxp_c45_sa_regs tx_sa_b_regs = {
.cs = MACSEC_TXSA_B_CS,
.npn = MACSEC_TXSA_B_NPN,
.xnpn = MACSEC_TXSA_B_XNPN,
.ka = MACSEC_TXSA_B_KA,
.ssci = MACSEC_TXSA_B_SSCI,
.salt = MACSEC_TXSA_B_SALT,
.opps = MACSEC_TXSA_B_OPPS,
.opes = MACSEC_TXSA_B_OPES,
};
static const
struct nxp_c45_sa_regs *nxp_c45_sa_regs_get(enum nxp_c45_sa_type sa_type,
bool key_a)
{
if (sa_type == RX_SA)
if (key_a)
return &rx_sa_a_regs;
else
return &rx_sa_b_regs;
else if (sa_type == TX_SA)
if (key_a)
return &tx_sa_a_regs;
else
return &tx_sa_b_regs;
else
return NULL;
}
static int nxp_c45_macsec_write(struct phy_device *phydev, u16 addr, u32 value)
{
u32 lvalue = value;
u16 laddr;
int ret;
WARN_ON_ONCE(addr % 4);
phydev_dbg(phydev, "write addr 0x%x value 0x%x\n", addr, value);
	/* Each 32-bit MACsec register maps onto two consecutive 16-bit MMD
	 * registers, hence the halved offset and the paired accesses below.
	 */
	laddr = VEND1_MACSEC_BASE + addr / 2;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue);
if (ret)
return ret;
laddr += 1;
lvalue >>= 16;
ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, laddr, lvalue);
return ret;
}
static int nxp_c45_macsec_read(struct phy_device *phydev, u16 addr, u32 *value)
{
u32 lvalue;
u16 laddr;
int ret;
WARN_ON_ONCE(addr % 4);
laddr = VEND1_MACSEC_BASE + addr / 2;
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr);
if (ret < 0)
return ret;
laddr += 1;
lvalue = (u32)ret & 0xffff;
ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, laddr);
if (ret < 0)
return ret;
lvalue |= (u32)ret << 16;
*value = lvalue;
phydev_dbg(phydev, "read addr 0x%x value 0x%x\n", addr, *value);
return 0;
}
static void nxp_c45_macsec_read32_64(struct phy_device *phydev, u16 addr,
u64 *value)
{
u32 lvalue;
nxp_c45_macsec_read(phydev, addr, &lvalue);
*value = lvalue;
}
static void nxp_c45_macsec_read64(struct phy_device *phydev, u16 addr,
u64 *value)
{
u32 lvalue;
nxp_c45_macsec_read(phydev, addr, &lvalue);
*value = (u64)lvalue << 32;
nxp_c45_macsec_read(phydev, addr + 4, &lvalue);
*value |= lvalue;
}
static void nxp_c45_secy_irq_en(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy, bool en)
{
u32 reg;
nxp_c45_macsec_read(phydev, MACSEC_EVER, &reg);
if (en)
reg |= TX_SC_BIT(phy_secy->secy_id);
else
reg &= ~TX_SC_BIT(phy_secy->secy_id);
nxp_c45_macsec_write(phydev, MACSEC_EVER, reg);
}
static struct nxp_c45_secy *nxp_c45_find_secy(struct list_head *secy_list,
sci_t sci)
{
struct nxp_c45_secy *pos, *tmp;
list_for_each_entry_safe(pos, tmp, secy_list, list)
if (pos->secy->sci == sci)
return pos;
return ERR_PTR(-EINVAL);
}
static struct
nxp_c45_secy *nxp_c45_find_secy_by_id(struct list_head *secy_list,
int id)
{
struct nxp_c45_secy *pos, *tmp;
list_for_each_entry_safe(pos, tmp, secy_list, list)
if (pos->secy_id == id)
return pos;
return ERR_PTR(-EINVAL);
}
static void nxp_c45_secy_free(struct nxp_c45_secy *phy_secy)
{
list_del(&phy_secy->list);
kfree(phy_secy);
}
static struct nxp_c45_sa *nxp_c45_find_sa(struct list_head *sa_list,
enum nxp_c45_sa_type sa_type, u8 an)
{
struct nxp_c45_sa *pos, *tmp;
list_for_each_entry_safe(pos, tmp, sa_list, list)
if (pos->an == an && pos->type == sa_type)
return pos;
return ERR_PTR(-EINVAL);
}
static struct nxp_c45_sa *nxp_c45_sa_alloc(struct list_head *sa_list, void *sa,
enum nxp_c45_sa_type sa_type, u8 an)
{
struct nxp_c45_sa *first = NULL, *pos, *tmp;
int occurrences = 0;
list_for_each_entry_safe(pos, tmp, sa_list, list) {
if (pos->type != sa_type)
continue;
if (pos->an == an)
return ERR_PTR(-EINVAL);
first = pos;
occurrences++;
if (occurrences >= 2)
return ERR_PTR(-ENOSPC);
}
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return ERR_PTR(-ENOMEM);
if (first)
tmp->is_key_a = !first->is_key_a;
else
tmp->is_key_a = true;
tmp->sa = sa;
tmp->type = sa_type;
tmp->an = an;
tmp->regs = nxp_c45_sa_regs_get(tmp->type, tmp->is_key_a);
list_add_tail(&tmp->list, sa_list);
return tmp;
}
static void nxp_c45_sa_free(struct nxp_c45_sa *sa)
{
list_del(&sa->list);
kfree(sa);
}
static void nxp_c45_sa_list_free(struct list_head *sa_list)
{
struct nxp_c45_sa *pos, *tmp;
list_for_each_entry_safe(pos, tmp, sa_list, list)
nxp_c45_sa_free(pos);
}
static void nxp_c45_sa_set_pn(struct phy_device *phydev,
struct nxp_c45_sa *sa, u64 pn,
u32 replay_window)
{
const struct nxp_c45_sa_regs *sa_regs = sa->regs;
pn_t npn = {.full64 = pn};
pn_t lnpn;
nxp_c45_macsec_write(phydev, sa_regs->npn, npn.lower);
nxp_c45_macsec_write(phydev, sa_regs->xnpn, npn.upper);
if (sa->type != RX_SA)
return;
if (pn > replay_window)
lnpn.full64 = pn - replay_window;
else
lnpn.full64 = 1;
nxp_c45_macsec_write(phydev, sa_regs->lnpn, lnpn.lower);
nxp_c45_macsec_write(phydev, sa_regs->lxnpn, lnpn.upper);
}
static void nxp_c45_sa_set_key(struct macsec_context *ctx,
const struct nxp_c45_sa_regs *sa_regs,
u8 *salt, ssci_t ssci)
{
struct phy_device *phydev = ctx->phydev;
u32 key_size = ctx->secy->key_len / 4;
u32 salt_size = MACSEC_SALT_LEN / 4;
u32 *key_u32 = (u32 *)ctx->sa.key;
u32 *salt_u32 = (u32 *)salt;
u32 reg, value;
int i;
for (i = 0; i < key_size; i++) {
reg = sa_regs->ka + i * 4;
value = (__force u32)cpu_to_be32(key_u32[i]);
nxp_c45_macsec_write(phydev, reg, value);
}
if (ctx->secy->xpn) {
for (i = 0; i < salt_size; i++) {
reg = sa_regs->salt + (2 - i) * 4;
value = (__force u32)cpu_to_be32(salt_u32[i]);
nxp_c45_macsec_write(phydev, reg, value);
}
value = (__force u32)cpu_to_be32((__force u32)ssci);
nxp_c45_macsec_write(phydev, sa_regs->ssci, value);
}
nxp_c45_macsec_write(phydev, sa_regs->cs, MACSEC_SA_CS_A);
}
static void nxp_c45_rx_sa_clear_stats(struct phy_device *phydev,
struct nxp_c45_sa *sa)
{
nxp_c45_macsec_write(phydev, sa->regs->ipis, 0);
nxp_c45_macsec_write(phydev, sa->regs->ipnvs, 0);
nxp_c45_macsec_write(phydev, sa->regs->ipos, 0);
nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + sa->an * 4, 0);
nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + sa->an * 4, 0);
}
static void nxp_c45_rx_sa_read_stats(struct phy_device *phydev,
struct nxp_c45_sa *sa,
struct macsec_rx_sa_stats *stats)
{
nxp_c45_macsec_read(phydev, sa->regs->ipis, &stats->InPktsInvalid);
nxp_c45_macsec_read(phydev, sa->regs->ipnvs, &stats->InPktsNotValid);
nxp_c45_macsec_read(phydev, sa->regs->ipos, &stats->InPktsOK);
}
static void nxp_c45_tx_sa_clear_stats(struct phy_device *phydev,
struct nxp_c45_sa *sa)
{
nxp_c45_macsec_write(phydev, sa->regs->opps, 0);
nxp_c45_macsec_write(phydev, sa->regs->opes, 0);
}
static void nxp_c45_tx_sa_read_stats(struct phy_device *phydev,
struct nxp_c45_sa *sa,
struct macsec_tx_sa_stats *stats)
{
nxp_c45_macsec_read(phydev, sa->regs->opps, &stats->OutPktsProtected);
nxp_c45_macsec_read(phydev, sa->regs->opes, &stats->OutPktsEncrypted);
}
static void nxp_c45_rx_sa_update(struct phy_device *phydev,
struct nxp_c45_sa *sa, bool en)
{
const struct nxp_c45_sa_regs *sa_regs = sa->regs;
u32 cfg;
cfg = sa->an << MACSEC_RXSA_CS_AN_OFF;
cfg |= en ? MACSEC_RXSA_CS_EN : 0;
nxp_c45_macsec_write(phydev, sa_regs->cs, cfg);
}
static void nxp_c45_tx_sa_update(struct phy_device *phydev,
struct nxp_c45_sa *sa, bool en)
{
u32 cfg = 0;
nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
cfg &= ~MACSEC_TXSC_CFG_AN_MASK;
cfg |= sa->an << MACSEC_TXSC_CFG_AN_OFF;
if (sa->is_key_a)
cfg &= ~MACSEC_TXSC_CFG_ASA;
else
cfg |= MACSEC_TXSC_CFG_ASA;
if (en)
cfg |= MACSEC_TXSC_CFG_SCE;
else
cfg &= ~MACSEC_TXSC_CFG_SCE;
nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
}
static void nxp_c45_set_sci(struct phy_device *phydev, u16 sci_base_addr,
sci_t sci)
{
u64 lsci = sci_to_cpu(sci);
nxp_c45_macsec_write(phydev, sci_base_addr, lsci >> 32);
nxp_c45_macsec_write(phydev, sci_base_addr + 4, lsci);
}
static bool nxp_c45_port_is_1(sci_t sci)
{
u16 port = sci_to_cpu(sci);
return port == 1;
}
static void nxp_c45_select_secy(struct phy_device *phydev, u8 id)
{
nxp_c45_macsec_write(phydev, MACSEC_RXSCA, id);
nxp_c45_macsec_write(phydev, MACSEC_RXSCKA, id);
nxp_c45_macsec_write(phydev, MACSEC_TXSCA, id);
nxp_c45_macsec_write(phydev, MACSEC_TXSCKA, id);
}
static bool nxp_c45_secy_valid(struct nxp_c45_secy *phy_secy,
bool can_rx_sc0_impl)
{
bool end_station = phy_secy->secy->tx_sc.end_station;
bool scb = phy_secy->secy->tx_sc.scb;
phy_secy->rx_sc0_impl = false;
if (end_station) {
if (!nxp_c45_port_is_1(phy_secy->secy->sci))
return false;
if (!phy_secy->rx_sc)
return true;
return nxp_c45_port_is_1(phy_secy->rx_sc->sci);
}
if (scb)
return false;
if (!can_rx_sc0_impl)
return false;
if (phy_secy->secy_id != 0)
return false;
phy_secy->rx_sc0_impl = true;
return true;
}
static bool nxp_c45_rx_sc0_impl(struct nxp_c45_secy *phy_secy)
{
bool end_station = phy_secy->secy->tx_sc.end_station;
bool send_sci = phy_secy->secy->tx_sc.send_sci;
bool scb = phy_secy->secy->tx_sc.scb;
return !end_station && !send_sci && !scb;
}
static bool nxp_c45_mac_addr_free(struct macsec_context *ctx)
{
struct nxp_c45_phy *priv = ctx->phydev->priv;
struct nxp_c45_secy *pos, *tmp;
list_for_each_entry_safe(pos, tmp, &priv->macsec->secy_list, list) {
if (pos->secy == ctx->secy)
continue;
if (memcmp(pos->secy->netdev->dev_addr,
ctx->secy->netdev->dev_addr, ETH_ALEN) == 0)
return false;
}
return true;
}
static void nxp_c45_tx_sc_en_flt(struct phy_device *phydev, int secy_id,
bool en)
{
u32 tx_flt_base = TX_FLT_BASE(secy_id);
u32 reg = 0;
nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
if (en)
reg |= TX_SC_FLT_EN;
else
reg &= ~TX_SC_FLT_EN;
nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
}
static void nxp_c45_tx_sc_set_flt(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy)
{
const u8 *dev_addr = phy_secy->secy->netdev->dev_addr;
u32 tx_flt_base = TX_FLT_BASE(phy_secy->secy_id);
u32 reg;
reg = dev_addr[0] << 8 | dev_addr[1];
nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_DA_SA(tx_flt_base), reg);
reg = dev_addr[5] | dev_addr[4] << 8 | dev_addr[3] << 16 |
dev_addr[2] << 24;
nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_SA(tx_flt_base), reg);
nxp_c45_macsec_read(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), &reg);
reg &= TX_SC_FLT_EN;
reg |= TX_SC_FLT_BY_SA | phy_secy->secy_id;
nxp_c45_macsec_write(phydev, TX_SC_FLT_MAC_CFG(tx_flt_base), reg);
}
static void nxp_c45_tx_sc_update(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy)
{
u32 cfg = 0;
nxp_c45_macsec_read(phydev, MACSEC_TXSC_CFG, &cfg);
phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
if (phy_secy->secy->xpn)
cfg |= MACSEC_TXSC_CFG_XPN;
else
cfg &= ~MACSEC_TXSC_CFG_XPN;
phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
if (phy_secy->secy->key_len == 32)
cfg |= MACSEC_TXSC_CFG_AES_256;
else
cfg &= ~MACSEC_TXSC_CFG_AES_256;
phydev_dbg(phydev, "encryption %s\n",
phy_secy->secy->tx_sc.encrypt ? "on" : "off");
if (phy_secy->secy->tx_sc.encrypt)
cfg |= MACSEC_TXSC_CFG_ENCRYPT;
else
cfg &= ~MACSEC_TXSC_CFG_ENCRYPT;
phydev_dbg(phydev, "protect frames %s\n",
phy_secy->secy->protect_frames ? "on" : "off");
if (phy_secy->secy->protect_frames)
cfg |= MACSEC_TXSC_CFG_PROTECT;
else
cfg &= ~MACSEC_TXSC_CFG_PROTECT;
phydev_dbg(phydev, "send sci %s\n",
phy_secy->secy->tx_sc.send_sci ? "on" : "off");
if (phy_secy->secy->tx_sc.send_sci)
cfg |= MACSEC_TXSC_CFG_SEND_SCI;
else
cfg &= ~MACSEC_TXSC_CFG_SEND_SCI;
phydev_dbg(phydev, "end station %s\n",
phy_secy->secy->tx_sc.end_station ? "on" : "off");
if (phy_secy->secy->tx_sc.end_station)
cfg |= MACSEC_TXSC_CFG_END_STATION;
else
cfg &= ~MACSEC_TXSC_CFG_END_STATION;
phydev_dbg(phydev, "scb %s\n",
phy_secy->secy->tx_sc.scb ? "on" : "off");
if (phy_secy->secy->tx_sc.scb)
cfg |= MACSEC_TXSC_CFG_SCB;
else
cfg &= ~MACSEC_TXSC_CFG_SCB;
nxp_c45_macsec_write(phydev, MACSEC_TXSC_CFG, cfg);
}
static void nxp_c45_tx_sc_clear_stats(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy)
{
struct nxp_c45_sa *pos, *tmp;
list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list)
if (pos->type == TX_SA)
nxp_c45_tx_sa_clear_stats(phydev, pos);
nxp_c45_macsec_write(phydev, MACSEC_OPUS, 0);
nxp_c45_macsec_write(phydev, MACSEC_OPTLS, 0);
nxp_c45_macsec_write(phydev, MACSEC_OOP1HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_OOP2HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_OOE1HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_OOE2HS, 0);
}
static void nxp_c45_set_rx_sc0_impl(struct phy_device *phydev,
bool enable)
{
u32 reg = 0;
nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
if (enable)
reg |= MACSEC_CFG_S0I;
else
reg &= ~MACSEC_CFG_S0I;
nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
}
static bool nxp_c45_is_rx_sc0_impl(struct list_head *secy_list)
{
struct nxp_c45_secy *pos, *tmp;
list_for_each_entry_safe(pos, tmp, secy_list, list)
if (pos->rx_sc0_impl)
return pos->rx_sc0_impl;
return false;
}
static void nxp_c45_rx_sc_en(struct phy_device *phydev,
struct macsec_rx_sc *rx_sc, bool en)
{
u32 reg = 0;
nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &reg);
if (rx_sc->active && en)
reg |= MACSEC_RXSC_CFG_SCI_EN;
else
reg &= ~MACSEC_RXSC_CFG_SCI_EN;
nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, reg);
}
static void nxp_c45_rx_sc_update(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy)
{
struct macsec_rx_sc *rx_sc = phy_secy->rx_sc;
struct nxp_c45_phy *priv = phydev->priv;
u32 cfg = 0;
nxp_c45_macsec_read(phydev, MACSEC_RXSC_CFG, &cfg);
cfg &= ~MACSEC_RXSC_CFG_VF_MASK;
cfg = phy_secy->secy->validate_frames << MACSEC_RXSC_CFG_VF_OFF;
phydev_dbg(phydev, "validate frames %u\n",
phy_secy->secy->validate_frames);
phydev_dbg(phydev, "replay_protect %s window %u\n",
phy_secy->secy->replay_protect ? "on" : "off",
phy_secy->secy->replay_window);
if (phy_secy->secy->replay_protect) {
cfg |= MACSEC_RXSC_CFG_RP;
nxp_c45_macsec_write(phydev, MACSEC_RPW,
phy_secy->secy->replay_window);
} else {
cfg &= ~MACSEC_RXSC_CFG_RP;
}
phydev_dbg(phydev, "rx_sc->active %s\n",
rx_sc->active ? "on" : "off");
if (rx_sc->active &&
test_bit(phy_secy->secy_id, priv->macsec->secy_bitmap))
cfg |= MACSEC_RXSC_CFG_SCI_EN;
else
cfg &= ~MACSEC_RXSC_CFG_SCI_EN;
phydev_dbg(phydev, "key len %u\n", phy_secy->secy->key_len);
if (phy_secy->secy->key_len == 32)
cfg |= MACSEC_RXSC_CFG_AES_256;
else
cfg &= ~MACSEC_RXSC_CFG_AES_256;
phydev_dbg(phydev, "XPN %s\n", phy_secy->secy->xpn ? "on" : "off");
if (phy_secy->secy->xpn)
cfg |= MACSEC_RXSC_CFG_XPN;
else
cfg &= ~MACSEC_RXSC_CFG_XPN;
nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, cfg);
}
static void nxp_c45_rx_sc_clear_stats(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy)
{
struct nxp_c45_sa *pos, *tmp;
int i;
list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list)
if (pos->type == RX_SA)
nxp_c45_rx_sa_clear_stats(phydev, pos);
nxp_c45_macsec_write(phydev, MACSEC_INOD1HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_INOD2HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_INOV1HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_INOV2HS, 0);
nxp_c45_macsec_write(phydev, MACSEC_RXSCIPDS, 0);
nxp_c45_macsec_write(phydev, MACSEC_RXSCIPLS, 0);
nxp_c45_macsec_write(phydev, MACSEC_RXSCIPUS, 0);
for (i = 0; i < MACSEC_NUM_AN; i++) {
nxp_c45_macsec_write(phydev, MACSEC_RXAN0INUSS + i * 4, 0);
nxp_c45_macsec_write(phydev, MACSEC_RXAN0IPUSS + i * 4, 0);
}
}
static void nxp_c45_rx_sc_del(struct phy_device *phydev,
struct nxp_c45_secy *phy_secy)
{
struct nxp_c45_sa *pos, *tmp;
nxp_c45_macsec_write(phydev, MACSEC_RXSC_CFG, 0);
nxp_c45_macsec_write(phydev, MACSEC_RPW, 0);
nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, 0);
nxp_c45_rx_sc_clear_stats(phydev, phy_secy);
list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
if (pos->type == RX_SA) {
nxp_c45_rx_sa_update(phydev, pos, false);
nxp_c45_sa_free(pos);
}
}
}
static void nxp_c45_clear_global_stats(struct phy_device *phydev)
{
nxp_c45_macsec_write(phydev, MACSEC_INPBTS, 0);
nxp_c45_macsec_write(phydev, MACSEC_INPWTS, 0);
nxp_c45_macsec_write(phydev, MACSEC_IPSNFS, 0);
}
static void nxp_c45_macsec_en(struct phy_device *phydev, bool en)
{
u32 reg;
nxp_c45_macsec_read(phydev, MACSEC_CFG, &reg);
if (en)
reg |= MACSEC_CFG_BYPASS;
else
reg &= ~MACSEC_CFG_BYPASS;
nxp_c45_macsec_write(phydev, MACSEC_CFG, reg);
}
static int nxp_c45_mdo_dev_open(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, true);
nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl);
if (phy_secy->rx_sc)
nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, true);
any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
if (any_bit_set == TX_SC_MAX)
nxp_c45_macsec_en(phydev, true);
set_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
return 0;
}
static int nxp_c45_mdo_dev_stop(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
int any_bit_set;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_tx_sc_en_flt(phydev, phy_secy->secy_id, false);
if (phy_secy->rx_sc)
nxp_c45_rx_sc_en(phydev, phy_secy->rx_sc, false);
nxp_c45_set_rx_sc0_impl(phydev, false);
clear_bit(phy_secy->secy_id, priv->macsec->secy_bitmap);
any_bit_set = find_first_bit(priv->macsec->secy_bitmap, TX_SC_MAX);
if (any_bit_set == TX_SC_MAX)
nxp_c45_macsec_en(phydev, false);
return 0;
}
static int nxp_c45_mdo_add_secy(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
bool can_rx_sc0_impl;
int idx;
phydev_dbg(phydev, "add SecY SCI %016llx\n",
sci_to_cpu(ctx->secy->sci));
if (!nxp_c45_mac_addr_free(ctx))
return -EBUSY;
if (nxp_c45_is_rx_sc0_impl(&priv->macsec->secy_list))
return -EBUSY;
idx = find_first_zero_bit(priv->macsec->tx_sc_bitmap, TX_SC_MAX);
if (idx == TX_SC_MAX)
return -ENOSPC;
phy_secy = kzalloc(sizeof(*phy_secy), GFP_KERNEL);
if (!phy_secy)
return -ENOMEM;
INIT_LIST_HEAD(&phy_secy->sa_list);
phy_secy->secy = ctx->secy;
phy_secy->secy_id = idx;
	/* Point-to-point mode can be enabled only while no SecY has been
	 * added yet.
	 */
can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 0;
if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl)) {
kfree(phy_secy);
return -EINVAL;
}
phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_set_sci(phydev, MACSEC_TXSC_SCI_1H, ctx->secy->sci);
nxp_c45_tx_sc_set_flt(phydev, phy_secy);
nxp_c45_tx_sc_update(phydev, phy_secy);
if (phy_interrupt_is_valid(phydev))
nxp_c45_secy_irq_en(phydev, phy_secy, true);
set_bit(idx, priv->macsec->tx_sc_bitmap);
list_add_tail(&phy_secy->list, &priv->macsec->secy_list);
return 0;
}
static void nxp_c45_tx_sa_next(struct nxp_c45_secy *phy_secy,
struct nxp_c45_sa *next_sa, u8 encoding_sa)
{
struct nxp_c45_sa *sa;
sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, encoding_sa);
if (!IS_ERR(sa)) {
memcpy(next_sa, sa, sizeof(*sa));
} else {
next_sa->is_key_a = true;
next_sa->an = encoding_sa;
}
}
static int nxp_c45_mdo_upd_secy(struct macsec_context *ctx)
{
u8 encoding_sa = ctx->secy->tx_sc.encoding_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
struct nxp_c45_sa next_sa;
bool can_rx_sc0_impl;
phydev_dbg(phydev, "update SecY SCI %016llx\n",
sci_to_cpu(ctx->secy->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
if (!nxp_c45_mac_addr_free(ctx))
return -EBUSY;
	/* Point-to-point mode requires that the only SecY added is the one
	 * being updated.
	 */
can_rx_sc0_impl = list_count_nodes(&priv->macsec->secy_list) == 1;
if (!nxp_c45_secy_valid(phy_secy, can_rx_sc0_impl))
return -EINVAL;
phy_secy->rx_sc0_impl = nxp_c45_rx_sc0_impl(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_tx_sc_set_flt(phydev, phy_secy);
nxp_c45_tx_sc_update(phydev, phy_secy);
nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa);
nxp_c45_tx_sa_update(phydev, &next_sa, ctx->secy->operational);
nxp_c45_set_rx_sc0_impl(phydev, phy_secy->rx_sc0_impl);
if (phy_secy->rx_sc)
nxp_c45_rx_sc_update(phydev, phy_secy);
return 0;
}
static int nxp_c45_mdo_del_secy(struct macsec_context *ctx)
{
u8 encoding_sa = ctx->secy->tx_sc.encoding_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
struct nxp_c45_sa next_sa;
phydev_dbg(phydev, "delete SecY SCI %016llx\n",
sci_to_cpu(ctx->secy->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_mdo_dev_stop(ctx);
nxp_c45_tx_sa_next(phy_secy, &next_sa, encoding_sa);
nxp_c45_tx_sa_update(phydev, &next_sa, false);
nxp_c45_tx_sc_clear_stats(phydev, phy_secy);
if (phy_secy->rx_sc)
nxp_c45_rx_sc_del(phydev, phy_secy);
nxp_c45_sa_list_free(&phy_secy->sa_list);
if (phy_interrupt_is_valid(phydev))
nxp_c45_secy_irq_en(phydev, phy_secy, false);
clear_bit(phy_secy->secy_id, priv->macsec->tx_sc_bitmap);
nxp_c45_secy_free(phy_secy);
if (list_empty(&priv->macsec->secy_list))
nxp_c45_clear_global_stats(phydev);
return 0;
}
static int nxp_c45_mdo_add_rxsc(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
phydev_dbg(phydev, "add RX SC SCI %016llx %s\n",
sci_to_cpu(ctx->rx_sc->sci),
ctx->rx_sc->active ? "enabled" : "disabled");
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
if (phy_secy->rx_sc)
return -ENOSPC;
if (phy_secy->secy->tx_sc.end_station &&
!nxp_c45_port_is_1(ctx->rx_sc->sci))
return -EINVAL;
phy_secy->rx_sc = ctx->rx_sc;
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_set_sci(phydev, MACSEC_RXSC_SCI_1H, ctx->rx_sc->sci);
nxp_c45_rx_sc_update(phydev, phy_secy);
return 0;
}
static int nxp_c45_mdo_upd_rxsc(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
phydev_dbg(phydev, "update RX SC SCI %016llx %s\n",
sci_to_cpu(ctx->rx_sc->sci),
ctx->rx_sc->active ? "enabled" : "disabled");
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_rx_sc_update(phydev, phy_secy);
return 0;
}
static int nxp_c45_mdo_del_rxsc(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
phydev_dbg(phydev, "delete RX SC SCI %016llx %s\n",
sci_to_cpu(ctx->rx_sc->sci),
ctx->rx_sc->active ? "enabled" : "disabled");
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_rx_sc_del(phydev, phy_secy);
phy_secy->rx_sc = NULL;
return 0;
}
static int nxp_c45_mdo_add_rxsa(struct macsec_context *ctx)
{
struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phydev_dbg(phydev, "add RX SA %u %s to RX SC SCI %016llx\n",
an, rx_sa->active ? "enabled" : "disabled",
sci_to_cpu(rx_sa->sc->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_sa_alloc(&phy_secy->sa_list, rx_sa, RX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn,
ctx->secy->replay_window);
nxp_c45_sa_set_key(ctx, sa->regs, rx_sa->key.salt.bytes, rx_sa->ssci);
nxp_c45_rx_sa_update(phydev, sa, rx_sa->active);
return 0;
}
static int nxp_c45_mdo_upd_rxsa(struct macsec_context *ctx)
{
struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phydev_dbg(phydev, "update RX SA %u %s to RX SC SCI %016llx\n",
an, rx_sa->active ? "enabled" : "disabled",
sci_to_cpu(rx_sa->sc->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
if (ctx->sa.update_pn)
nxp_c45_sa_set_pn(phydev, sa, rx_sa->next_pn,
ctx->secy->replay_window);
nxp_c45_rx_sa_update(phydev, sa, rx_sa->active);
return 0;
}
static int nxp_c45_mdo_del_rxsa(struct macsec_context *ctx)
{
struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phydev_dbg(phydev, "delete RX SA %u %s to RX SC SCI %016llx\n",
an, rx_sa->active ? "enabled" : "disabled",
sci_to_cpu(rx_sa->sc->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_rx_sa_update(phydev, sa, false);
nxp_c45_rx_sa_clear_stats(phydev, sa);
nxp_c45_sa_free(sa);
return 0;
}
static int nxp_c45_mdo_add_txsa(struct macsec_context *ctx)
{
struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phydev_dbg(phydev, "add TX SA %u %s to TX SC %016llx\n",
an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
sci_to_cpu(ctx->secy->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_sa_alloc(&phy_secy->sa_list, tx_sa, TX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0);
nxp_c45_sa_set_key(ctx, sa->regs, tx_sa->key.salt.bytes, tx_sa->ssci);
if (ctx->secy->tx_sc.encoding_sa == sa->an)
nxp_c45_tx_sa_update(phydev, sa, tx_sa->active);
return 0;
}
static int nxp_c45_mdo_upd_txsa(struct macsec_context *ctx)
{
struct macsec_tx_sa *tx_sa = ctx->sa.tx_sa;
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phydev_dbg(phydev, "update TX SA %u %s to TX SC %016llx\n",
an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
sci_to_cpu(ctx->secy->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
if (ctx->sa.update_pn)
nxp_c45_sa_set_pn(phydev, sa, tx_sa->next_pn, 0);
if (ctx->secy->tx_sc.encoding_sa == sa->an)
nxp_c45_tx_sa_update(phydev, sa, tx_sa->active);
return 0;
}
static int nxp_c45_mdo_del_txsa(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phydev_dbg(phydev, "delete TX SA %u %s to TX SC %016llx\n",
an, ctx->sa.tx_sa->active ? "enabled" : "disabled",
sci_to_cpu(ctx->secy->sci));
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
nxp_c45_select_secy(phydev, phy_secy->secy_id);
if (ctx->secy->tx_sc.encoding_sa == sa->an)
nxp_c45_tx_sa_update(phydev, sa, false);
nxp_c45_tx_sa_clear_stats(phydev, sa);
nxp_c45_sa_free(sa);
return 0;
}
static int nxp_c45_mdo_get_dev_stats(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct macsec_dev_stats *dev_stats;
struct nxp_c45_secy *phy_secy;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
dev_stats = ctx->stats.dev_stats;
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_macsec_read32_64(phydev, MACSEC_OPUS,
&dev_stats->OutPktsUntagged);
nxp_c45_macsec_read32_64(phydev, MACSEC_OPTLS,
&dev_stats->OutPktsTooLong);
nxp_c45_macsec_read32_64(phydev, MACSEC_INPBTS,
&dev_stats->InPktsBadTag);
if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT)
nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS,
&dev_stats->InPktsNoTag);
else
nxp_c45_macsec_read32_64(phydev, MACSEC_INPWTS,
&dev_stats->InPktsUntagged);
if (phy_secy->secy->validate_frames == MACSEC_VALIDATE_STRICT)
nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS,
&dev_stats->InPktsNoSCI);
else
nxp_c45_macsec_read32_64(phydev, MACSEC_IPSNFS,
&dev_stats->InPktsUnknownSCI);
/* Always 0. */
dev_stats->InPktsOverrun = 0;
return 0;
}
static int nxp_c45_mdo_get_tx_sc_stats(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct macsec_tx_sa_stats tx_sa_stats;
struct macsec_tx_sc_stats *stats;
struct nxp_c45_secy *phy_secy;
struct nxp_c45_sa *pos, *tmp;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
stats = ctx->stats.tx_sc_stats;
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_macsec_read64(phydev, MACSEC_OOE1HS,
&stats->OutOctetsEncrypted);
nxp_c45_macsec_read64(phydev, MACSEC_OOP1HS,
&stats->OutOctetsProtected);
list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
if (pos->type != TX_SA)
continue;
memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
nxp_c45_tx_sa_read_stats(phydev, pos, &tx_sa_stats);
stats->OutPktsEncrypted += tx_sa_stats.OutPktsEncrypted;
stats->OutPktsProtected += tx_sa_stats.OutPktsProtected;
}
return 0;
}
static int nxp_c45_mdo_get_tx_sa_stats(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct macsec_tx_sa_stats *stats;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_find_sa(&phy_secy->sa_list, TX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
stats = ctx->stats.tx_sa_stats;
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_tx_sa_read_stats(phydev, sa, stats);
return 0;
}
static int nxp_c45_mdo_get_rx_sc_stats(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct macsec_rx_sa_stats rx_sa_stats;
struct macsec_rx_sc_stats *stats;
struct nxp_c45_secy *phy_secy;
struct nxp_c45_sa *pos, *tmp;
u32 reg = 0;
int i;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
if (phy_secy->rx_sc != ctx->rx_sc)
return -EINVAL;
stats = ctx->stats.rx_sc_stats;
nxp_c45_select_secy(phydev, phy_secy->secy_id);
list_for_each_entry_safe(pos, tmp, &phy_secy->sa_list, list) {
if (pos->type != RX_SA)
continue;
memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
nxp_c45_rx_sa_read_stats(phydev, pos, &rx_sa_stats);
stats->InPktsInvalid += rx_sa_stats.InPktsInvalid;
stats->InPktsNotValid += rx_sa_stats.InPktsNotValid;
stats->InPktsOK += rx_sa_stats.InPktsOK;
}
for (i = 0; i < MACSEC_NUM_AN; i++) {
nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + i * 4, &reg);
stats->InPktsNotUsingSA += reg;
nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + i * 4, &reg);
stats->InPktsUnusedSA += reg;
}
nxp_c45_macsec_read64(phydev, MACSEC_INOD1HS,
&stats->InOctetsDecrypted);
nxp_c45_macsec_read64(phydev, MACSEC_INOV1HS,
&stats->InOctetsValidated);
nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPDS,
&stats->InPktsDelayed);
nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPLS,
&stats->InPktsLate);
nxp_c45_macsec_read32_64(phydev, MACSEC_RXSCIPUS,
&stats->InPktsUnchecked);
return 0;
}
static int nxp_c45_mdo_get_rx_sa_stats(struct macsec_context *ctx)
{
struct phy_device *phydev = ctx->phydev;
struct nxp_c45_phy *priv = phydev->priv;
struct macsec_rx_sa_stats *stats;
struct nxp_c45_secy *phy_secy;
u8 an = ctx->sa.assoc_num;
struct nxp_c45_sa *sa;
phy_secy = nxp_c45_find_secy(&priv->macsec->secy_list, ctx->secy->sci);
if (IS_ERR(phy_secy))
return PTR_ERR(phy_secy);
sa = nxp_c45_find_sa(&phy_secy->sa_list, RX_SA, an);
if (IS_ERR(sa))
return PTR_ERR(sa);
stats = ctx->stats.rx_sa_stats;
nxp_c45_select_secy(phydev, phy_secy->secy_id);
nxp_c45_rx_sa_read_stats(phydev, sa, stats);
nxp_c45_macsec_read(phydev, MACSEC_RXAN0INUSS + an * 4,
&stats->InPktsNotUsingSA);
nxp_c45_macsec_read(phydev, MACSEC_RXAN0IPUSS + an * 4,
&stats->InPktsUnusedSA);
return 0;
}
struct tja11xx_tlv_header {
struct ethhdr eth;
u8 subtype;
u8 len;
u8 payload[28];
};
static int nxp_c45_mdo_insert_tx_tag(struct phy_device *phydev,
struct sk_buff *skb)
{
struct tja11xx_tlv_header *tlv;
struct ethhdr *eth;
eth = eth_hdr(skb);
tlv = skb_push(skb, TJA11XX_TLV_TX_NEEDED_HEADROOM);
memmove(tlv, eth, sizeof(*eth));
skb_reset_mac_header(skb);
tlv->eth.h_proto = htons(ETH_P_TJA11XX_TLV);
tlv->subtype = 1;
tlv->len = sizeof(tlv->payload);
memset(tlv->payload, 0, sizeof(tlv->payload));
return 0;
}
static const struct macsec_ops nxp_c45_macsec_ops = {
.mdo_dev_open = nxp_c45_mdo_dev_open,
.mdo_dev_stop = nxp_c45_mdo_dev_stop,
.mdo_add_secy = nxp_c45_mdo_add_secy,
.mdo_upd_secy = nxp_c45_mdo_upd_secy,
.mdo_del_secy = nxp_c45_mdo_del_secy,
.mdo_add_rxsc = nxp_c45_mdo_add_rxsc,
.mdo_upd_rxsc = nxp_c45_mdo_upd_rxsc,
.mdo_del_rxsc = nxp_c45_mdo_del_rxsc,
.mdo_add_rxsa = nxp_c45_mdo_add_rxsa,
.mdo_upd_rxsa = nxp_c45_mdo_upd_rxsa,
.mdo_del_rxsa = nxp_c45_mdo_del_rxsa,
.mdo_add_txsa = nxp_c45_mdo_add_txsa,
.mdo_upd_txsa = nxp_c45_mdo_upd_txsa,
.mdo_del_txsa = nxp_c45_mdo_del_txsa,
.mdo_get_dev_stats = nxp_c45_mdo_get_dev_stats,
.mdo_get_tx_sc_stats = nxp_c45_mdo_get_tx_sc_stats,
.mdo_get_tx_sa_stats = nxp_c45_mdo_get_tx_sa_stats,
.mdo_get_rx_sc_stats = nxp_c45_mdo_get_rx_sc_stats,
.mdo_get_rx_sa_stats = nxp_c45_mdo_get_rx_sa_stats,
.mdo_insert_tx_tag = nxp_c45_mdo_insert_tx_tag,
.needed_headroom = TJA11XX_TLV_TX_NEEDED_HEADROOM,
.needed_tailroom = TJA11XX_TLV_NEEDED_TAILROOM,
};
int nxp_c45_macsec_config_init(struct phy_device *phydev)
{
struct nxp_c45_phy *priv = phydev->priv;
int ret;
if (!priv->macsec)
return 0;
ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
MACSEC_EN | ADAPTER_EN);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_CONFIG_EN |
ADPTR_CNTRL_ADPTR_EN);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, ADPTR_TX_TAG_CNTRL,
ADPTR_TX_TAG_CNTRL_ENA);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, ADPTR_CNTRL, ADPTR_CNTRL_ADPTR_EN);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, MACSEC_TPNET, PN_WRAP_THRESHOLD);
if (ret)
return ret;
/* Set MKA filter. */
ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0D2, ETH_P_PAE);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M1, MACSEC_OVP);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0M2, ETYPE_MASK);
if (ret)
return ret;
ret = nxp_c45_macsec_write(phydev, MACSEC_UPFR0R, MACSEC_UPFR_EN);
return ret;
}
int nxp_c45_macsec_probe(struct phy_device *phydev)
{
struct nxp_c45_phy *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
priv->macsec = devm_kzalloc(dev, sizeof(*priv->macsec), GFP_KERNEL);
if (!priv->macsec)
return -ENOMEM;
INIT_LIST_HEAD(&priv->macsec->secy_list);
phydev->macsec_ops = &nxp_c45_macsec_ops;
return 0;
}
void nxp_c45_macsec_remove(struct phy_device *phydev)
{
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *secy_p, *secy_t;
struct nxp_c45_sa *sa_p, *sa_t;
struct list_head *secy_list;
if (!priv->macsec)
return;
secy_list = &priv->macsec->secy_list;
nxp_c45_macsec_en(phydev, false);
list_for_each_entry_safe(secy_p, secy_t, secy_list, list) {
list_for_each_entry_safe(sa_p, sa_t, &secy_p->sa_list, list)
nxp_c45_sa_free(sa_p);
nxp_c45_secy_free(secy_p);
}
}
void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
irqreturn_t *ret)
{
struct nxp_c45_phy *priv = phydev->priv;
struct nxp_c45_secy *secy;
struct nxp_c45_sa *sa;
u8 encoding_sa;
int secy_id;
u32 reg = 0;
if (!priv->macsec)
return;
do {
nxp_c45_macsec_read(phydev, MACSEC_EVR, &reg);
if (!reg)
return;
secy_id = MACSEC_REG_SIZE - ffs(reg);
secy = nxp_c45_find_secy_by_id(&priv->macsec->secy_list,
secy_id);
if (IS_ERR(secy)) {
WARN_ON(1);
goto macsec_ack_irq;
}
encoding_sa = secy->secy->tx_sc.encoding_sa;
phydev_dbg(phydev, "pn_wrapped: TX SC %d, encoding_sa %u\n",
secy->secy_id, encoding_sa);
sa = nxp_c45_find_sa(&secy->sa_list, TX_SA, encoding_sa);
if (!IS_ERR(sa))
macsec_pn_wrapped(secy->secy, sa->sa);
else
WARN_ON(1);
macsec_ack_irq:
nxp_c45_macsec_write(phydev, MACSEC_EVR,
TX_SC_BIT(secy_id));
*ret = IRQ_HANDLED;
} while (reg);
}
// SPDX-License-Identifier: GPL-2.0
/* NXP C45 PHY driver
* Copyright (C) 2021 NXP
* Copyright 2021-2023 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
......@@ -14,9 +14,10 @@
#include <linux/processor.h>
#include <linux/property.h>
#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
#include "nxp-c45-tja11xx.h"
#define PHY_ID_TJA_1103 0x001BB010
#define PHY_ID_TJA_1120 0x001BB031
......@@ -75,9 +76,11 @@
#define PORT_CONTROL_EN BIT(14)
#define VEND1_PORT_ABILITIES 0x8046
#define MACSEC_ABILITY BIT(5)
#define PTP_ABILITY BIT(3)
#define VEND1_PORT_FUNC_IRQ_EN 0x807A
#define MACSEC_IRQS BIT(5)
#define PTP_IRQS BIT(3)
#define VEND1_PTP_IRQ_ACK 0x9008
......@@ -148,7 +151,6 @@
#define TS_SEC_MASK GENMASK(1, 0)
-#define VEND1_PORT_FUNC_ENABLES	0x8048
#define PTP_ENABLE BIT(3)
#define PHY_TEST_ENABLE BIT(0)
......@@ -281,25 +283,6 @@ struct nxp_c45_phy_data {
irqreturn_t *irq_status);
};
-struct nxp_c45_phy {
-	const struct nxp_c45_phy_data *phy_data;
-	struct phy_device *phydev;
-	struct mii_timestamper mii_ts;
-	struct ptp_clock *ptp_clock;
-	struct ptp_clock_info caps;
-	struct sk_buff_head tx_queue;
-	struct sk_buff_head rx_queue;
-	/* used to access the PTP registers atomic */
-	struct mutex ptp_lock;
-	int hwts_tx;
-	int hwts_rx;
-	u32 tx_delay;
-	u32 rx_delay;
-	struct timespec64 extts_ts;
-	int extts_index;
-	bool extts;
-};
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
......@@ -1215,10 +1198,23 @@ static int nxp_c45_start_op(struct phy_device *phydev)
static int nxp_c45_config_intr(struct phy_device *phydev)
{
-	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
-		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
-					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
-	else
-		return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
-					  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
+	int ret;
+
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+				       VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
+		if (ret)
+			return ret;
+
+		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
+					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
+	}
+
+	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+				 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
+	if (ret)
+		return ret;
+
+	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
+				  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}
......@@ -1286,6 +1282,7 @@ static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
}
data->nmi_handler(phydev, &ret);
nxp_c45_handle_macsec_interrupt(phydev, &ret);
return ret;
}
......@@ -1611,6 +1608,9 @@ static int nxp_c45_config_init(struct phy_device *phydev)
nxp_c45_counters_enable(phydev);
nxp_c45_ptp_init(phydev);
ret = nxp_c45_macsec_config_init(phydev);
if (ret)
return ret;
return nxp_c45_start_op(phydev);
}
......@@ -1626,7 +1626,9 @@ static int nxp_c45_get_features(struct phy_device *phydev)
static int nxp_c45_probe(struct phy_device *phydev)
{
struct nxp_c45_phy *priv;
-	int ptp_ability;
+	bool macsec_ability;
+	int phy_abilities;
+	bool ptp_ability;
int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
......@@ -1642,9 +1644,9 @@ static int nxp_c45_probe(struct phy_device *phydev)
mutex_init(&priv->ptp_lock);
-	ptp_ability = phy_read_mmd(phydev, MDIO_MMD_VEND1,
-				   VEND1_PORT_ABILITIES);
-	ptp_ability = !!(ptp_ability & PTP_ABILITY);
+	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+				     VEND1_PORT_ABILITIES);
+	ptp_ability = !!(phy_abilities & PTP_ABILITY);
if (!ptp_ability) {
phydev_dbg(phydev, "the phy does not support PTP");
goto no_ptp_support;
......@@ -1663,6 +1665,20 @@ static int nxp_c45_probe(struct phy_device *phydev)
}
no_ptp_support:
macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
if (!macsec_ability) {
phydev_info(phydev, "the phy does not support MACsec\n");
goto no_macsec_support;
}
if (IS_ENABLED(CONFIG_MACSEC)) {
ret = nxp_c45_macsec_probe(phydev);
phydev_dbg(phydev, "MACsec support enabled.");
} else {
phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
}
no_macsec_support:
return ret;
}
......@@ -1676,6 +1692,7 @@ static void nxp_c45_remove(struct phy_device *phydev)
skb_queue_purge(&priv->tx_queue);
skb_queue_purge(&priv->rx_queue);
nxp_c45_macsec_remove(phydev);
}
static void tja1103_counters_enable(struct phy_device *phydev)
......
/* SPDX-License-Identifier: GPL-2.0 */
/* NXP C45 PHY driver header file
* Copyright 2023 NXP
* Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
*/
#include <linux/ptp_clock_kernel.h>
#define VEND1_PORT_FUNC_ENABLES 0x8048
struct nxp_c45_macsec;
struct nxp_c45_phy {
const struct nxp_c45_phy_data *phy_data;
struct phy_device *phydev;
struct mii_timestamper mii_ts;
struct ptp_clock *ptp_clock;
struct ptp_clock_info caps;
struct sk_buff_head tx_queue;
struct sk_buff_head rx_queue;
	/* used to access the PTP registers atomically */
struct mutex ptp_lock;
int hwts_tx;
int hwts_rx;
u32 tx_delay;
u32 rx_delay;
struct timespec64 extts_ts;
int extts_index;
bool extts;
struct nxp_c45_macsec *macsec;
};
#if IS_ENABLED(CONFIG_MACSEC)
int nxp_c45_macsec_config_init(struct phy_device *phydev);
int nxp_c45_macsec_probe(struct phy_device *phydev);
void nxp_c45_macsec_remove(struct phy_device *phydev);
void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
irqreturn_t *ret);
#else
static inline
int nxp_c45_macsec_config_init(struct phy_device *phydev)
{
return 0;
}
static inline
int nxp_c45_macsec_probe(struct phy_device *phydev)
{
return 0;
}
static inline
void nxp_c45_macsec_remove(struct phy_device *phydev)
{
}
static inline
void nxp_c45_handle_macsec_interrupt(struct phy_device *phydev,
irqreturn_t *ret)
{
}
#endif
......@@ -4007,6 +4007,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features
unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
......
......@@ -247,6 +247,23 @@ struct macsec_secy {
/**
* struct macsec_context - MACsec context for hardware offloading
* @netdev: a valid pointer to a struct net_device if @offload ==
* MACSEC_OFFLOAD_MAC
* @phydev: a valid pointer to a struct phy_device if @offload ==
* MACSEC_OFFLOAD_PHY
* @offload: MACsec offload status
* @secy: pointer to a MACsec SecY
* @rx_sc: pointer to a RX SC
* @update_pn: when updating the SA, update the next PN
* @assoc_num: association number of the target SA
* @key: key of the target SA
* @rx_sa: pointer to an RX SA if an RX SA is added/updated/removed
* @tx_sa: pointer to a TX SA if a TX SA is added/updated/removed
* @tx_sc_stats: pointer to TX SC stats structure
* @tx_sa_stats: pointer to TX SA stats structure
* @rx_sc_stats: pointer to RX SC stats structure
* @rx_sa_stats: pointer to RX SA stats structure
* @dev_stats: pointer to dev stats structure
*/
struct macsec_context {
union {
......@@ -277,6 +294,33 @@ struct macsec_context {
/**
* struct macsec_ops - MACsec offloading operations
* @mdo_dev_open: called when the MACsec interface transitions to the up state
* @mdo_dev_stop: called when the MACsec interface transitions to the down
* state
* @mdo_add_secy: called when a new SecY is added
* @mdo_upd_secy: called when the SecY flags are changed or the MAC address of
* the MACsec interface is changed
* @mdo_del_secy: called when the hw offload is disabled or the MACsec
* interface is removed
* @mdo_add_rxsc: called when a new RX SC is added
* @mdo_upd_rxsc: called when a certain RX SC is updated
* @mdo_del_rxsc: called when a certain RX SC is removed
* @mdo_add_rxsa: called when a new RX SA is added
* @mdo_upd_rxsa: called when a certain RX SA is updated
* @mdo_del_rxsa: called when a certain RX SA is removed
* @mdo_add_txsa: called when a new TX SA is added
* @mdo_upd_txsa: called when a certain TX SA is updated
* @mdo_del_txsa: called when a certain TX SA is removed
* @mdo_get_dev_stats: called when dev stats are read
* @mdo_get_tx_sc_stats: called when TX SC stats are read
* @mdo_get_tx_sa_stats: called when TX SA stats are read
* @mdo_get_rx_sc_stats: called when RX SC stats are read
* @mdo_get_rx_sa_stats: called when RX SA stats are read
* @mdo_insert_tx_tag: called to insert the TX tag
* @needed_headroom: number of bytes reserved at the beginning of the sk_buff
* for the TX tag
* @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
* TX tag
*/
struct macsec_ops {
/* Device wide */
......@@ -303,6 +347,11 @@ struct macsec_ops {
int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
/* Offload tag */
int (*mdo_insert_tx_tag)(struct phy_device *phydev,
struct sk_buff *skb);
unsigned int needed_headroom;
unsigned int needed_tailroom;
};
void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
......@@ -325,4 +374,9 @@ static inline void *macsec_netdev_priv(const struct net_device *dev)
return netdev_priv(dev);
}
static inline u64 sci_to_cpu(sci_t sci)
{
return be64_to_cpu((__force __be64)sci);
}
#endif /* _NET_MACSEC_H_ */
......@@ -5995,6 +5995,31 @@ int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
}
EXPORT_SYMBOL(skb_ensure_writable);
int skb_ensure_writable_head_tail(struct sk_buff *skb, struct net_device *dev)
{
int needed_headroom = dev->needed_headroom;
int needed_tailroom = dev->needed_tailroom;
/* For tail taggers, we need to pad short frames ourselves, to ensure
* that the tail tag does not fail at its role of being at the end of
* the packet, once the conduit interface pads the frame. Account for
* that pad length here, and pad later.
*/
if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
needed_tailroom += ETH_ZLEN - skb->len;
/* skb_headroom() returns unsigned int... */
needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
/* No reallocation needed, yay! */
return 0;
return pskb_expand_head(skb, needed_headroom, needed_tailroom,
GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable_head_tail);
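Both converted call sites (the macsec transmit path above and dsa_user_xmit() below) use the helper the same way. A minimal hypothetical caller, assuming nothing beyond the signature added here:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int tag_and_xmit_sketch(struct sk_buff *skb, struct net_device *dev)
{
	int err;

	/* Reallocates only when headroom/tailroom is short or the skb is
	 * cloned; a cheap no-op on the fast path.
	 */
	err = skb_ensure_writable_head_tail(skb, dev);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return err;
	}

	/* Safe now to skb_push() up to dev->needed_headroom bytes and to
	 * append up to dev->needed_tailroom bytes before transmission.
	 */
	return dev_queue_xmit(skb);
}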
/* remove VLAN header from packet and update csum accordingly.
* expects a non skb_vlan_tag_present skb with a vlan tag payload
*/
......
......@@ -920,30 +920,6 @@ netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev)
}
EXPORT_SYMBOL_GPL(dsa_enqueue_skb);
-static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev)
-{
-	int needed_headroom = dev->needed_headroom;
-	int needed_tailroom = dev->needed_tailroom;
-
-	/* For tail taggers, we need to pad short frames ourselves, to ensure
-	 * that the tail tag does not fail at its role of being at the end of
-	 * the packet, once the conduit interface pads the frame. Account for
-	 * that pad length here, and pad later.
-	 */
-	if (unlikely(needed_tailroom && skb->len < ETH_ZLEN))
-		needed_tailroom += ETH_ZLEN - skb->len;
-	/* skb_headroom() returns unsigned int... */
-	needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0);
-	needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0);
-
-	if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb)))
-		/* No reallocation needed, yay! */
-		return 0;
-
-	return pskb_expand_head(skb, needed_headroom, needed_tailroom,
-				GFP_ATOMIC);
-}
static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_user_priv *p = netdev_priv(dev);
......@@ -956,13 +932,14 @@ static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev)
/* Handle tx timestamp if any */
dsa_skb_tx_timestamp(p, skb);
-	if (dsa_realloc_skb(skb, dev)) {
+	if (skb_ensure_writable_head_tail(skb, dev)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* needed_tailroom should still be 'warm' in the cache line from
-	 * dsa_realloc_skb(), which has also ensured that padding is safe.
+	 * skb_ensure_writable_head_tail(), which has also ensured that
+	 * padding is safe.
*/
if (dev->needed_tailroom)
eth_skb_pad(skb);
......