Commit 92b54e09 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge branch 'net-dsa-qca8k-code-split-for-qca8k'

Christian Marangi says:

====================
net: dsa: qca8k: code split for qca8k

This is needed as the ipq4019 SoC has an internal switch that is
based on qca8k with very minor changes. The general function is equal.

Because of this we split the driver to common and specific code.

As the common function needs to be moved to a different file to be
reused, we had to convert every remaining user of qca8k_read/write/rmw
to regmap variant.
We also had to generalize the special handling for the ethtool_stats
function that makes use of the autocast MIB. (ipq4019 will have a
different tagger and use MMIO, so it could be quicker to use MMIO
instead of the automib feature.)
And we had to convert the regmap read/write to bulk implementation to
drop the special function that makes use of it. This will be compatible
with ipq4019 and at the same time permits normal switch to use the eth
mgmt way to send the entire ATU table read/write in one go.
====================

Link: https://lore.kernel.org/r/20220727113523.19742-1-ansuelsmth@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 29192a17 9d1bcb1f
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o obj-$(CONFIG_NET_DSA_AR9331) += ar9331.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
qca8k-y += qca8k-common.o qca8k-8xxx.o
...@@ -15,7 +15,6 @@ ...@@ -15,7 +15,6 @@
#include <linux/of_net.h> #include <linux/of_net.h>
#include <linux/of_mdio.h> #include <linux/of_mdio.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/if_bridge.h>
#include <linux/mdio.h> #include <linux/mdio.h>
#include <linux/phylink.h> #include <linux/phylink.h>
#include <linux/gpio/consumer.h> #include <linux/gpio/consumer.h>
...@@ -24,57 +23,6 @@ ...@@ -24,57 +23,6 @@
#include "qca8k.h" #include "qca8k.h"
/* Build one qca8k_mib_desc initializer: _s is the counter size in
 * 32-bit words, _o the register offset, _n the ethtool name string.
 */
#define MIB_DESC(_s, _o, _n) \
{ \
.size = (_s), \
.offset = (_o), \
.name = (_n), \
}
/* Hardware MIB counter layout for the ar8327 switch family: one entry
 * per counter giving its width (in 32-bit words), per-port register
 * offset and the name reported through ethtool -S.
 */
static const struct qca8k_mib_desc ar8327_mib[] = {
MIB_DESC(1, 0x00, "RxBroad"),
MIB_DESC(1, 0x04, "RxPause"),
MIB_DESC(1, 0x08, "RxMulti"),
MIB_DESC(1, 0x0c, "RxFcsErr"),
MIB_DESC(1, 0x10, "RxAlignErr"),
MIB_DESC(1, 0x14, "RxRunt"),
MIB_DESC(1, 0x18, "RxFragment"),
MIB_DESC(1, 0x1c, "Rx64Byte"),
MIB_DESC(1, 0x20, "Rx128Byte"),
MIB_DESC(1, 0x24, "Rx256Byte"),
MIB_DESC(1, 0x28, "Rx512Byte"),
MIB_DESC(1, 0x2c, "Rx1024Byte"),
MIB_DESC(1, 0x30, "Rx1518Byte"),
MIB_DESC(1, 0x34, "RxMaxByte"),
MIB_DESC(1, 0x38, "RxTooLong"),
MIB_DESC(2, 0x3c, "RxGoodByte"),
MIB_DESC(2, 0x44, "RxBadByte"),
MIB_DESC(1, 0x4c, "RxOverFlow"),
MIB_DESC(1, 0x50, "Filtered"),
MIB_DESC(1, 0x54, "TxBroad"),
MIB_DESC(1, 0x58, "TxPause"),
MIB_DESC(1, 0x5c, "TxMulti"),
MIB_DESC(1, 0x60, "TxUnderRun"),
MIB_DESC(1, 0x64, "Tx64Byte"),
MIB_DESC(1, 0x68, "Tx128Byte"),
MIB_DESC(1, 0x6c, "Tx256Byte"),
MIB_DESC(1, 0x70, "Tx512Byte"),
MIB_DESC(1, 0x74, "Tx1024Byte"),
MIB_DESC(1, 0x78, "Tx1518Byte"),
MIB_DESC(1, 0x7c, "TxMaxByte"),
MIB_DESC(1, 0x80, "TxOverSize"),
MIB_DESC(2, 0x84, "TxByte"),
MIB_DESC(1, 0x8c, "TxCollision"),
MIB_DESC(1, 0x90, "TxAbortCol"),
MIB_DESC(1, 0x94, "TxMultiCol"),
MIB_DESC(1, 0x98, "TxSingleCol"),
MIB_DESC(1, 0x9c, "TxExcDefer"),
MIB_DESC(1, 0xa0, "TxDefer"),
MIB_DESC(1, 0xa4, "TxLateCol"),
MIB_DESC(1, 0xa8, "RXUnicast"),
MIB_DESC(1, 0xac, "TXUnicast"),
};
static void static void
qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page) qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
{ {
...@@ -184,24 +132,6 @@ qca8k_set_page(struct qca8k_priv *priv, u16 page) ...@@ -184,24 +132,6 @@ qca8k_set_page(struct qca8k_priv *priv, u16 page)
return 0; return 0;
} }
/* Read a single 32-bit switch register through the regmap. Returns 0
 * on success or a negative errno from regmap_read().
 */
static int
qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
return regmap_read(priv->regmap, reg, val);
}
/* Write a single 32-bit switch register through the regmap. Returns 0
 * on success or a negative errno from regmap_write().
 */
static int
qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
return regmap_write(priv->regmap, reg, val);
}
/* Read-modify-write: update only the bits in @mask of @reg with
 * @write_val, via regmap_update_bits(). Returns 0 or negative errno.
 */
static int
qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
{ {
struct qca8k_mgmt_eth_data *mgmt_eth_data; struct qca8k_mgmt_eth_data *mgmt_eth_data;
...@@ -411,43 +341,6 @@ qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 wri ...@@ -411,43 +341,6 @@ qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 wri
return qca8k_write_eth(priv, reg, &val, sizeof(val)); return qca8k_write_eth(priv, reg, &val, sizeof(val));
} }
/* Read @len bytes (a whole number of 32-bit words) starting at @reg
 * into @val. Tries the Ethernet mgmt path in one shot when a mgmt
 * master is available, otherwise falls back to per-word regmap reads.
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	int words = len / sizeof(u32);
	int ret, i;

	/* Single-packet fast path over the mgmt Ethernet interface */
	if (priv->mgmt_master && !qca8k_read_eth(priv, reg, val, len))
		return 0;

	for (i = 0; i < words; i++) {
		ret = regmap_read(priv->regmap, reg + (i * 4), &val[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Write @len bytes (a whole number of 32-bit words) from @val starting
 * at @reg. Tries the Ethernet mgmt path in one shot when a mgmt master
 * is available, otherwise falls back to per-word regmap writes.
 * Returns 0 on success or a negative errno.
 *
 * Fix: dropped the pointless local copy of each word (tmp) before
 * passing it to regmap_write() — val[i] can be passed directly.
 */
static int
qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	int i, count = len / sizeof(u32), ret;

	/* Single-packet fast path over the mgmt Ethernet interface */
	if (priv->mgmt_master && !qca8k_write_eth(priv, reg, val, len))
		return 0;

	for (i = 0; i < count; i++) {
		ret = regmap_write(priv->regmap, reg + (i * 4), val[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
static int static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val) qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{ {
...@@ -534,30 +427,6 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_ ...@@ -534,30 +427,6 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
return ret; return ret;
} }
/* Register windows that are valid to read on the qca8k switch; used to
 * build the regmap readable-access table below.
 */
static const struct regmap_range qca8k_readable_ranges[] = {
regmap_reg_range(0x0000, 0x00e4), /* Global control */
regmap_reg_range(0x0100, 0x0168), /* EEE control */
regmap_reg_range(0x0200, 0x0270), /* Parser control */
regmap_reg_range(0x0400, 0x0454), /* ACL */
regmap_reg_range(0x0600, 0x0718), /* Lookup */
regmap_reg_range(0x0800, 0x0b70), /* QM */
regmap_reg_range(0x0c00, 0x0c80), /* PKT */
regmap_reg_range(0x0e00, 0x0e98), /* L3 */
regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};
/* regmap access table: only the ranges listed above are readable */
static const struct regmap_access_table qca8k_readable_table = {
.yes_ranges = qca8k_readable_ranges,
.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
static struct regmap_config qca8k_regmap_config = { static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16, .reg_bits = 16,
.val_bits = 32, .val_bits = 32,
...@@ -572,2047 +441,1131 @@ static struct regmap_config qca8k_regmap_config = { ...@@ -572,2047 +441,1131 @@ static struct regmap_config qca8k_regmap_config = {
}; };
static int static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask) qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data,
{ struct sk_buff *read_skb, u32 *val)
u32 val;
return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{ {
u32 reg[3]; struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL);
bool ack;
int ret; int ret;
/* load the ARL table into an array */ reinit_completion(&mgmt_eth_data->rw_done);
ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
if (ret)
return ret;
/* vid - 83:72 */
fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
/* aging - 67:64 */
fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
/* portmask - 54:48 */
fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
/* mac - 47:0 */
fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);
return 0; /* Increment seq_num and set it in the copy pkt */
} mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
static void dev_queue_xmit(skb);
qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask, const u8 *mac,
u8 aging)
{
u32 reg[3] = { 0 };
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
/* aging - 67:64 */
reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
/* portmask - 54:48 */
reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
/* mac - 47:0 */
reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
static int ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port) QCA8K_ETHERNET_TIMEOUT);
{
u32 reg;
int ret;
/* Set the command and FDB index */ ack = mgmt_eth_data->ack;
reg = QCA8K_ATU_FUNC_BUSY;
reg |= cmd;
if (port >= 0) {
reg |= QCA8K_ATU_FUNC_PORT_EN;
reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
}
/* Write the function register triggering the table access */ if (ret <= 0)
ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg); return -ETIMEDOUT;
if (ret)
return ret;
/* wait for completion */ if (!ack)
ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY); return -EINVAL;
if (ret)
return ret;
/* Check for table full violation when adding an entry */ *val = mgmt_eth_data->data[0];
if (cmd == QCA8K_FDB_LOAD) {
ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
if (ret < 0)
return ret;
if (reg & QCA8K_ATU_FUNC_FULL)
return -1;
}
return 0; return 0;
} }
static int static int
qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb, int port) qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
int regnum, u16 data)
{ {
int ret; struct sk_buff *write_skb, *clear_skb, *read_skb;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
struct net_device *mgmt_master;
int ret, ret1;
bool ack;
qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging); if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
ret = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port); return -EINVAL;
if (ret < 0)
return ret;
return qca8k_fdb_read(priv, fdb); mgmt_eth_data = &priv->mgmt_eth_data;
}
static int write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
u16 vid, u8 aging) QCA8K_MDIO_MASTER_REG_ADDR(regnum);
{
int ret;
mutex_lock(&priv->reg_mutex); if (read) {
qca8k_fdb_write(priv, vid, port_mask, mac, aging); write_val |= QCA8K_MDIO_MASTER_READ;
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); } else {
mutex_unlock(&priv->reg_mutex); write_val |= QCA8K_MDIO_MASTER_WRITE;
write_val |= QCA8K_MDIO_MASTER_DATA(data);
}
return ret; /* Prealloc all the needed skb before the lock */
} write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val,
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
if (!write_skb)
return -ENOMEM;
static int clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val,
qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac, u16 port_mask, u16 vid) QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
{ if (!clear_skb) {
int ret; ret = -ENOMEM;
goto err_clear_skb;
}
mutex_lock(&priv->reg_mutex); read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val,
qca8k_fdb_write(priv, vid, port_mask, mac, 0); QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val));
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); if (!read_skb) {
mutex_unlock(&priv->reg_mutex); ret = -ENOMEM;
goto err_read_skb;
}
return ret; /* Actually start the request:
} * 1. Send mdio master packet
* 2. Busy Wait for mdio master command
* 3. Get the data if we are reading
* 4. Reset the mdio master (even with error)
*/
mutex_lock(&mgmt_eth_data->mutex);
static void /* Check if mgmt_master is operational */
qca8k_fdb_flush(struct qca8k_priv *priv) mgmt_master = priv->mgmt_master;
{ if (!mgmt_master) {
mutex_lock(&priv->reg_mutex); mutex_unlock(&mgmt_eth_data->mutex);
qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1); ret = -EINVAL;
mutex_unlock(&priv->reg_mutex); goto err_mgmt_master;
} }
static int read_skb->dev = mgmt_master;
qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask, clear_skb->dev = mgmt_master;
const u8 *mac, u16 vid) write_skb->dev = mgmt_master;
{
struct qca8k_fdb fdb = { 0 };
int ret;
mutex_lock(&priv->reg_mutex); reinit_completion(&mgmt_eth_data->rw_done);
qca8k_fdb_write(priv, vid, 0, mac, 0); /* Increment seq_num and set it in the write pkt */
ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); mgmt_eth_data->seq++;
if (ret < 0) qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(write_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
kfree_skb(read_skb);
goto exit; goto exit;
}
ret = qca8k_fdb_read(priv, &fdb); if (!ack) {
if (ret < 0) ret = -EINVAL;
kfree_skb(read_skb);
goto exit; goto exit;
}
/* Rule exist. Delete first */ ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
if (!fdb.aging) { !(val & QCA8K_MDIO_MASTER_BUSY), 0,
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
if (ret) mgmt_eth_data, read_skb, &val);
if (ret < 0 && ret1 < 0) {
ret = ret1;
goto exit; goto exit;
} }
/* Add port to fdb portmask */ if (read) {
fdb.port_mask |= port_mask; reinit_completion(&mgmt_eth_data->rw_done);
qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); /* Increment seq_num and set it in the read pkt */
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
exit: dev_queue_xmit(read_skb);
mutex_unlock(&priv->reg_mutex);
return ret;
}
static int ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask, QCA8K_ETHERNET_TIMEOUT);
const u8 *mac, u16 vid)
{
struct qca8k_fdb fdb = { 0 };
int ret;
mutex_lock(&priv->reg_mutex); ack = mgmt_eth_data->ack;
qca8k_fdb_write(priv, vid, 0, mac, 0); if (ret <= 0) {
ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1); ret = -ETIMEDOUT;
if (ret < 0)
goto exit; goto exit;
}
/* Rule doesn't exist. Why delete? */ if (!ack) {
if (!fdb.aging) {
ret = -EINVAL; ret = -EINVAL;
goto exit; goto exit;
} }
ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1); ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
if (ret) } else {
goto exit; kfree_skb(read_skb);
}
exit:
reinit_completion(&mgmt_eth_data->rw_done);
/* Only port in the rule is this port. Don't re insert */ /* Increment seq_num and set it in the clear pkt */
if (fdb.port_mask == port_mask) mgmt_eth_data->seq++;
goto exit; qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
/* Remove port from port mask */ dev_queue_xmit(clear_skb);
fdb.port_mask &= ~port_mask;
qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging); wait_for_completion_timeout(&mgmt_eth_data->rw_done,
ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1); QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
return ret;
/* Error handling before lock */
err_mgmt_master:
kfree_skb(read_skb);
err_read_skb:
kfree_skb(clear_skb);
err_clear_skb:
kfree_skb(write_skb);
exit:
mutex_unlock(&priv->reg_mutex);
return ret; return ret;
} }
static int static u32
qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid) qca8k_port_to_phy(int port)
{ {
u32 reg; /* From Andrew Lunn:
int ret; * Port 0 has no internal phy.
* Port 1 has an internal PHY at MDIO address 0.
/* Set the command and VLAN index */ * Port 2 has an internal PHY at MDIO address 1.
reg = QCA8K_VTU_FUNC1_BUSY; * ...
reg |= cmd; * Port 5 has an internal PHY at MDIO address 4.
reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid); * Port 6 has no internal PHY.
*/
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
if (ret)
return ret;
/* wait for completion */
ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
if (ret)
return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_VLAN_LOAD) {
ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
if (ret < 0)
return ret;
if (reg & QCA8K_VTU_FUNC1_FULL)
return -ENOMEM;
}
return 0; return port - 1;
} }
static int static int
qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged) qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{ {
u32 reg; u16 r1, r2, page;
int ret; u32 val;
int ret, ret1;
/*
We do the right thing with VLAN 0 and treat it as untagged while
preserving the tag on egress.
*/
if (vid == 0)
return 0;
mutex_lock(&priv->reg_mutex);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
if (ret < 0)
goto out;
ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg); qca8k_split_addr(reg, &r1, &r2, &page);
if (ret < 0)
goto out;
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
if (untagged)
reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
else
reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg); ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
if (ret) QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
goto out; bus, 0x10 | r2, r1, &val);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
out: /* Check if qca8k_read has failed for a different reason
mutex_unlock(&priv->reg_mutex); * before returnting -ETIMEDOUT
*/
if (ret < 0 && ret1 < 0)
return ret1;
return ret; return ret;
} }
static int static int
qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid) qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{ {
u32 reg, mask; struct mii_bus *bus = priv->bus;
int ret, i; u16 r1, r2, page;
bool del; u32 val;
int ret;
mutex_lock(&priv->reg_mutex); if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid); return -EINVAL;
if (ret < 0)
goto out;
ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg); val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
if (ret < 0) QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
goto out; QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port); QCA8K_MDIO_MASTER_DATA(data);
reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);
/* Check if we're the last member to be removed */ qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
del = true;
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
if ((reg & mask) != mask) { mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
del = false;
break;
}
}
if (del) { ret = qca8k_set_page(priv, page);
ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
} else {
ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
if (ret) if (ret)
goto out; goto exit;
ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
}
out: qca8k_mii_write32(priv, 0x10 | r2, r1, val);
mutex_unlock(&priv->reg_mutex);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
return ret; return ret;
} }
static int static int
qca8k_mib_init(struct qca8k_priv *priv) qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{ {
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret; int ret;
mutex_lock(&priv->reg_mutex); if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, return -EINVAL;
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
QCA8K_MIB_BUSY);
if (ret)
goto exit;
ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY); val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret) if (ret)
goto exit; goto exit;
ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP); qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret) if (ret)
goto exit; goto exit;
ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB); ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
exit: exit:
mutex_unlock(&priv->reg_mutex); /* even if the busy_wait timeouts try to clear the MASTER_EN */
return ret; qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
}
static void mutex_unlock(&bus->mdio_lock);
qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
/* Port 0 and 6 have no internal PHY */ if (ret >= 0)
if (port > 0 && port < 6) ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
mask |= QCA8K_PORT_STATUS_LINK_AUTO;
if (enable) return ret;
regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
else
regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), mask);
} }
static int static int
qca8k_phy_eth_busy_wait(struct qca8k_mgmt_eth_data *mgmt_eth_data, qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
struct sk_buff *read_skb, u32 *val)
{ {
struct sk_buff *skb = skb_copy(read_skb, GFP_KERNEL); struct qca8k_priv *priv = slave_bus->priv;
bool ack;
int ret; int ret;
reinit_completion(&mgmt_eth_data->rw_done); /* Use mdio Ethernet when available, fallback to legacy one on error */
ret = qca8k_phy_eth_command(priv, false, phy, regnum, data);
/* Increment seq_num and set it in the copy pkt */ if (!ret)
mgmt_eth_data->seq++; return 0;
qca8k_mdio_header_fill_seq_num(skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done, return qca8k_mdio_write(priv, phy, regnum, data);
QCA8K_ETHERNET_TIMEOUT); }
ack = mgmt_eth_data->ack; static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
struct qca8k_priv *priv = slave_bus->priv;
int ret;
if (ret <= 0) /* Use mdio Ethernet when available, fallback to legacy one on error */
return -ETIMEDOUT; ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);
if (ret >= 0)
return ret;
if (!ack) ret = qca8k_mdio_read(priv, phy, regnum);
return -EINVAL;
*val = mgmt_eth_data->data[0]; if (ret < 0)
return 0xffff;
return 0; return ret;
} }
static int static int
qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
int regnum, u16 data)
{ {
struct sk_buff *write_skb, *clear_skb, *read_skb; port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
struct qca8k_mgmt_eth_data *mgmt_eth_data;
u32 write_val, clear_val = 0, val;
struct net_device *mgmt_master;
int ret, ret1;
bool ack;
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG) return qca8k_internal_mdio_write(slave_bus, port, regnum, data);
return -EINVAL; }
mgmt_eth_data = &priv->mgmt_eth_data; static int
qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
port = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
write_val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN | return qca8k_internal_mdio_read(slave_bus, port, regnum);
QCA8K_MDIO_MASTER_PHY_ADDR(phy) | }
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
if (read) { static int
write_val |= QCA8K_MDIO_MASTER_READ; qca8k_mdio_register(struct qca8k_priv *priv)
} else { {
write_val |= QCA8K_MDIO_MASTER_WRITE; struct dsa_switch *ds = priv->ds;
write_val |= QCA8K_MDIO_MASTER_DATA(data); struct device_node *mdio;
} struct mii_bus *bus;
/* Prealloc all the needed skb before the lock */ bus = devm_mdiobus_alloc(ds->dev);
write_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &write_val, if (!bus)
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(write_val));
if (!write_skb)
return -ENOMEM; return -ENOMEM;
clear_skb = qca8k_alloc_mdio_header(MDIO_WRITE, QCA8K_MDIO_MASTER_CTRL, &clear_val, bus->priv = (void *)priv;
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
if (!clear_skb) { ds->dst->index, ds->index);
ret = -ENOMEM; bus->parent = ds->dev;
goto err_clear_skb; bus->phy_mask = ~ds->phys_mii_mask;
} ds->slave_mii_bus = bus;
read_skb = qca8k_alloc_mdio_header(MDIO_READ, QCA8K_MDIO_MASTER_CTRL, &clear_val, /* Check if the devicetree declare the port:phy mapping */
QCA8K_ETHERNET_PHY_PRIORITY, sizeof(clear_val)); mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
if (!read_skb) { if (of_device_is_available(mdio)) {
ret = -ENOMEM; bus->name = "qca8k slave mii";
goto err_read_skb; bus->read = qca8k_internal_mdio_read;
bus->write = qca8k_internal_mdio_write;
return devm_of_mdiobus_register(priv->dev, bus, mdio);
} }
/* Actually start the request: /* If a mapping can't be found the legacy mapping is used,
* 1. Send mdio master packet * using the qca8k_port_to_phy function
* 2. Busy Wait for mdio master command
* 3. Get the data if we are reading
* 4. Reset the mdio master (even with error)
*/ */
mutex_lock(&mgmt_eth_data->mutex); bus->name = "qca8k-legacy slave mii";
bus->read = qca8k_legacy_mdio_read;
/* Check if mgmt_master is operational */ bus->write = qca8k_legacy_mdio_write;
mgmt_master = priv->mgmt_master; return devm_mdiobus_register(priv->dev, bus);
if (!mgmt_master) { }
mutex_unlock(&mgmt_eth_data->mutex);
ret = -EINVAL;
goto err_mgmt_master;
}
read_skb->dev = mgmt_master;
clear_skb->dev = mgmt_master;
write_skb->dev = mgmt_master;
reinit_completion(&mgmt_eth_data->rw_done); static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
/* Increment seq_num and set it in the write pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(write_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(write_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
kfree_skb(read_skb);
goto exit;
}
if (!ack) {
ret = -EINVAL;
kfree_skb(read_skb);
goto exit;
}
ret = read_poll_timeout(qca8k_phy_eth_busy_wait, ret1,
!(val & QCA8K_MDIO_MASTER_BUSY), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
mgmt_eth_data, read_skb, &val);
if (ret < 0 && ret1 < 0) {
ret = ret1;
goto exit;
}
if (read) {
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the read pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(read_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(read_skb);
ret = wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
ack = mgmt_eth_data->ack;
if (ret <= 0) {
ret = -ETIMEDOUT;
goto exit;
}
if (!ack) {
ret = -EINVAL;
goto exit;
}
ret = mgmt_eth_data->data[0] & QCA8K_MDIO_MASTER_DATA_MASK;
} else {
kfree_skb(read_skb);
}
exit:
reinit_completion(&mgmt_eth_data->rw_done);
/* Increment seq_num and set it in the clear pkt */
mgmt_eth_data->seq++;
qca8k_mdio_header_fill_seq_num(clear_skb, mgmt_eth_data->seq);
mgmt_eth_data->ack = false;
dev_queue_xmit(clear_skb);
wait_for_completion_timeout(&mgmt_eth_data->rw_done,
QCA8K_ETHERNET_TIMEOUT);
mutex_unlock(&mgmt_eth_data->mutex);
return ret;
/* Error handling before lock */
err_mgmt_master:
kfree_skb(read_skb);
err_read_skb:
kfree_skb(clear_skb);
err_clear_skb:
kfree_skb(write_skb);
return ret;
}
/* Map a switch port number to its internal PHY MDIO address.
 *
 * From Andrew Lunn:
 * Port 0 has no internal phy.
 * Port 1 has an internal PHY at MDIO address 0.
 * Port 2 has an internal PHY at MDIO address 1.
 * ...
 * Port 5 has an internal PHY at MDIO address 4.
 * Port 6 has no internal PHY.
 */
static u32
qca8k_port_to_phy(int port)
{
	return port - 1;
}
/* Poll switch register @reg over raw MII accesses until the bits in
 * @mask clear, or QCA8K_BUSY_WAIT_TIMEOUT ms elapse. Returns 0 when
 * the bits cleared, -ETIMEDOUT on timeout, or the MII read error.
 */
static int
qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
u16 r1, r2, page;
u32 val;
int ret, ret1;
qca8k_split_addr(reg, &r1, &r2, &page);
ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
bus, 0x10 | r2, r1, &val);
/* Check if qca8k_read has failed for a different reason
 * before returning -ETIMEDOUT
 */
if (ret < 0 && ret1 < 0)
return ret1;
return ret;
}
/* Write @data to PHY @phy register @regnum through the switch MDIO
 * master interface. Takes the host MDIO bus lock (nested, since this
 * runs from within an mdiobus operation), selects the register page,
 * triggers the write and busy-waits for completion. MASTER_EN is
 * cleared afterwards even on error. Returns 0 or a negative errno.
 */
static int
qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
/* The MDIO master can only reach the standard PHY register range */
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
QCA8K_MDIO_MASTER_DATA(data);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
return ret;
}
/* Read PHY @phy register @regnum through the switch MDIO master
 * interface. Same locking/page-selection sequence as qca8k_mdio_write;
 * after the busy-wait succeeds the data word is read back. MASTER_EN
 * is cleared afterwards even on error. Returns the 16-bit register
 * value on success or a negative errno.
 */
static int
qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
/* The MDIO master can only reach the standard PHY register range */
if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
return -EINVAL;
val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
QCA8K_MDIO_MASTER_REG_ADDR(regnum);
qca8k_split_addr(QCA8K_MDIO_MASTER_CTRL, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
ret = qca8k_set_page(priv, page);
if (ret)
goto exit;
qca8k_mii_write32(priv, 0x10 | r2, r1, val);
ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
QCA8K_MDIO_MASTER_BUSY);
if (ret)
goto exit;
ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
exit:
/* even if the busy_wait timeouts try to clear the MASTER_EN */
qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
mutex_unlock(&bus->mdio_lock);
/* On success, extract the 16-bit data field from the control word */
if (ret >= 0)
ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
return ret;
}
/* slave MII .write callback for internal PHYs: prefer the Ethernet
 * mgmt path, fall back to the MDIO master registers when it is
 * unavailable or fails.
 */
static int
qca8k_internal_mdio_write(struct mii_bus *slave_bus, int phy, int regnum, u16 data)
{
	struct qca8k_priv *priv = slave_bus->priv;

	if (!qca8k_phy_eth_command(priv, false, phy, regnum, data))
		return 0;

	return qca8k_mdio_write(priv, phy, regnum, data);
}
/* slave MII .read callback for internal PHYs: prefer the Ethernet mgmt
 * path, fall back to the MDIO master registers. A failed read reports
 * all-ones (0xffff), as an absent PHY would on a real bus.
 */
static int
qca8k_internal_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
{
	struct qca8k_priv *priv = slave_bus->priv;
	int ret = qca8k_phy_eth_command(priv, true, phy, regnum, 0);

	if (ret < 0) {
		ret = qca8k_mdio_read(priv, phy, regnum);
		if (ret < 0)
			return 0xffff;
	}

	return ret;
}
/* Legacy slave MII .write: translate the port number to its internal
 * PHY address before delegating to the internal write path.
 */
static int
qca8k_legacy_mdio_write(struct mii_bus *slave_bus, int port, int regnum, u16 data)
{
	int phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;

	return qca8k_internal_mdio_write(slave_bus, phy, regnum, data);
}
/* Legacy slave MII .read: translate the port number to its internal
 * PHY address before delegating to the internal read path.
 */
static int
qca8k_legacy_mdio_read(struct mii_bus *slave_bus, int port, int regnum)
{
	int phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;

	return qca8k_internal_mdio_read(slave_bus, phy, regnum);
}
/* Allocate and register the DSA slave MII bus. When the devicetree
 * provides an "mdio" child node the internal read/write callbacks are
 * used and the bus is registered against that node; otherwise the
 * legacy port->phy mapping is used. Returns 0 or a negative errno.
 *
 * Fix: of_get_child_by_name() returns a node with an elevated
 * refcount; it was never dropped, leaking the reference on every
 * path. Release it with of_node_put() (a NULL node is a no-op).
 */
static int
qca8k_mdio_register(struct qca8k_priv *priv)
{
	struct dsa_switch *ds = priv->ds;
	struct device_node *mdio;
	struct mii_bus *bus;
	int err;

	bus = devm_mdiobus_alloc(ds->dev);
	if (!bus)
		return -ENOMEM;

	bus->priv = (void *)priv;
	snprintf(bus->id, MII_BUS_ID_SIZE, "qca8k-%d.%d",
		 ds->dst->index, ds->index);
	bus->parent = ds->dev;
	bus->phy_mask = ~ds->phys_mii_mask;
	ds->slave_mii_bus = bus;

	/* Check if the devicetree declare the port:phy mapping */
	mdio = of_get_child_by_name(priv->dev->of_node, "mdio");
	if (of_device_is_available(mdio)) {
		bus->name = "qca8k slave mii";
		bus->read = qca8k_internal_mdio_read;
		bus->write = qca8k_internal_mdio_write;
		err = devm_of_mdiobus_register(priv->dev, bus, mdio);
		of_node_put(mdio);
		return err;
	}
	of_node_put(mdio);

	/* If a mapping can't be found the legacy mapping is used,
	 * using the qca8k_port_to_phy function
	 */
	bus->name = "qca8k-legacy slave mii";
	bus->read = qca8k_legacy_mdio_read;
	bus->write = qca8k_legacy_mdio_write;
	return devm_mdiobus_register(priv->dev, bus);
}
/* Decide whether the switch PHYs are reached through the internal MDIO
 * master or through an external MDC passthrough, based on the DT port
 * descriptions, and configure the hardware accordingly.
 * Returns 0 on success or a negative errno.
 */
static int
qca8k_setup_mdio_bus(struct qca8k_priv *priv)
{
	u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
	struct device_node *ports, *port;
	phy_interface_t mode;
	int err;
	/* Accept both the legacy "ports" and the newer "ethernet-ports"
	 * container node name.
	 */
	ports = of_get_child_by_name(priv->dev->of_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
	if (!ports)
		return -EINVAL;
	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Drop the references held by the iterator and
			 * by of_get_child_by_name() before bailing out.
			 */
			of_node_put(port);
			of_node_put(ports);
			return err;
		}
		/* Only user ports carry PHYs we need to classify */
		if (!dsa_is_user_port(priv->ds, reg))
			continue;
		/* Return value deliberately ignored: a missing/invalid
		 * phy-mode simply leaves the port classified as internal
		 * below. NOTE(review): relies on of_get_phy_mode()
		 * setting *mode on failure - confirm.
		 */
		of_get_phy_mode(port, &mode);
		/* A phy-handle to a non-internal PHY means the PHY sits
		 * on an external mdio bus.
		 */
		if (of_property_read_bool(port, "phy-handle") &&
		    mode != PHY_INTERFACE_MODE_INTERNAL)
			external_mdio_mask |= BIT(reg);
		else
			internal_mdio_mask |= BIT(reg);
	}
	of_node_put(ports);
	if (!external_mdio_mask && !internal_mdio_mask) {
		dev_err(priv->dev, "no PHYs are defined.\n");
		return -EINVAL;
	}
	/* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
	 * the MDIO_MASTER register also _disconnects_ the external MDC
	 * passthrough to the internal PHYs. It's not possible to use both
	 * configurations at the same time!
	 *
	 * Because this came up during the review process:
	 * If the external mdio-bus driver is capable magically disabling
	 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
	 * accessors for the time being, it would be possible to pull this
	 * off.
	 */
	if (!!external_mdio_mask && !!internal_mdio_mask) {
		dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
		return -EINVAL;
	}
	if (external_mdio_mask) {
		/* Make sure to disable the internal mdio bus in cases
		 * a dt-overlay and driver reload changed the configuration
		 */
		return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
					 QCA8K_MDIO_MASTER_EN);
	}
	/* All PHYs are internal: register our own slave mii bus */
	return qca8k_mdio_register(priv);
}
/* Apply SoC-specific RGMII I/O voltage settings (1.8V) for the ipq8064
 * and ipq8065 platforms. Returns 0 on success or the qca8k_rmw() error.
 */
static int
qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
	u32 mask = 0;

	/* SoC specific settings for ipq8064.
	 * If more device require this consider adding
	 * a dedicated binding.
	 */
	if (of_machine_is_compatible("qcom,ipq8064"))
		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;

	/* SoC specific settings for ipq8065 */
	if (of_machine_is_compatible("qcom,ipq8065"))
		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;

	/* Nothing to program on other platforms */
	if (!mask)
		return 0;

	return qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
			 QCA8K_MAC_PWR_RGMII0_1_8V |
			 QCA8K_MAC_PWR_RGMII1_1_8V,
			 mask);
}
/* Return the index of the connected CPU port (0 or 6 on this switch),
 * or -EINVAL if neither is configured as CPU port.
 */
static int qca8k_find_cpu_port(struct dsa_switch *ds)
{
	struct qca8k_priv *priv = ds->priv;

	if (dsa_is_cpu_port(ds, 0))
		return 0;

	dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");

	return dsa_is_cpu_port(ds, 6) ? 6 : -EINVAL;
}
static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{
struct device_node *node = priv->dev->of_node;
const struct qca8k_match_data *data;
u32 val = 0;
int ret;
/* QCA8327 require to set to the correct mode.
* His bigger brother QCA8328 have the 172 pin layout.
* Should be applied by default but we set this just to make sure.
*/
if (priv->switch_id == QCA8K_ID_QCA8327) {
data = of_device_get_match_data(priv->dev);
/* Set the correct package of 148 pin for QCA8327 */
if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
val);
if (ret)
return ret;
}
if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
val |= QCA8K_PWS_POWER_ON_SEL;
if (of_property_read_bool(node, "qca,led-open-drain")) {
if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
return -EINVAL;
}
val |= QCA8K_PWS_LED_OPEN_EN_CSR;
}
return qca8k_rmw(priv, QCA8K_REG_PWS,
QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
val);
}
/* Parse per-CPU-port DT configuration (RGMII tx/rx delays and SGMII
 * clock/PLL tweaks) into priv->ports_config. Ports that are disabled
 * or have no/unknown phy-mode are silently skipped. Always returns 0.
 */
static int
qca8k_parse_port_config(struct qca8k_priv *priv)
{
	int port, cpu_port_index = -1, ret;
	struct device_node *port_dn;
	phy_interface_t mode;
	struct dsa_port *dp;
	u32 delay;
	/* We have 2 CPU port. Check them */
	for (port = 0; port < QCA8K_NUM_PORTS; port++) {
		/* Skip every other port */
		if (port != 0 && port != 6)
			continue;
		dp = dsa_to_port(priv->ds, port);
		port_dn = dp->dn;
		/* Index into ports_config arrays: 0 for port 0, 1 for
		 * port 6. Incremented even when the port is skipped so
		 * the mapping stays stable.
		 */
		cpu_port_index++;
		if (!of_device_is_available(port_dn))
			continue;
		ret = of_get_phy_mode(port_dn, &mode);
		if (ret)
			continue;
		switch (mode) {
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_SGMII:
			/* TX delay: explicit DT value wins; otherwise the
			 * rgmii-id/txid modes imply the default of 1 ns.
			 */
			delay = 0;
			if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_TXID)
				delay = 1;
			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}
			priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
			/* RX delay: explicit DT value wins; otherwise the
			 * rgmii-id/rxid modes imply the default of 2 ns.
			 */
			delay = 0;
			if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
				/* Switch regs accept value in ns, convert ps to ns */
				delay = delay / 1000;
			else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
				 mode == PHY_INTERFACE_MODE_RGMII_RXID)
				delay = 2;
			if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
				dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
				delay = 3;
			}
			priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
			/* Skip sgmii parsing for rgmii* mode */
			if (mode == PHY_INTERFACE_MODE_RGMII ||
			    mode == PHY_INTERFACE_MODE_RGMII_ID ||
			    mode == PHY_INTERFACE_MODE_RGMII_TXID ||
			    mode == PHY_INTERFACE_MODE_RGMII_RXID)
				break;
			if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
				priv->ports_config.sgmii_tx_clk_falling_edge = true;
			if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
				priv->ports_config.sgmii_rx_clk_falling_edge = true;
			if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
				priv->ports_config.sgmii_enable_pll = true;
				/* PLL is refused on qca8327 and only warned
				 * about on early qca8337 revisions.
				 */
				if (priv->switch_id == QCA8K_ID_QCA8327) {
					dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
					priv->ports_config.sgmii_enable_pll = false;
				}
				if (priv->switch_revision < 2)
					dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
			}
			break;
		default:
			continue;
		}
	}
	return 0;
}
/* Program the RGMII tx/rx delay bits of a CPU port pad control register
 * from the values parsed into priv->ports_config. A zero delay leaves
 * the corresponding enable bit cleared.
 */
static void
qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
				      u32 reg)
{
	u32 tx_delay, rx_delay, val = 0;
	int ret;

	/* Delay can be declared in 3 different way.
	 * Mode to rgmii and internal-delay standard binding defined
	 * rgmii-id or rgmii-tx/rx phy mode set.
	 * The parse logic set a delay different than 0 only when one
	 * of the 3 different way is used. In all other case delay is
	 * not enabled. With ID or TX/RXID delay is enabled and set
	 * to the default and recommended value.
	 */
	tx_delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
	if (tx_delay)
		val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(tx_delay) |
		       QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;

	rx_delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
	if (rx_delay)
		val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(rx_delay) |
		       QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;

	/* Set RGMII delay based on the selected values */
	ret = qca8k_rmw(priv, reg,
			QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
			QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
			QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
			val);
	if (ret)
		dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
			cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
}
/* Hand phylink the PCS instance for the SGMII/1000base-X capable CPU
 * ports (0 and 6); all other port/interface combinations have none.
 */
static struct phylink_pcs *
qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
			     phy_interface_t interface)
{
	struct qca8k_priv *priv = ds->priv;

	if (interface != PHY_INTERFACE_MODE_SGMII &&
	    interface != PHY_INTERFACE_MODE_1000BASEX)
		return NULL;

	if (port == 0)
		return &priv->pcs_port_0.pcs;
	if (port == 6)
		return &priv->pcs_port_6.pcs;

	return NULL;
}
/* phylink mac_config callback: program the pad control register of the
 * CPU port (0 or 6) for the negotiated interface mode. Internal-PHY
 * ports (1-5) need no MAC-side configuration.
 */
static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
			 const struct phylink_link_state *state)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port_index;
	u32 reg;
	switch (port) {
	case 0: /* 1st CPU port */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII)
			return;
		reg = QCA8K_REG_PORT0_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT0;
		break;
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
		/* Internal PHY, nothing to do */
		return;
	case 6: /* 2nd CPU port / external PHY */
		/* Port 6 additionally supports 1000base-X */
		if (state->interface != PHY_INTERFACE_MODE_RGMII &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
		    state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
		    state->interface != PHY_INTERFACE_MODE_SGMII &&
		    state->interface != PHY_INTERFACE_MODE_1000BASEX)
			return;
		reg = QCA8K_REG_PORT6_PAD_CTRL;
		cpu_port_index = QCA8K_CPU_PORT6;
		break;
	default:
		dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
		return;
	}
	/* In-band negotiation is only available on port 6 */
	if (port != 6 && phylink_autoneg_inband(mode)) {
		dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
			__func__);
		return;
	}
	switch (state->interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
		/* Configure rgmii delay */
		qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
		/* QCA8337 requires to set rgmii rx delay for all ports.
		 * This is enabled through PORT5_PAD_CTRL for all ports,
		 * rather than individual port registers.
		 */
		if (priv->switch_id == QCA8K_ID_QCA8337)
			qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
				    QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
		/* Enable SGMII on the port */
		qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
		break;
	default:
		dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
			phy_modes(state->interface), port);
		return;
	}
}
/* phylink get_caps callback: report the interfaces and MAC capabilities
 * of each port. Ports 0/6 are CPU-facing (RGMII/SGMII, port 6 also
 * 1000base-X); ports 1-5 are the internal PHYs.
 */
static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
				   struct phylink_config *config)
{
	unsigned long *interfaces = config->supported_interfaces;

	if (port == 0 || port == 6) {
		/* CPU ports / external PHY */
		phy_interface_set_rgmii(interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
		if (port == 6)
			__set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
	} else if (port >= 1 && port <= 5) {
		/* Internal PHY */
		__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
		__set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
	}

	config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
				   MAC_10 | MAC_100 | MAC_1000FD;

	config->legacy_pre_march2020 = false;
}
static void
qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{ {
struct qca8k_priv *priv = ds->priv; u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
struct device_node *ports, *port;
phy_interface_t mode;
int err;
qca8k_port_set_status(priv, port, 0); ports = of_get_child_by_name(priv->dev->of_node, "ports");
} if (!ports)
ports = of_get_child_by_name(priv->dev->of_node, "ethernet-ports");
static void if (!ports)
qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode, return -EINVAL;
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct qca8k_priv *priv = ds->priv;
u32 reg;
if (phylink_autoneg_inband(mode)) { for_each_available_child_of_node(ports, port) {
reg = QCA8K_PORT_STATUS_LINK_AUTO; err = of_property_read_u32(port, "reg", &reg);
} else { if (err) {
switch (speed) { of_node_put(port);
case SPEED_10: of_node_put(ports);
reg = QCA8K_PORT_STATUS_SPEED_10; return err;
break;
case SPEED_100:
reg = QCA8K_PORT_STATUS_SPEED_100;
break;
case SPEED_1000:
reg = QCA8K_PORT_STATUS_SPEED_1000;
break;
default:
reg = QCA8K_PORT_STATUS_LINK_AUTO;
break;
} }
if (duplex == DUPLEX_FULL) if (!dsa_is_user_port(priv->ds, reg))
reg |= QCA8K_PORT_STATUS_DUPLEX; continue;
if (rx_pause || dsa_is_cpu_port(ds, port)) of_get_phy_mode(port, &mode);
reg |= QCA8K_PORT_STATUS_RXFLOW;
if (tx_pause || dsa_is_cpu_port(ds, port)) if (of_property_read_bool(port, "phy-handle") &&
reg |= QCA8K_PORT_STATUS_TXFLOW; mode != PHY_INTERFACE_MODE_INTERNAL)
external_mdio_mask |= BIT(reg);
else
internal_mdio_mask |= BIT(reg);
} }
reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; of_node_put(ports);
if (!external_mdio_mask && !internal_mdio_mask) {
qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); dev_err(priv->dev, "no PHYs are defined.\n");
} return -EINVAL;
}
static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
{
return container_of(pcs, struct qca8k_pcs, pcs);
}
static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int port = pcs_to_qca8k_pcs(pcs)->port;
u32 reg;
int ret;
ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg); /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
if (ret < 0) { * the MDIO_MASTER register also _disconnects_ the external MDC
state->link = false; * passthrough to the internal PHYs. It's not possible to use both
return; * configurations at the same time!
*
* Because this came up during the review process:
* If the external mdio-bus driver is capable magically disabling
* the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
* accessors for the time being, it would be possible to pull this
* off.
*/
if (!!external_mdio_mask && !!internal_mdio_mask) {
dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
return -EINVAL;
} }
state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP); if (external_mdio_mask) {
state->an_complete = state->link; /* Make sure to disable the internal mdio bus in cases
state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO); * a dt-overlay and driver reload changed the configuration
state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL : */
DUPLEX_HALF;
switch (reg & QCA8K_PORT_STATUS_SPEED) { return regmap_clear_bits(priv->regmap, QCA8K_MDIO_MASTER_CTRL,
case QCA8K_PORT_STATUS_SPEED_10: QCA8K_MDIO_MASTER_EN);
state->speed = SPEED_10;
break;
case QCA8K_PORT_STATUS_SPEED_100:
state->speed = SPEED_100;
break;
case QCA8K_PORT_STATUS_SPEED_1000:
state->speed = SPEED_1000;
break;
default:
state->speed = SPEED_UNKNOWN;
break;
} }
if (reg & QCA8K_PORT_STATUS_RXFLOW) return qca8k_mdio_register(priv);
state->pause |= MLO_PAUSE_RX;
if (reg & QCA8K_PORT_STATUS_TXFLOW)
state->pause |= MLO_PAUSE_TX;
} }
static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode, static int
phy_interface_t interface, qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
const unsigned long *advertising,
bool permit_pause_to_mac)
{ {
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv; u32 mask = 0;
int cpu_port_index, ret, port; int ret = 0;
u32 reg, val;
port = pcs_to_qca8k_pcs(pcs)->port; /* SoC specific settings for ipq8064.
switch (port) { * If more device require this consider adding
case 0: * a dedicated binding.
reg = QCA8K_REG_PORT0_PAD_CTRL; */
cpu_port_index = QCA8K_CPU_PORT0; if (of_machine_is_compatible("qcom,ipq8064"))
break; mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
case 6: /* SoC specific settings for ipq8065 */
reg = QCA8K_REG_PORT6_PAD_CTRL; if (of_machine_is_compatible("qcom,ipq8065"))
cpu_port_index = QCA8K_CPU_PORT6; mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
break;
default: if (mask) {
WARN_ON(1); ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
return -EINVAL; QCA8K_MAC_PWR_RGMII0_1_8V |
QCA8K_MAC_PWR_RGMII1_1_8V,
mask);
} }
/* Enable/disable SerDes auto-negotiation as necessary */
ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
if (ret)
return ret;
if (phylink_autoneg_inband(mode))
val &= ~QCA8K_PWS_SERDES_AEN_DIS;
else
val |= QCA8K_PWS_SERDES_AEN_DIS;
qca8k_write(priv, QCA8K_REG_PWS, val);
/* Configure the SGMII parameters */
ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
if (ret)
return ret; return ret;
val |= QCA8K_SGMII_EN_SD;
if (priv->ports_config.sgmii_enable_pll)
val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(priv->ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_PHY;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_MAC;
} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_BASEX;
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
/* From original code is reported port instability as SGMII also
* require delay set. Apply advised values here or take them from DT.
*/
if (interface == PHY_INTERFACE_MODE_SGMII)
qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
* falling edge is set writing in the PORT0 PAD reg
*/
if (priv->switch_id == QCA8K_ID_QCA8327 ||
priv->switch_id == QCA8K_ID_QCA8337)
reg = QCA8K_REG_PORT0_PAD_CTRL;
val = 0;
/* SGMII Clock phase configuration */
if (priv->ports_config.sgmii_rx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
if (priv->ports_config.sgmii_tx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
if (val)
ret = qca8k_rmw(priv, reg,
QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
val);
return 0;
}
static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
{
}
static const struct phylink_pcs_ops qca8k_pcs_ops = {
.pcs_get_state = qca8k_pcs_get_state,
.pcs_config = qca8k_pcs_config,
.pcs_an_restart = qca8k_pcs_an_restart,
};
static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{
qpcs->pcs.ops = &qca8k_pcs_ops;
/* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
qpcs->priv = priv;
qpcs->port = port;
} }
static void static int qca8k_find_cpu_port(struct dsa_switch *ds)
qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
{ {
const struct qca8k_match_data *match_data;
struct qca8k_priv *priv = ds->priv; struct qca8k_priv *priv = ds->priv;
int i;
if (stringset != ETH_SS_STATS) /* Find the connected cpu port. Valid port are 0 or 6 */
return; if (dsa_is_cpu_port(ds, 0))
return 0;
dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
match_data = of_device_get_match_data(priv->dev); if (dsa_is_cpu_port(ds, 6))
return 6;
for (i = 0; i < match_data->mib_count; i++) return -EINVAL;
strncpy(data + i * ETH_GSTRING_LEN, ar8327_mib[i].name,
ETH_GSTRING_LEN);
} }
static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb) static int
qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
{ {
const struct qca8k_match_data *match_data; const struct qca8k_match_data *data = priv->info;
struct qca8k_mib_eth_data *mib_eth_data; struct device_node *node = priv->dev->of_node;
struct qca8k_priv *priv = ds->priv; u32 val = 0;
const struct qca8k_mib_desc *mib; int ret;
struct mib_ethhdr *mib_ethhdr;
int i, mib_len, offset = 0;
u64 *data;
u8 port;
mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
mib_eth_data = &priv->mib_eth_data;
/* The switch autocast every port. Ignore other packet and /* QCA8327 require to set to the correct mode.
* parse only the requested one. * His bigger brother QCA8328 have the 172 pin layout.
* Should be applied by default but we set this just to make sure.
*/ */
port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr)); if (priv->switch_id == QCA8K_ID_QCA8327) {
if (port != mib_eth_data->req_port) /* Set the correct package of 148 pin for QCA8327 */
goto exit; if (data->reduced_package)
val |= QCA8327_PWS_PACKAGE148_EN;
match_data = device_get_match_data(priv->dev);
data = mib_eth_data->data;
for (i = 0; i < match_data->mib_count; i++) {
mib = &ar8327_mib[i];
/* First 3 mib are present in the skb head */ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
if (i < 3) { val);
data[i] = mib_ethhdr->data[i]; if (ret)
continue; return ret;
} }
mib_len = sizeof(uint32_t); if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
val |= QCA8K_PWS_POWER_ON_SEL;
/* Some mib are 64 bit wide */
if (mib->size == 2)
mib_len = sizeof(uint64_t);
/* Copy the mib value from packet to the */ if (of_property_read_bool(node, "qca,led-open-drain")) {
memcpy(data + i, skb->data + offset, mib_len); if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
return -EINVAL;
}
/* Set the offset for the next mib */ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
offset += mib_len;
} }
exit: return qca8k_rmw(priv, QCA8K_REG_PWS,
/* Complete on receiving all the mib packet */ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
if (refcount_dec_and_test(&mib_eth_data->port_parsed)) val);
complete(&mib_eth_data->rw_done);
} }
static int static int
qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data) qca8k_parse_port_config(struct qca8k_priv *priv)
{ {
struct dsa_port *dp = dsa_to_port(ds, port); int port, cpu_port_index = -1, ret;
struct qca8k_mib_eth_data *mib_eth_data; struct device_node *port_dn;
struct qca8k_priv *priv = ds->priv; phy_interface_t mode;
int ret; struct dsa_port *dp;
u32 delay;
mib_eth_data = &priv->mib_eth_data; /* We have 2 CPU port. Check them */
for (port = 0; port < QCA8K_NUM_PORTS; port++) {
/* Skip every other port */
if (port != 0 && port != 6)
continue;
mutex_lock(&mib_eth_data->mutex); dp = dsa_to_port(priv->ds, port);
port_dn = dp->dn;
cpu_port_index++;
reinit_completion(&mib_eth_data->rw_done); if (!of_device_is_available(port_dn))
continue;
mib_eth_data->req_port = dp->index; ret = of_get_phy_mode(port_dn, &mode);
mib_eth_data->data = data; if (ret)
refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS); continue;
mutex_lock(&priv->reg_mutex); switch (mode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_SGMII:
delay = 0;
/* Send mib autocast request */ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB, /* Switch regs accept value in ns, convert ps to ns */
QCA8K_MIB_FUNC | QCA8K_MIB_BUSY, delay = delay / 1000;
FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) | else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
QCA8K_MIB_BUSY); mode == PHY_INTERFACE_MODE_RGMII_TXID)
delay = 1;
mutex_unlock(&priv->reg_mutex); if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK, delay)) {
dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
if (ret) priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
goto exit;
ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT); delay = 0;
exit: if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
mutex_unlock(&mib_eth_data->mutex); /* Switch regs accept value in ns, convert ps to ns */
delay = delay / 1000;
else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
delay = 2;
return ret; if (!FIELD_FIT(QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK, delay)) {
} dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
delay = 3;
}
static void priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
const struct qca8k_match_data *match_data;
const struct qca8k_mib_desc *mib;
u32 reg, i, val;
u32 hi = 0;
int ret;
if (priv->mgmt_master && /* Skip sgmii parsing for rgmii* mode */
qca8k_get_ethtool_stats_eth(ds, port, data) > 0) if (mode == PHY_INTERFACE_MODE_RGMII ||
return; mode == PHY_INTERFACE_MODE_RGMII_ID ||
mode == PHY_INTERFACE_MODE_RGMII_TXID ||
mode == PHY_INTERFACE_MODE_RGMII_RXID)
break;
match_data = of_device_get_match_data(priv->dev); if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
priv->ports_config.sgmii_tx_clk_falling_edge = true;
for (i = 0; i < match_data->mib_count; i++) { if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
mib = &ar8327_mib[i]; priv->ports_config.sgmii_rx_clk_falling_edge = true;
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
ret = qca8k_read(priv, reg, &val); if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
if (ret < 0) priv->ports_config.sgmii_enable_pll = true;
continue;
if (mib->size == 2) { if (priv->switch_id == QCA8K_ID_QCA8327) {
ret = qca8k_read(priv, reg + 4, &hi); dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
if (ret < 0) priv->ports_config.sgmii_enable_pll = false;
continue;
} }
data[i] = val; if (priv->switch_revision < 2)
if (mib->size == 2) dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
data[i] |= (u64)hi << 32;
} }
}
static int break;
qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset) default:
{ continue;
const struct qca8k_match_data *match_data; }
struct qca8k_priv *priv = ds->priv; }
if (sset != ETH_SS_STATS)
return 0; return 0;
match_data = of_device_get_match_data(priv->dev);
return match_data->mib_count;
} }
static int static void
qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
u32 reg)
{ {
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u32 delay, val = 0;
u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
u32 reg;
int ret; int ret;
mutex_lock(&priv->reg_mutex); /* Delay can be declared in 3 different way.
ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg); * Mode to rgmii and internal-delay standard binding defined
if (ret < 0) * rgmii-id or rgmii-tx/rx phy mode set.
goto exit; * The parse logic set a delay different than 0 only when one
* of the 3 different way is used. In all other case delay is
* not enabled. With ID or TX/RXID delay is enabled and set
* to the default and recommended value.
*/
if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
if (eee->eee_enabled) val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
reg |= lpi_en; QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
else }
reg &= ~lpi_en;
ret = qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg);
exit: if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
mutex_unlock(&priv->reg_mutex); delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
return ret;
val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
}
/* Set RGMII delay based on the selected values */
ret = qca8k_rmw(priv, reg,
QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
val);
if (ret)
dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
} }
static int static struct phylink_pcs *
qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
phy_interface_t interface)
{ {
/* Nothing to do on the port's MAC */ struct qca8k_priv *priv = ds->priv;
return 0; struct phylink_pcs *pcs = NULL;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
switch (port) {
case 0:
pcs = &priv->pcs_port_0.pcs;
break;
case 6:
pcs = &priv->pcs_port_6.pcs;
break;
}
break;
default:
break;
}
return pcs;
} }
static void static void
qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{ {
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; struct qca8k_priv *priv = ds->priv;
u32 stp_state; int cpu_port_index;
u32 reg;
switch (state) { switch (port) {
case BR_STATE_DISABLED: case 0: /* 1st CPU port */
stp_state = QCA8K_PORT_LOOKUP_STATE_DISABLED; if (state->interface != PHY_INTERFACE_MODE_RGMII &&
break; state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
case BR_STATE_BLOCKING: state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
stp_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING; state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
break; state->interface != PHY_INTERFACE_MODE_SGMII)
case BR_STATE_LISTENING: return;
stp_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
reg = QCA8K_REG_PORT0_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT0;
break; break;
case BR_STATE_LEARNING: case 1:
stp_state = QCA8K_PORT_LOOKUP_STATE_LEARNING; case 2:
case 3:
case 4:
case 5:
/* Internal PHY, nothing to do */
return;
case 6: /* 2nd CPU port / external PHY */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
state->interface != PHY_INTERFACE_MODE_RGMII_TXID &&
state->interface != PHY_INTERFACE_MODE_RGMII_RXID &&
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_1000BASEX)
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT6;
break; break;
case BR_STATE_FORWARDING:
default: default:
stp_state = QCA8K_PORT_LOOKUP_STATE_FORWARD; dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
break; return;
} }
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), if (port != 6 && phylink_autoneg_inband(mode)) {
QCA8K_PORT_LOOKUP_STATE_MASK, stp_state); dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
} __func__);
return;
}
static int qca8k_port_bridge_join(struct dsa_switch *ds, int port, switch (state->interface) {
struct dsa_bridge bridge, case PHY_INTERFACE_MODE_RGMII:
bool *tx_fwd_offload, case PHY_INTERFACE_MODE_RGMII_ID:
struct netlink_ext_ack *extack) case PHY_INTERFACE_MODE_RGMII_TXID:
{ case PHY_INTERFACE_MODE_RGMII_RXID:
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
int port_mask, cpu_port;
int i, ret;
cpu_port = dsa_to_port(ds, port)->cpu_dp->index; /* Configure rgmii delay */
port_mask = BIT(cpu_port); qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
for (i = 0; i < QCA8K_NUM_PORTS; i++) { /* QCA8337 requires to set rgmii rx delay for all ports.
if (dsa_is_cpu_port(ds, i)) * This is enabled through PORT5_PAD_CTRL for all ports,
continue; * rather than individual port registers.
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
*/ */
ret = regmap_set_bits(priv->regmap, if (priv->switch_id == QCA8K_ID_QCA8337)
QCA8K_PORT_LOOKUP_CTRL(i), qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
BIT(port)); QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
if (ret) break;
return ret; case PHY_INTERFACE_MODE_SGMII:
if (i != port) case PHY_INTERFACE_MODE_1000BASEX:
port_mask |= BIT(i); /* Enable SGMII on the port */
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
phy_modes(state->interface), port);
return;
} }
/* Add all other ports to this ports portvlan mask */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_MEMBER, port_mask);
return ret;
} }
static void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
struct dsa_bridge bridge) struct phylink_config *config)
{ {
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; switch (port) {
int cpu_port, i; case 0: /* 1st CPU port */
phy_interface_set_rgmii(config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_SGMII,
config->supported_interfaces);
break;
cpu_port = dsa_to_port(ds, port)->cpu_dp->index; case 1:
case 2:
case 3:
case 4:
case 5:
/* Internal PHY */
__set_bit(PHY_INTERFACE_MODE_GMII,
config->supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
for (i = 0; i < QCA8K_NUM_PORTS; i++) { case 6: /* 2nd CPU port / external PHY */
if (dsa_is_cpu_port(ds, i)) phy_interface_set_rgmii(config->supported_interfaces);
continue; __set_bit(PHY_INTERFACE_MODE_SGMII,
if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) config->supported_interfaces);
continue; __set_bit(PHY_INTERFACE_MODE_1000BASEX,
/* Remove this port to the portvlan mask of the other ports config->supported_interfaces);
* in the bridge break;
*/
regmap_clear_bits(priv->regmap,
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
} }
/* Set the cpu port to be the only one in the portvlan mask of config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
* this port MAC_10 | MAC_100 | MAC_1000FD;
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), config->legacy_pre_march2020 = false;
QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
} }
static void static void
qca8k_port_fast_age(struct dsa_switch *ds, int port) qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface)
{ {
struct qca8k_priv *priv = ds->priv; struct qca8k_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex); qca8k_port_set_status(priv, port, 0);
qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
mutex_unlock(&priv->reg_mutex);
} }
static int static void
qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs) qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
phy_interface_t interface, struct phy_device *phydev,
int speed, int duplex, bool tx_pause, bool rx_pause)
{ {
struct qca8k_priv *priv = ds->priv; struct qca8k_priv *priv = ds->priv;
unsigned int secs = msecs / 1000; u32 reg;
u32 val;
/* AGE_TIME reg is set in 7s step */
val = secs / 7;
/* Handle case with 0 as val to NOT disable if (phylink_autoneg_inband(mode)) {
* learning reg = QCA8K_PORT_STATUS_LINK_AUTO;
*/ } else {
if (!val) switch (speed) {
val = 1; case SPEED_10:
reg = QCA8K_PORT_STATUS_SPEED_10;
break;
case SPEED_100:
reg = QCA8K_PORT_STATUS_SPEED_100;
break;
case SPEED_1000:
reg = QCA8K_PORT_STATUS_SPEED_1000;
break;
default:
reg = QCA8K_PORT_STATUS_LINK_AUTO;
break;
}
return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL, QCA8K_ATU_AGE_TIME_MASK, if (duplex == DUPLEX_FULL)
QCA8K_ATU_AGE_TIME(val)); reg |= QCA8K_PORT_STATUS_DUPLEX;
}
static int if (rx_pause || dsa_is_cpu_port(ds, port))
qca8k_port_enable(struct dsa_switch *ds, int port, reg |= QCA8K_PORT_STATUS_RXFLOW;
struct phy_device *phy)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
qca8k_port_set_status(priv, port, 1); if (tx_pause || dsa_is_cpu_port(ds, port))
priv->port_enabled_map |= BIT(port); reg |= QCA8K_PORT_STATUS_TXFLOW;
}
if (dsa_is_user_port(ds, port)) reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
phy_support_asym_pause(phy);
return 0; qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
} }
static void static struct qca8k_pcs *pcs_to_qca8k_pcs(struct phylink_pcs *pcs)
qca8k_port_disable(struct dsa_switch *ds, int port)
{ {
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; return container_of(pcs, struct qca8k_pcs, pcs);
qca8k_port_set_status(priv, port, 0);
priv->port_enabled_map &= ~BIT(port);
} }
static int static void qca8k_pcs_get_state(struct phylink_pcs *pcs,
qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) struct phylink_link_state *state)
{ {
struct qca8k_priv *priv = ds->priv; struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int port = pcs_to_qca8k_pcs(pcs)->port;
u32 reg;
int ret; int ret;
/* We have only have a general MTU setting. ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
* DSA always set the CPU port's MTU to the largest MTU of the slave if (ret < 0) {
* ports. state->link = false;
* Setting MTU just for the CPU port is sufficient to correctly set a return;
* value for every port. }
*/
if (!dsa_is_cpu_port(ds, port))
return 0;
/* To change the MAX_FRAME_SIZE the cpu ports must be off or state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
* the switch panics. state->an_complete = state->link;
* Turn off both cpu ports before applying the new value to prevent state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
* this. state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
*/ DUPLEX_HALF;
if (priv->port_enabled_map & BIT(0))
qca8k_port_set_status(priv, 0, 0); switch (reg & QCA8K_PORT_STATUS_SPEED) {
case QCA8K_PORT_STATUS_SPEED_10:
state->speed = SPEED_10;
break;
case QCA8K_PORT_STATUS_SPEED_100:
state->speed = SPEED_100;
break;
case QCA8K_PORT_STATUS_SPEED_1000:
state->speed = SPEED_1000;
break;
default:
state->speed = SPEED_UNKNOWN;
break;
}
if (reg & QCA8K_PORT_STATUS_RXFLOW)
state->pause |= MLO_PAUSE_RX;
if (reg & QCA8K_PORT_STATUS_TXFLOW)
state->pause |= MLO_PAUSE_TX;
}
if (priv->port_enabled_map & BIT(6)) static int qca8k_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
qca8k_port_set_status(priv, 6, 0); phy_interface_t interface,
const unsigned long *advertising,
bool permit_pause_to_mac)
{
struct qca8k_priv *priv = pcs_to_qca8k_pcs(pcs)->priv;
int cpu_port_index, ret, port;
u32 reg, val;
/* Include L2 header / FCS length */ port = pcs_to_qca8k_pcs(pcs)->port;
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); switch (port) {
case 0:
reg = QCA8K_REG_PORT0_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT0;
break;
if (priv->port_enabled_map & BIT(0)) case 6:
qca8k_port_set_status(priv, 0, 1); reg = QCA8K_REG_PORT6_PAD_CTRL;
cpu_port_index = QCA8K_CPU_PORT6;
break;
if (priv->port_enabled_map & BIT(6)) default:
qca8k_port_set_status(priv, 6, 1); WARN_ON(1);
return -EINVAL;
}
/* Enable/disable SerDes auto-negotiation as necessary */
ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
if (ret)
return ret; return ret;
} if (phylink_autoneg_inband(mode))
val &= ~QCA8K_PWS_SERDES_AEN_DIS;
else
val |= QCA8K_PWS_SERDES_AEN_DIS;
qca8k_write(priv, QCA8K_REG_PWS, val);
static int /* Configure the SGMII parameters */
qca8k_port_max_mtu(struct dsa_switch *ds, int port) ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
{ if (ret)
return QCA8K_MAX_MTU; return ret;
}
static int val |= QCA8K_SGMII_EN_SD;
qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
u16 port_mask, u16 vid)
{
/* Set the vid to the port vlan id if no vid is set */
if (!vid)
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_add(priv, addr, port_mask, vid, if (priv->ports_config.sgmii_enable_pll)
QCA8K_ATU_STATUS_STATIC); val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
} QCA8K_SGMII_EN_TX;
static int if (dsa_is_cpu_port(priv->ds, port)) {
qca8k_port_fdb_add(struct dsa_switch *ds, int port, /* CPU port, we're talking to the CPU MAC, be a PHY */
const unsigned char *addr, u16 vid, val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
struct dsa_db db) val |= QCA8K_SGMII_MODE_CTRL_PHY;
{ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
u16 port_mask = BIT(port); val |= QCA8K_SGMII_MODE_CTRL_MAC;
} else if (interface == PHY_INTERFACE_MODE_1000BASEX) {
val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
val |= QCA8K_SGMII_MODE_CTRL_BASEX;
}
return qca8k_port_fdb_insert(priv, addr, port_mask, vid); qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
}
static int /* From original code is reported port instability as SGMII also
qca8k_port_fdb_del(struct dsa_switch *ds, int port, * require delay set. Apply advised values here or take them from DT.
const unsigned char *addr, u16 vid, */
struct dsa_db db) if (interface == PHY_INTERFACE_MODE_SGMII)
{ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; /* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
u16 port_mask = BIT(port); * falling edge is set writing in the PORT0 PAD reg
*/
if (priv->switch_id == QCA8K_ID_QCA8327 ||
priv->switch_id == QCA8K_ID_QCA8337)
reg = QCA8K_REG_PORT0_PAD_CTRL;
if (!vid) val = 0;
vid = QCA8K_PORT_VID_DEF;
return qca8k_fdb_del(priv, addr, port_mask, vid); /* SGMII Clock phase configuration */
} if (priv->ports_config.sgmii_rx_clk_falling_edge)
val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
static int if (priv->ports_config.sgmii_tx_clk_falling_edge)
qca8k_port_fdb_dump(struct dsa_switch *ds, int port, val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
dsa_fdb_dump_cb_t *cb, void *data)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
struct qca8k_fdb _fdb = { 0 };
int cnt = QCA8K_NUM_FDB_RECORDS;
bool is_static;
int ret = 0;
mutex_lock(&priv->reg_mutex); if (val)
while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { ret = qca8k_rmw(priv, reg,
if (!_fdb.aging) QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
break; QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); val);
ret = cb(_fdb.mac, _fdb.vid, is_static, data);
if (ret)
break;
}
mutex_unlock(&priv->reg_mutex);
return 0; return 0;
} }
static int static void qca8k_pcs_an_restart(struct phylink_pcs *pcs)
qca8k_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db)
{ {
struct qca8k_priv *priv = ds->priv;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
return qca8k_fdb_search_and_insert(priv, BIT(port), addr, vid);
} }
static int static const struct phylink_pcs_ops qca8k_pcs_ops = {
qca8k_port_mdb_del(struct dsa_switch *ds, int port, .pcs_get_state = qca8k_pcs_get_state,
const struct switchdev_obj_port_mdb *mdb, .pcs_config = qca8k_pcs_config,
struct dsa_db db) .pcs_an_restart = qca8k_pcs_an_restart,
};
static void qca8k_setup_pcs(struct qca8k_priv *priv, struct qca8k_pcs *qpcs,
int port)
{ {
struct qca8k_priv *priv = ds->priv; qpcs->pcs.ops = &qca8k_pcs_ops;
const u8 *addr = mdb->addr;
u16 vid = mdb->vid;
return qca8k_fdb_search_and_del(priv, BIT(port), addr, vid); /* We don't have interrupts for link changes, so we need to poll */
qpcs->pcs.poll = true;
qpcs->priv = priv;
qpcs->port = port;
} }
static int static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *skb)
qca8k_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
{ {
struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv; struct qca8k_priv *priv = ds->priv;
int monitor_port, ret; const struct qca8k_mib_desc *mib;
u32 reg, val; struct mib_ethhdr *mib_ethhdr;
int i, mib_len, offset = 0;
/* Check for existent entry */ u64 *data;
if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port)) u8 port;
return -EEXIST;
ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
if (ret)
return ret;
/* QCA83xx can have only one port set to mirror mode.
* Check that the correct port is requested and return error otherwise.
* When no mirror port is set, the values is set to 0xF
*/
monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
return -EEXIST;
/* Set the monitor port */
val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
mirror->to_local_port);
ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
if (ret)
return ret;
if (ingress) {
reg = QCA8K_PORT_LOOKUP_CTRL(port);
val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
} else {
reg = QCA8K_REG_PORT_HOL_CTRL1(port);
val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
}
ret = regmap_update_bits(priv->regmap, reg, val, val); mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb);
if (ret) mib_eth_data = &priv->mib_eth_data;
return ret;
/* Track mirror port for tx and rx to decide when the /* The switch autocast every port. Ignore other packet and
* mirror port has to be disabled. * parse only the requested one.
*/ */
if (ingress) port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, ntohs(mib_ethhdr->hdr));
priv->mirror_rx |= BIT(port); if (port != mib_eth_data->req_port)
else goto exit;
priv->mirror_tx |= BIT(port);
return 0; data = mib_eth_data->data;
}
static void for (i = 0; i < priv->info->mib_count; i++) {
qca8k_port_mirror_del(struct dsa_switch *ds, int port, mib = &ar8327_mib[i];
struct dsa_mall_mirror_tc_entry *mirror)
{
struct qca8k_priv *priv = ds->priv;
u32 reg, val;
int ret;
if (mirror->ingress) { /* First 3 mib are present in the skb head */
reg = QCA8K_PORT_LOOKUP_CTRL(port); if (i < 3) {
val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN; data[i] = mib_ethhdr->data[i];
} else { continue;
reg = QCA8K_REG_PORT_HOL_CTRL1(port);
val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
} }
ret = regmap_clear_bits(priv->regmap, reg, val); mib_len = sizeof(uint32_t);
if (ret)
goto err;
if (mirror->ingress)
priv->mirror_rx &= ~BIT(port);
else
priv->mirror_tx &= ~BIT(port);
/* No port set to send packet to mirror port. Disable mirror port */ /* Some mib are 64 bit wide */
if (!priv->mirror_rx && !priv->mirror_tx) { if (mib->size == 2)
val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF); mib_len = sizeof(uint64_t);
ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
if (ret)
goto err;
}
err:
dev_err(priv->dev, "Failed to del mirror port from %d", port);
}
static int /* Copy the mib value from packet to the */
qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering, memcpy(data + i, skb->data + offset, mib_len);
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
int ret;
if (vlan_filtering) { /* Set the offset for the next mib */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), offset += mib_len;
QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE);
} else {
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE_MASK,
QCA8K_PORT_LOOKUP_VLAN_MODE_NONE);
} }
return ret; exit:
/* Complete on receiving all the mib packet */
if (refcount_dec_and_test(&mib_eth_data->port_parsed))
complete(&mib_eth_data->rw_done);
} }
static int static int
qca8k_port_vlan_add(struct dsa_switch *ds, int port, qca8k_get_ethtool_stats_eth(struct dsa_switch *ds, int port, u64 *data)
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
{ {
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; struct dsa_port *dp = dsa_to_port(ds, port);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; struct qca8k_mib_eth_data *mib_eth_data;
struct qca8k_priv *priv = ds->priv; struct qca8k_priv *priv = ds->priv;
int ret; int ret;
ret = qca8k_vlan_add(priv, port, vlan->vid, untagged); mib_eth_data = &priv->mib_eth_data;
if (ret) {
dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
return ret;
}
if (pvid) { mutex_lock(&mib_eth_data->mutex);
ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
QCA8K_EGREES_VLAN_PORT_MASK(port),
QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
if (ret)
return ret;
ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port), reinit_completion(&mib_eth_data->rw_done);
QCA8K_PORT_VLAN_CVID(vlan->vid) |
QCA8K_PORT_VLAN_SVID(vlan->vid));
}
return ret; mib_eth_data->req_port = dp->index;
} mib_eth_data->data = data;
refcount_set(&mib_eth_data->port_parsed, QCA8K_NUM_PORTS);
static int mutex_lock(&priv->reg_mutex);
qca8k_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan) /* Send mib autocast request */
{ ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
struct qca8k_priv *priv = ds->priv; QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
int ret; FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_CAST) |
QCA8K_MIB_BUSY);
mutex_unlock(&priv->reg_mutex);
ret = qca8k_vlan_del(priv, port, vlan->vid);
if (ret) if (ret)
dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, ret); goto exit;
ret = wait_for_completion_timeout(&mib_eth_data->rw_done, QCA8K_ETHERNET_TIMEOUT);
exit:
mutex_unlock(&mib_eth_data->mutex);
return ret; return ret;
} }
...@@ -2640,174 +1593,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port, ...@@ -2640,174 +1593,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
return DSA_TAG_PROTO_QCA; return DSA_TAG_PROTO_QCA;
} }
static bool
qca8k_lag_can_offload(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct dsa_port *dp;
int members = 0;
if (!lag.id)
return false;
dsa_lag_foreach_port(dp, ds->dst, &lag)
/* Includes the port joining the LAG */
members++;
if (members > QCA8K_NUM_PORTS_FOR_LAG)
return false;
if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
return false;
if (info->hash_type != NETDEV_LAG_HASH_L2 &&
info->hash_type != NETDEV_LAG_HASH_L23)
return false;
return true;
}
static int
qca8k_lag_setup_hash(struct dsa_switch *ds, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
struct net_device *lag_dev = lag.dev;
struct qca8k_priv *priv = ds->priv;
bool unique_lag = true;
unsigned int i;
u32 hash = 0;
switch (info->hash_type) {
case NETDEV_LAG_HASH_L23:
hash |= QCA8K_TRUNK_HASH_SIP_EN;
hash |= QCA8K_TRUNK_HASH_DIP_EN;
fallthrough;
case NETDEV_LAG_HASH_L2:
hash |= QCA8K_TRUNK_HASH_SA_EN;
hash |= QCA8K_TRUNK_HASH_DA_EN;
break;
default: /* We should NEVER reach this */
return -EOPNOTSUPP;
}
/* Check if we are the unique configured LAG */
dsa_lags_foreach_id(i, ds->dst)
if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
unique_lag = false;
break;
}
/* Hash Mode is global. Make sure the same Hash Mode
* is set to all the 4 possible lag.
* If we are the unique LAG we can set whatever hash
* mode we want.
* To change hash mode it's needed to remove all LAG
* and change the mode with the latest.
*/
if (unique_lag) {
priv->lag_hash_mode = hash;
} else if (priv->lag_hash_mode != hash) {
netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
return -EOPNOTSUPP;
}
return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
QCA8K_TRUNK_HASH_MASK, hash);
}
static int
qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
struct dsa_lag lag, bool delete)
{
struct qca8k_priv *priv = ds->priv;
int ret, id, i;
u32 val;
/* DSA LAG IDs are one-based, hardware is zero-based */
id = lag.id - 1;
/* Read current port member */
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
if (ret)
return ret;
/* Shift val to the correct trunk */
val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
if (delete)
val &= ~BIT(port);
else
val |= BIT(port);
/* Update port member. With empty portmap disable trunk */
ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
QCA8K_REG_GOL_TRUNK_MEMBER(id) |
QCA8K_REG_GOL_TRUNK_EN(id),
!val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
val << QCA8K_REG_GOL_TRUNK_SHIFT(id));
/* Search empty member if adding or port on deleting */
for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
if (ret)
return ret;
val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;
if (delete) {
/* If port flagged to be disabled assume this member is
* empty
*/
if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
continue;
val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
if (val != port)
continue;
} else {
/* If port flagged to be enabled assume this member is
* already set
*/
if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
continue;
}
/* We have found the member to add/remove */
break;
}
/* Set port in the correct port mask or disable port if in delete mode */
return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
!delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}
static int
qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info)
{
int ret;
if (!qca8k_lag_can_offload(ds, lag, info))
return -EOPNOTSUPP;
ret = qca8k_lag_setup_hash(ds, lag, info);
if (ret)
return ret;
return qca8k_lag_refresh_portmap(ds, port, lag, false);
}
static int
qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag)
{
return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
static void static void
qca8k_master_change(struct dsa_switch *ds, const struct net_device *master, qca8k_master_change(struct dsa_switch *ds, const struct net_device *master,
bool operational) bool operational)
...@@ -3091,36 +1876,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { ...@@ -3091,36 +1876,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.connect_tag_protocol = qca8k_connect_tag_protocol, .connect_tag_protocol = qca8k_connect_tag_protocol,
}; };
static int qca8k_read_switch_id(struct qca8k_priv *priv)
{
const struct qca8k_match_data *data;
u32 val;
u8 id;
int ret;
/* get the switches ID from the compatible */
data = of_device_get_match_data(priv->dev);
if (!data)
return -ENODEV;
ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
if (ret < 0)
return -ENODEV;
id = QCA8K_MASK_CTRL_DEVICE_ID(val);
if (id != data->id) {
dev_err(priv->dev, "Switch id detected %x but expected %x", id, data->id);
return -ENODEV;
}
priv->switch_id = id;
/* Save revision to communicate to the internal PHY driver */
priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);
return 0;
}
static int static int
qca8k_sw_probe(struct mdio_device *mdiodev) qca8k_sw_probe(struct mdio_device *mdiodev)
{ {
...@@ -3134,6 +1889,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) ...@@ -3134,6 +1889,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv) if (!priv)
return -ENOMEM; return -ENOMEM;
priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus; priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev; priv->dev = &mdiodev->dev;
...@@ -3256,20 +2012,29 @@ static int qca8k_resume(struct device *dev) ...@@ -3256,20 +2012,29 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume); qca8k_suspend, qca8k_resume);
static const struct qca8k_info_ops qca8xxx_ops = {
.autocast_mib = qca8k_get_ethtool_stats_eth,
.read_eth = qca8k_read_eth,
.write_eth = qca8k_write_eth,
};
static const struct qca8k_match_data qca8327 = { static const struct qca8k_match_data qca8327 = {
.id = QCA8K_ID_QCA8327, .id = QCA8K_ID_QCA8327,
.reduced_package = true, .reduced_package = true,
.mib_count = QCA8K_QCA832X_MIB_COUNT, .mib_count = QCA8K_QCA832X_MIB_COUNT,
.ops = &qca8xxx_ops,
}; };
static const struct qca8k_match_data qca8328 = { static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327, .id = QCA8K_ID_QCA8327,
.mib_count = QCA8K_QCA832X_MIB_COUNT, .mib_count = QCA8K_QCA832X_MIB_COUNT,
.ops = &qca8xxx_ops,
}; };
static const struct qca8k_match_data qca833x = { static const struct qca8k_match_data qca833x = {
.id = QCA8K_ID_QCA8337, .id = QCA8K_ID_QCA8337,
.mib_count = QCA8K_QCA833X_MIB_COUNT, .mib_count = QCA8K_QCA833X_MIB_COUNT,
.ops = &qca8xxx_ops,
}; };
static const struct of_device_id qca8k_of_match[] = { static const struct of_device_id qca8k_of_match[] = {
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
* Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <john@phrozen.org>
*/
#include <linux/netdevice.h>
#include <net/dsa.h>
#include <linux/if_bridge.h>
#include "qca8k.h"
/* Declare one MIB counter descriptor: size in 32-bit words (2 means a
 * 64-bit counter), register offset within the per-port MIB block, and
 * a human readable counter name.
 */
#define MIB_DESC(_s, _o, _n)	\
	{			\
		.size = (_s),	\
		.offset = (_o),	\
		.name = (_n),	\
	}
/* MIB counters exposed by the AR8327/QCA8K switch family. Offsets are
 * relative to each port's MIB register block; entries with size 2 are
 * 64-bit counters spanning two consecutive 32-bit registers.
 */
const struct qca8k_mib_desc ar8327_mib[] = {
	MIB_DESC(1, 0x00, "RxBroad"),
	MIB_DESC(1, 0x04, "RxPause"),
	MIB_DESC(1, 0x08, "RxMulti"),
	MIB_DESC(1, 0x0c, "RxFcsErr"),
	MIB_DESC(1, 0x10, "RxAlignErr"),
	MIB_DESC(1, 0x14, "RxRunt"),
	MIB_DESC(1, 0x18, "RxFragment"),
	MIB_DESC(1, 0x1c, "Rx64Byte"),
	MIB_DESC(1, 0x20, "Rx128Byte"),
	MIB_DESC(1, 0x24, "Rx256Byte"),
	MIB_DESC(1, 0x28, "Rx512Byte"),
	MIB_DESC(1, 0x2c, "Rx1024Byte"),
	MIB_DESC(1, 0x30, "Rx1518Byte"),
	MIB_DESC(1, 0x34, "RxMaxByte"),
	MIB_DESC(1, 0x38, "RxTooLong"),
	MIB_DESC(2, 0x3c, "RxGoodByte"),
	MIB_DESC(2, 0x44, "RxBadByte"),
	MIB_DESC(1, 0x4c, "RxOverFlow"),
	MIB_DESC(1, 0x50, "Filtered"),
	MIB_DESC(1, 0x54, "TxBroad"),
	MIB_DESC(1, 0x58, "TxPause"),
	MIB_DESC(1, 0x5c, "TxMulti"),
	MIB_DESC(1, 0x60, "TxUnderRun"),
	MIB_DESC(1, 0x64, "Tx64Byte"),
	MIB_DESC(1, 0x68, "Tx128Byte"),
	MIB_DESC(1, 0x6c, "Tx256Byte"),
	MIB_DESC(1, 0x70, "Tx512Byte"),
	MIB_DESC(1, 0x74, "Tx1024Byte"),
	MIB_DESC(1, 0x78, "Tx1518Byte"),
	MIB_DESC(1, 0x7c, "TxMaxByte"),
	MIB_DESC(1, 0x80, "TxOverSize"),
	MIB_DESC(2, 0x84, "TxByte"),
	MIB_DESC(1, 0x8c, "TxCollision"),
	MIB_DESC(1, 0x90, "TxAbortCol"),
	MIB_DESC(1, 0x94, "TxMultiCol"),
	MIB_DESC(1, 0x98, "TxSingleCol"),
	MIB_DESC(1, 0x9c, "TxExcDefer"),
	MIB_DESC(1, 0xa0, "TxDefer"),
	MIB_DESC(1, 0xa4, "TxLateCol"),
	MIB_DESC(1, 0xa8, "RXUnicast"),
	MIB_DESC(1, 0xac, "TXUnicast"),
};
/* Read one 32-bit switch register; thin wrapper over the priv regmap. */
int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
	return regmap_read(priv->regmap, reg, val);
}
/* Write one 32-bit switch register; thin wrapper over the priv regmap. */
int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val)
{
	return regmap_write(priv->regmap, reg, val);
}
/* Read-modify-write: update only the bits in @mask with @write_val. */
int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
{
	return regmap_update_bits(priv->regmap, reg, mask, write_val);
}
/* Register windows the regmap is allowed to read; anything outside
 * these ranges is rejected by the access table below.
 */
static const struct regmap_range qca8k_readable_ranges[] = {
	regmap_reg_range(0x0000, 0x00e4), /* Global control */
	regmap_reg_range(0x0100, 0x0168), /* EEE control */
	regmap_reg_range(0x0200, 0x0270), /* Parser control */
	regmap_reg_range(0x0400, 0x0454), /* ACL */
	regmap_reg_range(0x0600, 0x0718), /* Lookup */
	regmap_reg_range(0x0800, 0x0b70), /* QM */
	regmap_reg_range(0x0c00, 0x0c80), /* PKT */
	regmap_reg_range(0x0e00, 0x0e98), /* L3 */
	regmap_reg_range(0x1000, 0x10ac), /* MIB - Port0 */
	regmap_reg_range(0x1100, 0x11ac), /* MIB - Port1 */
	regmap_reg_range(0x1200, 0x12ac), /* MIB - Port2 */
	regmap_reg_range(0x1300, 0x13ac), /* MIB - Port3 */
	regmap_reg_range(0x1400, 0x14ac), /* MIB - Port4 */
	regmap_reg_range(0x1500, 0x15ac), /* MIB - Port5 */
	regmap_reg_range(0x1600, 0x16ac), /* MIB - Port6 */
};
/* Access table handed to the regmap config; exported for the bus-specific
 * driver parts that build the regmap.
 */
const struct regmap_access_table qca8k_readable_table = {
	.yes_ranges = qca8k_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
/* TODO: remove these extra ops when we can support regmap bulk read/write */
static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	int count = len / sizeof(u32);
	int i, ret;

	/* Prefer the Ethernet mgmt path when available: it fetches the
	 * whole range in a single frame.
	 */
	if (priv->mgmt_master && priv->info->ops->read_eth &&
	    !priv->info->ops->read_eth(priv, reg, val, len))
		return 0;

	/* Fall back to one regmap read per 32-bit word */
	for (i = 0; i < count; i++) {
		ret = regmap_read(priv->regmap, reg + i * sizeof(u32), &val[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* TODO: remove these extra ops when we can support regmap bulk read/write */
static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
{
	int i, count = len / sizeof(u32), ret;

	/* Prefer the Ethernet mgmt path when available: it pushes the
	 * whole range in a single frame.
	 */
	if (priv->mgmt_master && priv->info->ops->write_eth &&
	    !priv->info->ops->write_eth(priv, reg, val, len))
		return 0;

	/* Fall back to one regmap write per 32-bit word. Write val[i]
	 * directly; the old intermediate copy added nothing.
	 */
	for (i = 0; i < count; i++) {
		ret = regmap_write(priv->regmap, reg + (i * 4), val[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* Poll @reg until every bit in @mask is clear. Returns 0 on success, a
 * negative errno on read error or after QCA8K_BUSY_WAIT_TIMEOUT ms.
 */
static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
	u32 val;

	return regmap_read_poll_timeout(priv->regmap, reg, val, !(val & mask), 0,
				       QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC);
}
/* Decode the three ATU data registers into @fdb. The bit positions in
 * the comments refer to the hardware's flat ATU entry layout; callers
 * must trigger the table access (qca8k_fdb_access) first.
 */
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
	u32 reg[3];
	int ret;

	/* load the ARL table into an array */
	ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
	if (ret)
		return ret;

	/* vid - 83:72 */
	fdb->vid = FIELD_GET(QCA8K_ATU_VID_MASK, reg[2]);
	/* aging - 67:64 */
	fdb->aging = FIELD_GET(QCA8K_ATU_STATUS_MASK, reg[2]);
	/* portmask - 54:48 */
	fdb->port_mask = FIELD_GET(QCA8K_ATU_PORT_MASK, reg[1]);
	/* mac - 47:0 */
	fdb->mac[0] = FIELD_GET(QCA8K_ATU_ADDR0_MASK, reg[1]);
	fdb->mac[1] = FIELD_GET(QCA8K_ATU_ADDR1_MASK, reg[1]);
	fdb->mac[2] = FIELD_GET(QCA8K_ATU_ADDR2_MASK, reg[0]);
	fdb->mac[3] = FIELD_GET(QCA8K_ATU_ADDR3_MASK, reg[0]);
	fdb->mac[4] = FIELD_GET(QCA8K_ATU_ADDR4_MASK, reg[0]);
	fdb->mac[5] = FIELD_GET(QCA8K_ATU_ADDR5_MASK, reg[0]);

	return 0;
}
/* Encode an FDB entry into the three ATU data registers. This only
 * stages the entry; a subsequent qca8k_fdb_access() command commits it.
 * Write errors from the bulk write are intentionally not propagated
 * (callers detect failure via the following table-access command).
 */
static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
			    const u8 *mac, u8 aging)
{
	u32 reg[3] = { 0 };

	/* vid - 83:72 */
	reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
	/* aging - 67:64 */
	reg[2] |= FIELD_PREP(QCA8K_ATU_STATUS_MASK, aging);
	/* portmask - 54:48 */
	reg[1] = FIELD_PREP(QCA8K_ATU_PORT_MASK, port_mask);
	/* mac - 47:0 */
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR0_MASK, mac[0]);
	reg[1] |= FIELD_PREP(QCA8K_ATU_ADDR1_MASK, mac[1]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR2_MASK, mac[2]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR3_MASK, mac[3]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR4_MASK, mac[4]);
	reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);

	/* load the array into the ARL table */
	qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
}
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
int port)
{
u32 reg;
int ret;
/* Set the command and FDB index */
reg = QCA8K_ATU_FUNC_BUSY;
reg |= cmd;
if (port >= 0) {
reg |= QCA8K_ATU_FUNC_PORT_EN;
reg |= FIELD_PREP(QCA8K_ATU_FUNC_PORT_MASK, port);
}
/* Write the function register triggering the table access */
ret = qca8k_write(priv, QCA8K_REG_ATU_FUNC, reg);
if (ret)
return ret;
/* wait for completion */
ret = qca8k_busy_wait(priv, QCA8K_REG_ATU_FUNC, QCA8K_ATU_FUNC_BUSY);
if (ret)
return ret;
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_FDB_LOAD) {
ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
if (ret < 0)
return ret;
if (reg & QCA8K_ATU_FUNC_FULL)
return -1;
}
return 0;
}
/* Advance an FDB walk: seed the ATU data registers with the current
 * entry, ask the hardware for the NEXT entry on @port, then decode the
 * result back into @fdb.
 */
static int qca8k_fdb_next(struct qca8k_priv *priv, struct qca8k_fdb *fdb,
			  int port)
{
	int err;

	qca8k_fdb_write(priv, fdb->vid, fdb->port_mask, fdb->mac, fdb->aging);

	err = qca8k_fdb_access(priv, QCA8K_FDB_NEXT, port);
	if (err < 0)
		return err;

	return qca8k_fdb_read(priv, fdb);
}
/* Install one FDB entry: stage it in the ATU data registers and commit
 * with a LOAD command, serialized against other table users.
 */
static int qca8k_fdb_add(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid, u8 aging)
{
	int err;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, port_mask, mac, aging);
	err = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

	mutex_unlock(&priv->reg_mutex);

	return err;
}
/* Remove one FDB entry: stage the key in the ATU data registers and
 * commit with a PURGE command, serialized against other table users.
 */
static int qca8k_fdb_del(struct qca8k_priv *priv, const u8 *mac,
			 u16 port_mask, u16 vid)
{
	int err;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, port_mask, mac, 0);
	err = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);

	mutex_unlock(&priv->reg_mutex);

	return err;
}
/* Issue an ATU FLUSH command (all ports). Best effort: the command's
 * return value is intentionally ignored.
 */
void qca8k_fdb_flush(struct qca8k_priv *priv)
{
	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH, -1);
	mutex_unlock(&priv->reg_mutex);
}
/* Add @port_mask to the FDB entry for (@mac, @vid), creating a static
 * entry if none exists yet.
 *
 * Fixes two defects in the previous version: the aging test was
 * inverted (it purged when the SEARCH *missed* and skipped the purge
 * when an entry was found), and a newly created entry was loaded with
 * aging 0, i.e. as an invalid entry. Now an existing entry is purged
 * before being re-loaded with the widened portmask, and a new entry is
 * loaded with QCA8K_ATU_STATUS_STATIC.
 */
static int qca8k_fdb_search_and_insert(struct qca8k_priv *priv, u8 port_mask,
				       const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule exist. Delete first */
	if (fdb.aging) {
		ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
		if (ret)
			goto exit;
	} else {
		/* New entry: load it as static so it doesn't age out */
		fdb.aging = QCA8K_ATU_STATUS_STATIC;
	}

	/* Add port to fdb portmask */
	fdb.port_mask |= port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
/* Remove @port_mask from the FDB entry for (@mac, @vid), deleting the
 * entry entirely when this port was its only member.
 *
 * Fixes a defect in the previous version: after the SEARCH command the
 * matched entry was never read back with qca8k_fdb_read(), so @fdb kept
 * its zero-initialized value — the aging check always saw 0 and every
 * delete wrongly failed with -EINVAL (and the portmask logic operated
 * on stale data).
 */
static int qca8k_fdb_search_and_del(struct qca8k_priv *priv, u8 port_mask,
				    const u8 *mac, u16 vid)
{
	struct qca8k_fdb fdb = { 0 };
	int ret;

	mutex_lock(&priv->reg_mutex);

	qca8k_fdb_write(priv, vid, 0, mac, 0);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_SEARCH, -1);
	if (ret < 0)
		goto exit;

	/* Read back the entry the SEARCH matched */
	ret = qca8k_fdb_read(priv, &fdb);
	if (ret < 0)
		goto exit;

	/* Rule doesn't exist. Why delete? */
	if (!fdb.aging) {
		ret = -EINVAL;
		goto exit;
	}

	ret = qca8k_fdb_access(priv, QCA8K_FDB_PURGE, -1);
	if (ret)
		goto exit;

	/* Only port in the rule is this port. Don't re insert */
	if (fdb.port_mask == port_mask)
		goto exit;

	/* Remove port from port mask */
	fdb.port_mask &= ~port_mask;

	qca8k_fdb_write(priv, vid, fdb.port_mask, mac, fdb.aging);
	ret = qca8k_fdb_access(priv, QCA8K_FDB_LOAD, -1);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
/* Run VLAN table (VTU) operation @cmd against entry @vid and wait for
 * the hardware to complete it. Caller must hold priv->reg_mutex (all
 * callers in this file do).
 *
 * Returns 0 on success, -ENOMEM when a LOAD finds the table full, or a
 * negative errno from the register access.
 */
static int qca8k_vlan_access(struct qca8k_priv *priv,
			     enum qca8k_vlan_cmd cmd, u16 vid)
{
	u32 reg;
	int ret;

	/* Set the command and VLAN index */
	reg = QCA8K_VTU_FUNC1_BUSY;
	reg |= cmd;
	reg |= FIELD_PREP(QCA8K_VTU_FUNC1_VID_MASK, vid);

	/* Write the function register triggering the table access */
	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC1, reg);
	if (ret)
		return ret;

	/* wait for completion */
	ret = qca8k_busy_wait(priv, QCA8K_REG_VTU_FUNC1, QCA8K_VTU_FUNC1_BUSY);
	if (ret)
		return ret;

	/* Check for table full violation when adding an entry */
	if (cmd == QCA8K_VLAN_LOAD) {
		ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
		if (ret < 0)
			return ret;
		if (reg & QCA8K_VTU_FUNC1_FULL)
			return -ENOMEM;
	}

	return 0;
}
/* Add @port to VLAN @vid in the VTU with tagged or untagged egress.
 * Sequence: READ the current entry, update the per-port egress mode
 * bits, write it back and LOAD it. Returns 0 or a negative errno.
 */
static int qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid,
			  bool untagged)
{
	u32 reg;
	int ret;

	/* We do the right thing with VLAN 0 and treat it as untagged while
	 * preserving the tag on egress.
	 */
	if (vid == 0)
		return 0;

	mutex_lock(&priv->reg_mutex);

	/* Fetch the current VTU entry for @vid into VTU_FUNC0 */
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;

	/* Mark the entry valid and replace this port's egress mode */
	reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	if (untagged)
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_UNTAG(port);
	else
		reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_TAG(port);

	ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
	if (ret)
		goto out;

	/* Commit the modified entry back into the table */
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);

out:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
/* Remove @port from VLAN @vid. If the port was the last member of the
 * VLAN the whole entry is purged, otherwise the entry is reloaded with
 * this port's egress mode set to NOT_MEMBER. Returns 0 or a negative
 * errno.
 */
static int qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
{
	u32 reg, mask;
	int ret, i;
	bool del;

	mutex_lock(&priv->reg_mutex);

	/* Fetch the current VTU entry for @vid into VTU_FUNC0 */
	ret = qca8k_vlan_access(priv, QCA8K_VLAN_READ, vid);
	if (ret < 0)
		goto out;

	ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
	if (ret < 0)
		goto out;

	/* Mark this port as not a member of the VLAN */
	reg &= ~QCA8K_VTU_FUNC0_EG_MODE_PORT_MASK(port);
	reg |= QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(port);

	/* Check if we're the last member to be removed */
	del = true;
	for (i = 0; i < QCA8K_NUM_PORTS; i++) {
		mask = QCA8K_VTU_FUNC0_EG_MODE_PORT_NOT(i);
		if ((reg & mask) != mask) {
			del = false;
			break;
		}
	}

	if (del) {
		/* No members left: drop the whole entry */
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_PURGE, vid);
	} else {
		ret = qca8k_write(priv, QCA8K_REG_VTU_FUNC0, reg);
		if (ret)
			goto out;
		ret = qca8k_vlan_access(priv, QCA8K_VLAN_LOAD, vid);
	}

out:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
/* Reset and enable the MIB counter block.
 *
 * Sequence: request a MIB flush, wait for the busy flag to clear, set
 * QCA8K_MIB_CPU_KEEP (presumably retains counters for CPU-directed
 * traffic — confirm against the datasheet), then enable the MIB module.
 * Returns 0 or a negative errno.
 */
int qca8k_mib_init(struct qca8k_priv *priv)
{
	int ret;

	mutex_lock(&priv->reg_mutex);

	/* Kick a MIB flush and mark the operation busy in one update */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_MIB,
				 QCA8K_MIB_FUNC | QCA8K_MIB_BUSY,
				 FIELD_PREP(QCA8K_MIB_FUNC, QCA8K_MIB_FLUSH) |
				 QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	/* Wait for the flush to complete */
	ret = qca8k_busy_wait(priv, QCA8K_REG_MIB, QCA8K_MIB_BUSY);
	if (ret)
		goto exit;

	ret = regmap_set_bits(priv->regmap, QCA8K_REG_MIB, QCA8K_MIB_CPU_KEEP);
	if (ret)
		goto exit;

	/* Finally enable the MIB module itself */
	ret = qca8k_write(priv, QCA8K_REG_MODULE_EN, QCA8K_MODULE_EN_MIB);

exit:
	mutex_unlock(&priv->reg_mutex);
	return ret;
}
/* Enable or disable the MAC of @port by toggling its TX/RX MAC bits.
 * Ports with an internal PHY additionally track link state via autoneg.
 */
void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
{
	u32 bits = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;

	/* Port 0 and 6 have no internal PHY */
	if (port > 0 && port < 6)
		bits |= QCA8K_PORT_STATUS_LINK_AUTO;

	if (enable)
		regmap_set_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), bits);
	else
		regmap_clear_bits(priv->regmap, QCA8K_REG_PORT_STATUS(port), bits);
}
/* ethtool -S: copy the MIB counter names into the fixed-size string
 * slots provided by the core.
 */
void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset,
		       uint8_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	uint8_t *slot = data;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	/* One ETH_GSTRING_LEN-wide slot per supported MIB counter */
	for (i = 0; i < priv->info->mib_count; i++, slot += ETH_GSTRING_LEN)
		strncpy(slot, ar8327_mib[i].name, ETH_GSTRING_LEN);
}
/* ethtool -S: read the per-port MIB counters into @data.
 *
 * If a management conduit is up and the chip variant provides an
 * autocast_mib op, prefer that path; a positive return means @data was
 * already filled. Otherwise read each counter over the register
 * interface. Counters whose read fails are skipped (their slot in
 * @data keeps its previous value).
 */
void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
			     uint64_t *data)
{
	struct qca8k_priv *priv = ds->priv;
	const struct qca8k_mib_desc *mib;
	u32 reg, i, val;
	u32 hi = 0;
	int ret;

	if (priv->mgmt_master && priv->info->ops->autocast_mib &&
	    priv->info->ops->autocast_mib(ds, port, data) > 0)
		return;

	for (i = 0; i < priv->info->mib_count; i++) {
		mib = &ar8327_mib[i];
		reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;

		ret = qca8k_read(priv, reg, &val);
		if (ret < 0)
			continue;

		/* 64-bit counters span two consecutive 32-bit registers */
		if (mib->size == 2) {
			ret = qca8k_read(priv, reg + 4, &hi);
			if (ret < 0)
				continue;
		}

		data[i] = val;
		if (mib->size == 2)
			data[i] |= (u64)hi << 32;
	}
}
/* ethtool: number of stats strings/values we expose per port. */
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct qca8k_priv *priv = ds->priv;

	return sset == ETH_SS_STATS ? priv->info->mib_count : 0;
}
/* ethtool --set-eee: toggle the LPI (low power idle) enable bit of
 * @port in the global EEE control register.
 */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_eee *eee)
{
	u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port);
	struct qca8k_priv *priv = ds->priv;
	u32 val;
	int err;

	mutex_lock(&priv->reg_mutex);
	err = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &val);
	if (err < 0)
		goto out;

	val = eee->eee_enabled ? (val | lpi_en) : (val & ~lpi_en);
	err = qca8k_write(priv, QCA8K_REG_EEE_CTRL, val);

out:
	mutex_unlock(&priv->reg_mutex);
	return err;
}
/* ethtool --show-eee: nothing to report at the MAC level; success. */
int qca8k_get_mac_eee(struct dsa_switch *ds, int port,
		      struct ethtool_eee *e)
{
	/* Nothing to do on the port's MAC */
	return 0;
}
/* Translate the bridge STP state to the hardware port lookup state and
 * program it into the port's lookup control register. Unknown states
 * default to forwarding.
 */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
	struct qca8k_priv *priv = ds->priv;
	u32 lookup_state;

	switch (state) {
	case BR_STATE_DISABLED:
		lookup_state = QCA8K_PORT_LOOKUP_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
		lookup_state = QCA8K_PORT_LOOKUP_STATE_BLOCKING;
		break;
	case BR_STATE_LISTENING:
		lookup_state = QCA8K_PORT_LOOKUP_STATE_LISTENING;
		break;
	case BR_STATE_LEARNING:
		lookup_state = QCA8K_PORT_LOOKUP_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		lookup_state = QCA8K_PORT_LOOKUP_STATE_FORWARD;
		break;
	}

	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_STATE_MASK, lookup_state);
}
/* Join @port to @bridge: open the port-vlan masks between the joining
 * port and every current bridge member, always keeping the CPU port in
 * this port's member mask. Returns 0 or a negative errno.
 */
int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
			   struct dsa_bridge bridge,
			   bool *tx_fwd_offload,
			   struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int members, cpu_port;
	int other, err;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
	members = BIT(cpu_port);

	for (other = 0; other < QCA8K_NUM_PORTS; other++) {
		if (dsa_is_cpu_port(ds, other))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, other), &bridge))
			continue;

		/* Add this port to the portvlan mask of the other ports
		 * in the bridge
		 */
		err = regmap_set_bits(priv->regmap,
				      QCA8K_PORT_LOOKUP_CTRL(other),
				      BIT(port));
		if (err)
			return err;

		if (other != port)
			members |= BIT(other);
	}

	/* Add all other ports to this ports portvlan mask */
	return qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
			 QCA8K_PORT_LOOKUP_MEMBER, members);
}
/* Remove @port from @bridge: clear it out of every member's port-vlan
 * mask and shrink its own mask down to just the CPU port.
 */
void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
			     struct dsa_bridge bridge)
{
	struct qca8k_priv *priv = ds->priv;
	int cpu_port, other;

	cpu_port = dsa_to_port(ds, port)->cpu_dp->index;

	for (other = 0; other < QCA8K_NUM_PORTS; other++) {
		if (dsa_is_cpu_port(ds, other))
			continue;
		if (!dsa_port_offloads_bridge(dsa_to_port(ds, other), &bridge))
			continue;

		/* Remove this port to the portvlan mask of the other ports
		 * in the bridge
		 */
		regmap_clear_bits(priv->regmap,
				  QCA8K_PORT_LOOKUP_CTRL(other),
				  BIT(port));
	}

	/* Set the cpu port to be the only one in the portvlan mask of
	 * this port
	 */
	qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
		  QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
/* Flush all FDB entries learned on @port (per-port flush command). */
void qca8k_port_fast_age(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	mutex_lock(&priv->reg_mutex);
	qca8k_fdb_access(priv, QCA8K_FDB_FLUSH_PORT, port);
	mutex_unlock(&priv->reg_mutex);
}
/* Program the FDB ageing time. The hardware field counts in 7 second
 * steps; a value of 0 would disable learning, so clamp to the minimum
 * step instead. Returns 0 or a negative errno.
 */
int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	struct qca8k_priv *priv = ds->priv;
	unsigned int val = (msecs / 1000) / 7;

	/* Never write 0: that would disable learning rather than age fast */
	if (!val)
		val = 1;

	return regmap_update_bits(priv->regmap, QCA8K_REG_ATU_CTRL,
				  QCA8K_ATU_AGE_TIME_MASK,
				  QCA8K_ATU_AGE_TIME(val));
}
/* Bring up the MAC of @port and record it in the enabled-port map.
 * User ports also advertise asymmetric pause on their PHY.
 */
int qca8k_port_enable(struct dsa_switch *ds, int port,
		      struct phy_device *phy)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 1);
	priv->port_enabled_map |= BIT(port);

	if (dsa_is_user_port(ds, port))
		phy_support_asym_pause(phy);

	return 0;
}
/* Shut down the MAC of @port and clear it from the enabled-port map. */
void qca8k_port_disable(struct dsa_switch *ds, int port)
{
	struct qca8k_priv *priv = ds->priv;

	qca8k_port_set_status(priv, port, 0);
	priv->port_enabled_map &= ~BIT(port);
}
/* Update the switch-global maximum frame size.
 *
 * The chip has a single MAX_FRAME_SIZE setting; DSA always sets the
 * CPU port MTU to the largest slave MTU, so acting only on the CPU
 * port covers every port. The CPU ports (0 and 6) must be taken down
 * around the write or the switch panics.
 */
int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	const int cpu_ports[] = { 0, 6 };
	struct qca8k_priv *priv = ds->priv;
	int i, ret;

	if (!dsa_is_cpu_port(ds, port))
		return 0;

	/* Quiesce any enabled CPU port before touching MAX_FRAME_SIZE */
	for (i = 0; i < 2; i++)
		if (priv->port_enabled_map & BIT(cpu_ports[i]))
			qca8k_port_set_status(priv, cpu_ports[i], 0);

	/* Include L2 header / FCS length */
	ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu +
			  ETH_HLEN + ETH_FCS_LEN);

	/* Restore whichever CPU ports were enabled */
	for (i = 0; i < 2; i++)
		if (priv->port_enabled_map & BIT(cpu_ports[i]))
			qca8k_port_set_status(priv, cpu_ports[i], 1);

	return ret;
}
/* Largest supported MTU; the limit is chip-global, not per port (the
 * chip has a single MAX_FRAME_SIZE register, see qca8k_port_change_mtu).
 */
int qca8k_port_max_mtu(struct dsa_switch *ds, int port)
{
	return QCA8K_MAX_MTU;
}
/* Insert a static FDB entry for @addr on @port_mask, defaulting to the
 * port VLAN id when no @vid is given.
 */
int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
			  u16 port_mask, u16 vid)
{
	u16 real_vid = vid ? vid : QCA8K_PORT_VID_DEF;

	return qca8k_fdb_add(priv, addr, port_mask, real_vid,
			     QCA8K_ATU_STATUS_STATIC);
}
/* DSA .port_fdb_add: install a static unicast entry for @port. */
int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;

	return qca8k_port_fdb_insert(priv, addr, BIT(port), vid);
}
/* DSA .port_fdb_del: remove the entry for @addr/@vid on @port,
 * defaulting to the port VLAN id when no @vid is given.
 */
int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
		       const unsigned char *addr, u16 vid,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;

	if (!vid)
		vid = QCA8K_PORT_VID_DEF;

	return qca8k_fdb_del(priv, addr, BIT(port), vid);
}
/* DSA .port_fdb_dump: walk the FDB for @port and hand each valid entry
 * to @cb. The walk stops at the first empty entry (aging == 0), when
 * @cb returns non-zero, or after QCA8K_NUM_FDB_RECORDS iterations.
 * Errors from @cb stop the walk but are not propagated.
 */
int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
			dsa_fdb_dump_cb_t *cb, void *data)
{
	struct qca8k_priv *priv = ds->priv;
	struct qca8k_fdb entry = { 0 };
	int budget = QCA8K_NUM_FDB_RECORDS;
	bool is_static;
	int err = 0;

	mutex_lock(&priv->reg_mutex);
	while (budget-- && !qca8k_fdb_next(priv, &entry, port)) {
		if (!entry.aging)
			break;
		is_static = (entry.aging == QCA8K_ATU_STATUS_STATIC);
		err = cb(entry.mac, entry.vid, is_static, data);
		if (err)
			break;
	}
	mutex_unlock(&priv->reg_mutex);

	return 0;
}
/* DSA .port_mdb_add: merge @port into the multicast FDB entry,
 * creating it when it does not exist yet.
 */
int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;

	return qca8k_fdb_search_and_insert(priv, BIT(port), mdb->addr,
					   mdb->vid);
}
/* DSA .port_mdb_del: drop @port from the multicast FDB entry, purging
 * it entirely when @port was its last member.
 */
int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
		       const struct switchdev_obj_port_mdb *mdb,
		       struct dsa_db db)
{
	struct qca8k_priv *priv = ds->priv;

	return qca8k_fdb_search_and_del(priv, BIT(port), mdb->addr,
					mdb->vid);
}
/* DSA .port_mirror_add: mirror @port's ingress or egress traffic to
 * mirror->to_local_port.
 *
 * The hardware has a single global monitor port (0xF = unset), so all
 * mirror rules must target the same destination; a conflicting request
 * returns -EEXIST, as does re-adding an existing rule. Returns 0 or a
 * negative errno.
 */
int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
			  struct dsa_mall_mirror_tc_entry *mirror,
			  bool ingress, struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	int monitor_port, ret;
	u32 reg, val;

	/* Check for existent entry */
	if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
		return -EEXIST;

	ret = regmap_read(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0, &val);
	if (ret)
		return ret;

	/* QCA83xx can have only one port set to mirror mode.
	 * Check that the correct port is requested and return error otherwise.
	 * When no mirror port is set, the values is set to 0xF
	 */
	monitor_port = FIELD_GET(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (monitor_port != 0xF && monitor_port != mirror->to_local_port)
		return -EEXIST;

	/* Set the monitor port */
	val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM,
			 mirror->to_local_port);
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
				 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
	if (ret)
		return ret;

	/* Ingress mirroring lives in the lookup ctrl register, egress
	 * mirroring in the HOL ctrl register.
	 */
	if (ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_update_bits(priv->regmap, reg, val, val);
	if (ret)
		return ret;

	/* Track mirror port for tx and rx to decide when the
	 * mirror port has to be disabled.
	 */
	if (ingress)
		priv->mirror_rx |= BIT(port);
	else
		priv->mirror_tx |= BIT(port);

	return 0;
}
/* DSA .port_mirror_del: stop mirroring @port's traffic. When no port
 * mirrors anymore, the global monitor port is reset to 0xF (unset).
 *
 * Fix: return on the success path before the err: label — previously
 * every successful delete fell through and logged "Failed to del
 * mirror port" even though nothing had failed.
 */
void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
			   struct dsa_mall_mirror_tc_entry *mirror)
{
	struct qca8k_priv *priv = ds->priv;
	u32 reg, val;
	int ret;

	if (mirror->ingress) {
		reg = QCA8K_PORT_LOOKUP_CTRL(port);
		val = QCA8K_PORT_LOOKUP_ING_MIRROR_EN;
	} else {
		reg = QCA8K_REG_PORT_HOL_CTRL1(port);
		val = QCA8K_PORT_HOL_CTRL1_EG_MIRROR_EN;
	}

	ret = regmap_clear_bits(priv->regmap, reg, val);
	if (ret)
		goto err;

	if (mirror->ingress)
		priv->mirror_rx &= ~BIT(port);
	else
		priv->mirror_tx &= ~BIT(port);

	/* No port set to send packet to mirror port. Disable mirror port */
	if (!priv->mirror_rx && !priv->mirror_tx) {
		val = FIELD_PREP(QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, 0xF);
		ret = regmap_update_bits(priv->regmap, QCA8K_REG_GLOBAL_FW_CTRL0,
					 QCA8K_GLOBAL_FW_CTRL0_MIRROR_PORT_NUM, val);
		if (ret)
			goto err;
	}

	return;
err:
	dev_err(priv->dev, "Failed to del mirror port from %d", port);
}
/* DSA .port_vlan_filtering: select SECURE lookup mode when filtering
 * is enabled, NONE otherwise. Returns 0 or a negative errno.
 */
int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port,
			      bool vlan_filtering,
			      struct netlink_ext_ack *extack)
{
	struct qca8k_priv *priv = ds->priv;
	u32 mode = vlan_filtering ? QCA8K_PORT_LOOKUP_VLAN_MODE_SECURE
				  : QCA8K_PORT_LOOKUP_VLAN_MODE_NONE;

	return qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
			 QCA8K_PORT_LOOKUP_VLAN_MODE_MASK, mode);
}
/* DSA .port_vlan_add: add @port to the VLAN in the VTU and, when the
 * PVID flag is set, also program the port's default ingress (CVID) and
 * egress (SVID) VLAN ids. Returns 0 or a negative errno.
 */
int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan,
			struct netlink_ext_ack *extack)
{
	bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct qca8k_priv *priv = ds->priv;
	int ret;

	ret = qca8k_vlan_add(priv, port, vlan->vid, untagged);
	if (ret) {
		dev_err(priv->dev, "Failed to add VLAN to port %d (%d)", port, ret);
		return ret;
	}

	if (pvid) {
		/* Default egress VLAN for this port */
		ret = qca8k_rmw(priv, QCA8K_EGRESS_VLAN(port),
				QCA8K_EGREES_VLAN_PORT_MASK(port),
				QCA8K_EGREES_VLAN_PORT(port, vlan->vid));
		if (ret)
			return ret;

		/* Default customer and service VLAN ids for ingress */
		ret = qca8k_write(priv, QCA8K_REG_PORT_VLAN_CTRL0(port),
				  QCA8K_PORT_VLAN_CVID(vlan->vid) |
				  QCA8K_PORT_VLAN_SVID(vlan->vid));
	}

	return ret;
}
/* DSA .port_vlan_del: drop @port from the VLAN, logging on failure. */
int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
			const struct switchdev_obj_port_vlan *vlan)
{
	struct qca8k_priv *priv = ds->priv;
	int err;

	err = qca8k_vlan_del(priv, port, vlan->vid);
	if (err)
		dev_err(priv->dev, "Failed to delete VLAN from port %d (%d)", port, err);

	return err;
}
/* Decide whether @lag can be offloaded to hardware: it must have a
 * valid id, fit in the trunk member limit (counting the joining port),
 * and use hash-based tx with an L2 or L23 hash.
 */
static bool qca8k_lag_can_offload(struct dsa_switch *ds,
				  struct dsa_lag lag,
				  struct netdev_lag_upper_info *info)
{
	struct dsa_port *dp;
	int members = 0;

	if (!lag.id)
		return false;

	/* Count every port in the LAG, including the one joining */
	dsa_lag_foreach_port(dp, ds->dst, &lag)
		members++;

	if (members > QCA8K_NUM_PORTS_FOR_LAG)
		return false;

	if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return false;

	return info->hash_type == NETDEV_LAG_HASH_L2 ||
	       info->hash_type == NETDEV_LAG_HASH_L23;
}
/* Program the trunk hash mode for @lag.
 *
 * The hash mode register is global to all four possible trunks, so a
 * new LAG may only pick a different mode when it is the only one
 * configured; otherwise it must match the mode already in use.
 * Returns 0, -EOPNOTSUPP on a mode conflict, or a regmap errno.
 */
static int qca8k_lag_setup_hash(struct dsa_switch *ds,
				struct dsa_lag lag,
				struct netdev_lag_upper_info *info)
{
	struct net_device *lag_dev = lag.dev;
	struct qca8k_priv *priv = ds->priv;
	bool unique_lag = true;
	unsigned int i;
	u32 hash = 0;

	switch (info->hash_type) {
	case NETDEV_LAG_HASH_L23:
		/* L23 also hashes the IP addresses on top of the MACs */
		hash |= QCA8K_TRUNK_HASH_SIP_EN;
		hash |= QCA8K_TRUNK_HASH_DIP_EN;
		fallthrough;
	case NETDEV_LAG_HASH_L2:
		hash |= QCA8K_TRUNK_HASH_SA_EN;
		hash |= QCA8K_TRUNK_HASH_DA_EN;
		break;
	default: /* We should NEVER reach this */
		return -EOPNOTSUPP;
	}

	/* Check if we are the unique configured LAG */
	dsa_lags_foreach_id(i, ds->dst)
		if (i != lag.id && dsa_lag_by_id(ds->dst, i)) {
			unique_lag = false;
			break;
		}

	/* Hash Mode is global. Make sure the same Hash Mode
	 * is set to all the 4 possible lag.
	 * If we are the unique LAG we can set whatever hash
	 * mode we want.
	 * To change hash mode it's needed to remove all LAG
	 * and change the mode with the latest.
	 */
	if (unique_lag) {
		priv->lag_hash_mode = hash;
	} else if (priv->lag_hash_mode != hash) {
		netdev_err(lag_dev, "Error: Mismatched Hash Mode across different lag is not supported\n");
		return -EOPNOTSUPP;
	}

	return regmap_update_bits(priv->regmap, QCA8K_TRUNK_HASH_EN_CTRL,
				  QCA8K_TRUNK_HASH_MASK, hash);
}
/* Add or remove (@delete) @port from the trunk member registers of
 * @lag, keeping both the per-trunk member bitmap and the per-member
 * slot table in sync. Returns 0 or a negative errno.
 */
static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
				     struct dsa_lag lag, bool delete)
{
	struct qca8k_priv *priv = ds->priv;
	int ret, id, i;
	u32 val;

	/* DSA LAG IDs are one-based, hardware is zero-based */
	id = lag.id - 1;

	/* Read current port member */
	ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0, &val);
	if (ret)
		return ret;

	/* Shift val to the correct trunk */
	val >>= QCA8K_REG_GOL_TRUNK_SHIFT(id);
	val &= QCA8K_REG_GOL_TRUNK_MEMBER_MASK;
	if (delete)
		val &= ~BIT(port);
	else
		val |= BIT(port);

	/* Update port member. With empty portmap disable trunk.
	 * NOTE(review): the value combines !val (enable flag, non-zero only
	 * when the member map became empty) with the new member map, both
	 * placed via the trunk shift — looks intentional per the register
	 * layout in qca8k.h, but verify against the datasheet.
	 */
	ret = regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL0,
				 QCA8K_REG_GOL_TRUNK_MEMBER(id) |
				 QCA8K_REG_GOL_TRUNK_EN(id),
				 !val << QCA8K_REG_GOL_TRUNK_SHIFT(id) |
				 val << QCA8K_REG_GOL_TRUNK_SHIFT(id));

	/* Search empty member if adding or port on deleting */
	for (i = 0; i < QCA8K_NUM_PORTS_FOR_LAG; i++) {
		ret = regmap_read(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id), &val);
		if (ret)
			return ret;

		/* Extract this member slot's enable+port field */
		val >>= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i);
		val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_MASK;

		if (delete) {
			/* If port flagged to be disabled assume this member is
			 * empty
			 */
			if (val != QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;

			val &= QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT_MASK;
			if (val != port)
				continue;
		} else {
			/* If port flagged to be enabled assume this member is
			 * already set
			 */
			if (val == QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN_MASK)
				continue;
		}

		/* We have found the member to add/remove */
		break;
	}

	/* Set port in the correct port mask or disable port if in delete mode */
	return regmap_update_bits(priv->regmap, QCA8K_REG_GOL_TRUNK_CTRL(id),
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_EN(id, i) |
				  QCA8K_REG_GOL_TRUNK_ID_MEM_ID_PORT(id, i),
				  !delete << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i) |
				  port << QCA8K_REG_GOL_TRUNK_ID_MEM_ID_SHIFT(id, i));
}
/* DSA .port_lag_join: validate the LAG is offloadable, program the
 * trunk hash mode, then add @port to the trunk member map.
 */
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
			struct netdev_lag_upper_info *info)
{
	int err;

	if (!qca8k_lag_can_offload(ds, lag, info))
		return -EOPNOTSUPP;

	err = qca8k_lag_setup_hash(ds, lag, info);
	if (err)
		return err;

	return qca8k_lag_refresh_portmap(ds, port, lag, false);
}
/* DSA .port_lag_leave: drop @port from the trunk member map. */
int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
			 struct dsa_lag lag)
{
	return qca8k_lag_refresh_portmap(ds, port, lag, true);
}
/* Read the chip id from MASK_CTRL and verify it matches the id the
 * match data expects; also caches the silicon revision for the
 * internal PHY driver. Returns 0 or -ENODEV.
 */
int qca8k_read_switch_id(struct qca8k_priv *priv)
{
	u32 val;
	u8 id;
	int err;

	if (!priv->info)
		return -ENODEV;

	err = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
	if (err < 0)
		return -ENODEV;

	id = QCA8K_MASK_CTRL_DEVICE_ID(val);
	if (id != priv->info->id) {
		dev_err(priv->dev,
			"Switch id detected %x but expected %x",
			id, priv->info->id);
		return -ENODEV;
	}

	priv->switch_id = id;

	/* Save revision to communicate to the internal PHY driver */
	priv->switch_revision = QCA8K_MASK_CTRL_REV_ID(val);

	return 0;
}
...@@ -324,10 +324,20 @@ enum qca8k_mid_cmd { ...@@ -324,10 +324,20 @@ enum qca8k_mid_cmd {
QCA8K_MIB_CAST = 3, QCA8K_MIB_CAST = 3,
}; };
struct qca8k_priv;
struct qca8k_info_ops {
int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
/* TODO: remove these extra ops when we can support regmap bulk read/write */
int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
};
struct qca8k_match_data { struct qca8k_match_data {
u8 id; u8 id;
bool reduced_package; bool reduced_package;
u8 mib_count; u8 mib_count;
const struct qca8k_info_ops *ops;
}; };
enum { enum {
...@@ -401,6 +411,7 @@ struct qca8k_priv { ...@@ -401,6 +411,7 @@ struct qca8k_priv {
struct qca8k_mdio_cache mdio_cache; struct qca8k_mdio_cache mdio_cache;
struct qca8k_pcs pcs_port_0; struct qca8k_pcs pcs_port_0;
struct qca8k_pcs pcs_port_6; struct qca8k_pcs pcs_port_6;
const struct qca8k_match_data *info;
}; };
struct qca8k_mib_desc { struct qca8k_mib_desc {
...@@ -416,4 +427,93 @@ struct qca8k_fdb { ...@@ -416,4 +427,93 @@ struct qca8k_fdb {
u8 mac[6]; u8 mac[6];
}; };
/* Common setup function */
extern const struct qca8k_mib_desc ar8327_mib[];
extern const struct regmap_access_table qca8k_readable_table;
int qca8k_mib_init(struct qca8k_priv *priv);
void qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable);
int qca8k_read_switch_id(struct qca8k_priv *priv);
/* Common read/write/rmw function */
int qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val);
int qca8k_write(struct qca8k_priv *priv, u32 reg, u32 val);
int qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val);
/* Common ops function */
void qca8k_fdb_flush(struct qca8k_priv *priv);
/* Common ethtool stats function */
void qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data);
void qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
uint64_t *data);
int qca8k_get_sset_count(struct dsa_switch *ds, int port, int sset);
/* Common eee function */
int qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee);
int qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
/* Common bridge function */
void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state);
int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge,
bool *tx_fwd_offload,
struct netlink_ext_ack *extack);
void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge);
/* Common port enable/disable function */
int qca8k_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy);
void qca8k_port_disable(struct dsa_switch *ds, int port);
/* Common MTU function */
int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu);
int qca8k_port_max_mtu(struct dsa_switch *ds, int port);
/* Common fast age function */
void qca8k_port_fast_age(struct dsa_switch *ds, int port);
int qca8k_set_ageing_time(struct dsa_switch *ds, unsigned int msecs);
/* Common FDB function */
int qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
u16 port_mask, u16 vid);
int qca8k_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db);
int qca8k_port_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db);
int qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data);
/* Common MDB function */
int qca8k_port_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db);
int qca8k_port_mdb_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb,
struct dsa_db db);
/* Common port mirror function */
int qca8k_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack);
void qca8k_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
/* Common port VLAN function */
int qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack);
int qca8k_port_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
/* Common port LAG function */
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
struct netdev_lag_upper_info *info);
int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag);
#endif /* __QCA8K_H */ #endif /* __QCA8K_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment