Commit e7086213 authored by Jakub Kicinski

Merge branch 'mlxsw-spectrum-prepare-for-xm-implementation-prefix-insertion-and-removal'

Ido Schimmel says:

====================
mlxsw: spectrum: Prepare for XM implementation - prefix insertion and removal

Jiri says:

This is a preparation patchset for follow-up support of boards with
extended mezzanine (XM), which is going to allow extended (scale-wise)
router offload.

XM requires a separate PRM register named XMDR to be used instead of
RALUE to insert/update/remove FIB entries. Therefore, this patchset
extends the previously introduced low-level ops so that an XM-specific
FIB entry configuration can be implemented.

For now, the existing RALUE implementation is moved to the "basic"
low-level ops.

Unlike the legacy router, insertion/update/removal of FIB entries into
XM can be done in bulks of up to 4 items in a single PRM register
write. That is why this patchset implements "an op context", which
allows the future XM ops implementation to squash multiple FIB events
into a single register write. For that, the way in which FIB events are
processed by the work queue has to be changed.
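
As a rough illustration, a low-level commit op could use the context
like this (XM_BULK_MAX, struct xm_op_ctx and the function name are
hypothetical; only the bulk_ok/postponed_for_bulk contract comes from
this patchset):

#define XM_BULK_MAX 4

/* Hypothetical private state, living in op_ctx->ll_priv[]. */
struct xm_op_ctx {
	unsigned int num_entries;	/* entries packed so far */
};

static int xm_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
			       bool *postponed_for_bulk)
{
	struct xm_op_ctx *ctx = (struct xm_op_ctx *) op_ctx->ll_priv;

	/* If the caller marked the context as bulk-capable and there is
	 * room for more entries, postpone the register write and let the
	 * next FIB event be packed into the same payload.
	 */
	if (op_ctx->bulk_ok && ctx->num_entries < XM_BULK_MAX) {
		*postponed_for_bulk = true;
		return 0;
	}

	ctx->num_entries = 0;
	/* A real implementation would issue one XMDR register write
	 * covering all entries packed so far.
	 */
	return 0;
}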

Patch #3 converts the FIB event handling from one work callback per
event to a single event queue.
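
This follows the common kernel pattern: producers append events to a
list under a spinlock and schedule one work item, whose handler splices
the whole list out and processes it in order. A rough sketch built on
the router fields added in the diff below (the handler name and body
are assumptions):

static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router =
		container_of(work, struct mlxsw_sp_router, fib_event_work);
	LIST_HEAD(fib_events);

	spin_lock_bh(&router->fib_event_queue_lock);
	list_splice_init(&router->fib_event_queue, &fib_events);
	spin_unlock_bh(&router->fib_event_queue_lock);

	/* Walk the spliced-out list and handle each FIB event; adjacent
	 * compatible events can later be squashed into one bulked write.
	 */
}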

Patch #4 introduces "an op context" that will later allow squashing
multiple FIB events into a single XMDR register write. Patch #12
converts it from a stack variable to a per-instance allocation.
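
Because the context ends in a flexible ll_priv[] array whose size only
the low-level implementation knows (fib_entry_op_ctx_size in the ops
below), the per-instance allocation could look roughly like this (a
sketch; the helper name and exact call site are assumptions):

static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
{
	const struct mlxsw_sp_router_ll_ops *ll_ops =
		router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4];
	struct mlxsw_sp_fib_entry_op_ctx *op_ctx;

	/* Extra fib_entry_op_ctx_size bytes for the ll_priv[] area. */
	op_ctx = kzalloc(sizeof(*op_ctx) + ll_ops->fib_entry_op_ctx_size,
			 GFP_KERNEL);
	if (!op_ctx)
		return -ENOMEM;
	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
	router->ll_op_ctx = op_ctx;
	return 0;
}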

Existing RALUE manipulations are pushed to ops in patch #10.
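
The resulting "basic" ops table would then be wired up along these
lines (a sketch; the mlxsw_sp_router_ll_basic_* names and the context
struct are illustrative stand-ins, and the pre-existing
ralta/ralst/raltb callbacks are omitted):

static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
	.fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
	.fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
	.fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
	.fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
	.fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
	.fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
	.fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
	.fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
};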

Patch #13 introduces the possibility for the low-level implementation
to have per-FIB-entry private memory.
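
That private memory is a refcounted blob sized by the implementation's
fib_entry_priv_size; creating and releasing it could look roughly like
this (helper names are assumptions; the struct matches the diff below):

static struct mlxsw_sp_fib_entry_priv *
mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
{
	struct mlxsw_sp_fib_entry_priv *priv;

	if (!ll_ops->fib_entry_priv_size)
		/* This implementation needs no private area. */
		return NULL;
	priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size,
		       GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);
	refcount_set(&priv->refcnt, 1);
	INIT_LIST_HEAD(&priv->list);
	return priv;
}

static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
{
	if (!priv || !refcount_dec_and_test(&priv->refcnt))
		return;
	kfree(priv);
}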

The rest of the patches are either cosmetic changes or smaller
preparations.
====================

Link: https://lore.kernel.org/r/20201110094900.1920158-1-idosch@idosch.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 85ce50d3 173f14cd
@@ -7279,10 +7279,11 @@ static inline void mlxsw_reg_ralue_pack4(char *payload,
 					 enum mlxsw_reg_ralxx_protocol protocol,
 					 enum mlxsw_reg_ralue_op op,
 					 u16 virtual_router, u8 prefix_len,
-					 u32 dip)
+					 u32 *dip)
 {
 	mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
-	mlxsw_reg_ralue_dip4_set(payload, dip);
+	if (dip)
+		mlxsw_reg_ralue_dip4_set(payload, *dip);
 }
 
 static inline void mlxsw_reg_ralue_pack6(char *payload,
@@ -7292,6 +7293,7 @@ static inline void mlxsw_reg_ralue_pack6(char *payload,
 					 const void *dip)
 {
 	mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
-	mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
+	if (dip)
+		mlxsw_reg_ralue_dip6_memcpy_to(payload, dip);
 }
@@ -181,23 +181,26 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int
-mlxsw_sp_ipip_fib_entry_op_gre4_ralue(struct mlxsw_sp *mlxsw_sp,
-				      u32 dip, u8 prefix_len, u16 ul_vr_id,
-				      enum mlxsw_reg_ralue_op op,
-				      u32 tunnel_index)
+mlxsw_sp_ipip_fib_entry_op_gre4_do(struct mlxsw_sp *mlxsw_sp,
+				   const struct mlxsw_sp_router_ll_ops *ll_ops,
+				   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+				   u32 dip, u8 prefix_len, u16 ul_vr_id,
+				   enum mlxsw_sp_fib_entry_op op,
+				   u32 tunnel_index,
+				   struct mlxsw_sp_fib_entry_priv *priv)
 {
-	char ralue_pl[MLXSW_REG_RALUE_LEN];
-
-	mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4, op,
-			      ul_vr_id, prefix_len, dip);
-	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl, tunnel_index);
-	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+	ll_ops->fib_entry_pack(op_ctx, MLXSW_SP_L3_PROTO_IPV4, op, ul_vr_id,
+			       prefix_len, (unsigned char *) &dip, priv);
+	ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx, tunnel_index);
+	return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
 }
 
 static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
+					   const struct mlxsw_sp_router_ll_ops *ll_ops,
+					   struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
 					   struct mlxsw_sp_ipip_entry *ipip_entry,
-					   enum mlxsw_reg_ralue_op op,
-					   u32 tunnel_index)
+					   enum mlxsw_sp_fib_entry_op op, u32 tunnel_index,
+					   struct mlxsw_sp_fib_entry_priv *priv)
 {
 	u16 ul_vr_id = mlxsw_sp_ipip_lb_ul_vr_id(ipip_entry->ol_lb);
 	__be32 dip;
@@ -210,9 +213,8 @@ static int mlxsw_sp_ipip_fib_entry_op_gre4(struct mlxsw_sp *mlxsw_sp,
 
 	dip = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
 					 ipip_entry->ol_dev).addr4;
-	return mlxsw_sp_ipip_fib_entry_op_gre4_ralue(mlxsw_sp, be32_to_cpu(dip),
-						     32, ul_vr_id, op,
-						     tunnel_index);
+	return mlxsw_sp_ipip_fib_entry_op_gre4_do(mlxsw_sp, ll_ops, op_ctx, be32_to_cpu(dip),
+						  32, ul_vr_id, op, tunnel_index, priv);
 }
 
 static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
@@ -52,9 +52,12 @@ struct mlxsw_sp_ipip_ops {
 			  const struct net_device *ol_dev);
 
 	int (*fib_entry_op)(struct mlxsw_sp *mlxsw_sp,
+			    const struct mlxsw_sp_router_ll_ops *ll_ops,
+			    struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
 			    struct mlxsw_sp_ipip_entry *ipip_entry,
-			    enum mlxsw_reg_ralue_op op,
-			    u32 tunnel_index);
+			    enum mlxsw_sp_fib_entry_op op,
+			    u32 tunnel_index,
+			    struct mlxsw_sp_fib_entry_priv *priv);
 
 	int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
 				struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -15,6 +15,26 @@ struct mlxsw_sp_router_nve_decap {
 	u8 valid:1;
 };
 
+struct mlxsw_sp_fib_entry_op_ctx {
+	u8 bulk_ok:1, /* Indicate to the low-level op it is ok to bulk
+		       * the actual entry with the one that is the next
+		       * in queue.
+		       */
+	   initialized:1; /* Bit that the low-level op sets in case
+			   * the context priv is initialized.
+			   */
+	struct list_head fib_entry_priv_list;
+	unsigned long ll_priv[];
+};
+
+static inline void
+mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
+{
+	WARN_ON_ONCE(!list_empty(&op_ctx->fib_entry_priv_list));
+	memset(op_ctx, 0, sizeof(*op_ctx));
+	INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
+}
+
 struct mlxsw_sp_router {
 	struct mlxsw_sp *mlxsw_sp;
 	struct mlxsw_sp_rif **rifs;
@@ -48,8 +68,24 @@ struct mlxsw_sp_router {
 	bool adj_discard_index_valid;
 	struct mlxsw_sp_router_nve_decap nve_decap_config;
 	struct mutex lock; /* Protects shared router resources */
+	struct work_struct fib_event_work;
+	struct list_head fib_event_queue;
+	spinlock_t fib_event_queue_lock; /* Protects fib event queue list */
 	/* One set of ops for each protocol: IPv4 and IPv6 */
 	const struct mlxsw_sp_router_ll_ops *proto_ll_ops[MLXSW_SP_L3_PROTO_MAX];
+	struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
 };
 
+struct mlxsw_sp_fib_entry_priv {
+	refcount_t refcnt;
+	struct list_head list; /* Member in op_ctx->fib_entry_priv_list */
+	unsigned long priv[];
+};
+
+enum mlxsw_sp_fib_entry_op {
+	MLXSW_SP_FIB_ENTRY_OP_WRITE,
+	MLXSW_SP_FIB_ENTRY_OP_UPDATE,
+	MLXSW_SP_FIB_ENTRY_OP_DELETE,
+};
+
 /* Low-level router ops. Basically this is to handle the different
@@ -59,8 +95,31 @@ struct mlxsw_sp_router_ll_ops {
 	int (*ralta_write)(struct mlxsw_sp *mlxsw_sp, char *xralta_pl);
 	int (*ralst_write)(struct mlxsw_sp *mlxsw_sp, char *xralst_pl);
 	int (*raltb_write)(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl);
+	size_t fib_entry_op_ctx_size;
+	size_t fib_entry_priv_size;
+	void (*fib_entry_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+			       enum mlxsw_sp_l3proto proto, enum mlxsw_sp_fib_entry_op op,
+			       u16 virtual_router, u8 prefix_len, unsigned char *addr,
+			       struct mlxsw_sp_fib_entry_priv *priv);
+	void (*fib_entry_act_remote_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+					  enum mlxsw_reg_ralue_trap_action trap_action,
+					  u16 trap_id, u32 adjacency_index, u16 ecmp_size);
+	void (*fib_entry_act_local_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+					 enum mlxsw_reg_ralue_trap_action trap_action,
+					 u16 trap_id, u16 local_erif);
+	void (*fib_entry_act_ip2me_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx);
+	void (*fib_entry_act_ip2me_tun_pack)(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+					     u32 tunnel_ptr);
+	int (*fib_entry_commit)(struct mlxsw_sp *mlxsw_sp,
+				struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+				bool *postponed_for_bulk);
+	bool (*fib_entry_is_committed)(struct mlxsw_sp_fib_entry_priv *priv);
 };
 
+int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
+			      const struct mlxsw_sp_router_ll_ops *ll_ops);
+
 struct mlxsw_sp_rif_ipip_lb;
 struct mlxsw_sp_rif_ipip_lb_config {
 	enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;