Commit c9c9af91 authored by Jiri Pirko, committed by David S. Miller

mlxsw: spectrum_acl: Allow to interrupt/continue rehash work

Currently, migration of vregions with many entries may take a long time,
during which insertions and removals of rules are blocked while waiting
to acquire vregion->lock.

To overcome this, allow the rehash work to be interrupted and continued
according to a set credit budget - the number of rules to migrate per run.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 84350051
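Before the diff itself, here is a minimal userspace sketch of the credit-budget pattern the patch introduces: each work run migrates at most a fixed number of entries, and when the budget runs out the run stops early and is immediately rescheduled to continue. This is only an illustration under assumed names (fake_entry, migrate_batch, CREDITS); it is not mlxsw driver code, but the decrement-before-check on the credit counter mirrors the check added to mlxsw_sp_acl_tcam_ventry_migrate().

/* Hypothetical sketch of the credit-budget pattern (not mlxsw code). */
#include <stdbool.h>
#include <stdio.h>

#define CREDITS 100 /* entries migrated per work run, mirrors the patch */

struct fake_entry {
	bool migrated;
};

/* Migrate entries until the list is exhausted or the credits run out.
 * Returns true when everything was migrated, false when interrupted.
 */
static bool migrate_batch(struct fake_entry *entries, int count, int *credits)
{
	for (int i = 0; i < count; i++) {
		if (entries[i].migrated)
			continue;           /* already done in a previous run */
		if (--(*credits) < 0)
			return false;       /* out of credits, continue later */
		entries[i].migrated = true; /* "migrate" the entry */
	}
	return true;
}

int main(void)
{
	struct fake_entry entries[350] = {0};
	int runs = 0;

	for (;;) {
		int credits = CREDITS;

		runs++;
		if (migrate_batch(entries, 350, &credits))
			break; /* done; the driver re-arms its delayed work here */
		/* interrupted: "reschedule" immediately by looping again */
	}
	printf("migrated 350 entries in %d runs\n", runs);
	return 0;
}

With 350 entries and a budget of 100, the loop completes in four runs; in the driver, each such run corresponds to one invocation of the rehash work item, so rule insertions and removals can grab vregion->lock between runs.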
@@ -27,6 +27,7 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
 
 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
 
 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
 			   struct mlxsw_sp_acl_tcam *tcam)
@@ -732,16 +733,26 @@ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion
 static int
 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
-				 struct mlxsw_sp_acl_tcam_vregion *vregion);
+				 struct mlxsw_sp_acl_tcam_vregion *vregion,
+				 int *credits);
 
 static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
 {
 	struct mlxsw_sp_acl_tcam_vregion *vregion =
 		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
 			     rehash.dw.work);
+	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+	int err;
 
-	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
-	mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+	err = mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp,
+					       vregion, &credits);
+	if (credits < 0)
+		/* Rehash gone out of credits so it was interrupted.
+		 * Schedule the work as soon as possible to continue.
+		 */
+		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+	else
+		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
 static struct mlxsw_sp_acl_tcam_vregion *
@@ -1176,7 +1187,8 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
 				 struct mlxsw_sp_acl_tcam_ventry *ventry,
-				 struct mlxsw_sp_acl_tcam_chunk *chunk)
+				 struct mlxsw_sp_acl_tcam_chunk *chunk,
+				 int *credits)
 {
 	struct mlxsw_sp_acl_tcam_entry *new_entry;
@@ -1184,6 +1196,9 @@ mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
 	if (ventry->entry->chunk == chunk)
 		return 0;
 
+	if (--(*credits) < 0)
+		return 0;
+
 	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
 	if (IS_ERR(new_entry))
 		return PTR_ERR(new_entry);
@@ -1223,7 +1238,8 @@ static int
 mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
 				     struct mlxsw_sp_acl_tcam_region *region,
-				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+				     int *credits)
 {
 	struct mlxsw_sp_acl_tcam_ventry *ventry;
 	int err;
@@ -1240,7 +1256,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
 		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
-						       vchunk->chunk);
+						       vchunk->chunk, credits);
 		if (err) {
 			if (ctx->this_is_rollback)
 				return err;
@@ -1250,6 +1266,11 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 			 */
 			swap(vchunk->chunk, vchunk->chunk2);
 			return err;
+		} else if (*credits < 0) {
+			/* We are out of credits, the rest of the ventries
+			 * will be migrated later.
+			 */
+			return 0;
 		}
 	}
@@ -1260,7 +1281,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 				     struct mlxsw_sp_acl_tcam_vregion *vregion,
-				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+				     int *credits)
 {
 	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 	int err;
@@ -1268,8 +1290,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 	list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
 		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
 							   vregion->region,
-							   ctx);
-		if (err)
+							   ctx, credits);
+		if (err || *credits < 0)
 			return err;
 	}
 	return 0;
@@ -1278,13 +1300,15 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 				  struct mlxsw_sp_acl_tcam_vregion *vregion,
-				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+				  int *credits)
 {
 	int err, err2;
 
 	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
 	mutex_lock(&vregion->lock);
-	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+						   ctx, credits);
 	if (err) {
 		/* In case migration was not successful, we need to swap
 		 * so the original region pointer is assigned again
@@ -1292,7 +1316,8 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 		 */
 		swap(vregion->region, vregion->region2);
 		ctx->this_is_rollback = true;
-		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+							    ctx, credits);
 		if (err2)
 			vregion->failed_rollback = true;
 	}
@@ -1301,6 +1326,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 	return err;
 }
 
+static bool
+mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	return ctx->hints_priv;
+}
+
 static int
 mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
 				       struct mlxsw_sp_acl_tcam_vregion *vregion,
@@ -1372,19 +1403,28 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
-				 struct mlxsw_sp_acl_tcam_vregion *vregion)
+				 struct mlxsw_sp_acl_tcam_vregion *vregion,
+				 int *credits)
 {
 	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
 	int err;
 
-	err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp, vregion, ctx);
-	if (err) {
-		if (err != -EAGAIN)
-			dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
-		return err;
+	/* Check if the previous rehash work was interrupted
+	 * which means we have to continue it now.
+	 * If not, start a new rehash.
+	 */
+	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
+		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
+		if (err) {
+			if (err != -EAGAIN)
+				dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
+			return err;
+		}
 	}
 
-	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, ctx);
+	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+						ctx, credits);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
 		if (vregion->failed_rollback) {
@@ -1394,7 +1434,9 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
 		}
 	}
 
-	mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+	if (*credits >= 0)
+		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+
 	return err;
 }