Commit ebcff743 authored by Jiri Pirko, committed by David S. Miller

mlxsw: spectrum_kvdl: Push out KVD linear management into ops

In Spectrum-2 there is a different implementation of KVD linear
management. Unlike in Spectrum, where there is a single index space,
in Spectrum-2 the indexes are per-resource. There is also a need to
explicitly tell the HW that an entry is no longer used.
So push the existing implementation out into spectrum1_kvdl.c and
prepare an ops infrastructure to allow a new implementation in a follow-up.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent eec4edc9
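To make the intent of the new ops indirection concrete, here is a minimal, hypothetical sketch of how a second implementation could plug into struct mlxsw_sp_kvdl_ops (defined in spectrum.h below). All mlxsw_sp2_* names are placeholders and are not part of this commit; the real Spectrum-2 code lands in a follow-up.

/* Hypothetical sketch only; assumes "spectrum.h" for struct mlxsw_sp and
 * the mlxsw_sp_kvdl_ops definition introduced by this commit.
 */
struct mlxsw_sp2_kvdl {
	u32 dummy;	/* per-resource index state would live here */
};

static int mlxsw_sp2_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
				unsigned int entry_count, u32 *p_entry_index)
{
	/* Pick an index from the per-resource index space of the caller. */
	return -EOPNOTSUPP;	/* placeholder */
}

static void mlxsw_sp2_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
				int entry_index)
{
	/* Explicitly tell the HW that the entry is no longer used. */
}

static int mlxsw_sp2_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	return 0;
}

static void mlxsw_sp2_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
}

static const struct mlxsw_sp_kvdl_ops mlxsw_sp2_kvdl_ops = {
	.priv_size = sizeof(struct mlxsw_sp2_kvdl),
	.init = mlxsw_sp2_kvdl_init,
	.fini = mlxsw_sp2_kvdl_fini,
	.alloc = mlxsw_sp2_kvdl_alloc,
	.free = mlxsw_sp2_kvdl_free,
	/* .alloc_size_query and .resources_register omitted in this sketch */
};

/* Selected at probe time, mirroring the Spectrum-1 assignment in the diff:
 * mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
 */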
@@ -15,10 +15,11 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o spectrum_router.o \
spectrum_kvdl.o spectrum_acl_tcam.o \
spectrum_acl.o spectrum_flower.o \
spectrum_cnt.o spectrum_fid.o \
spectrum_ipip.o spectrum_acl_flex_actions.o \
spectrum1_kvdl.o spectrum_kvdl.o \
spectrum_acl_tcam.o spectrum_acl.o \
spectrum_flower.o spectrum_cnt.o \
spectrum_fid.o spectrum_ipip.o \
spectrum_acl_flex_actions.o \
spectrum_mr.o spectrum_mr_tcam.o \
spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
@@ -3621,6 +3621,7 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->core = mlxsw_core;
@@ -3880,7 +3881,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
err = mlxsw_sp_kvdl_resources_register(mlxsw_core);
err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
if (err)
return err;
@@ -145,6 +145,7 @@ struct mlxsw_sp_acl;
struct mlxsw_sp_counter_pool;
struct mlxsw_sp_fid_core;
struct mlxsw_sp_kvdl;
struct mlxsw_sp_kvdl_ops;
struct mlxsw_sp {
struct mlxsw_sp_port **ports;
@@ -168,6 +169,7 @@ struct mlxsw_sp {
struct mlxsw_sp_span_entry *entries;
int entries_count;
} span;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
};
@@ -436,6 +438,20 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
/* spectrum_kvdl.c */
struct mlxsw_sp_kvdl_ops {
size_t priv_size;
int (*init)(struct mlxsw_sp *mlxsw_sp, void *priv);
void (*fini)(struct mlxsw_sp *mlxsw_sp, void *priv);
int (*alloc)(struct mlxsw_sp *mlxsw_sp, void *priv,
unsigned int entry_count, u32 *p_entry_index);
void (*free)(struct mlxsw_sp *mlxsw_sp, void *priv,
int entry_index);
int (*alloc_size_query)(struct mlxsw_sp *mlxsw_sp, void *priv,
unsigned int entry_count,
unsigned int *p_alloc_size);
int (*resources_register)(struct mlxsw_sp *mlxsw_sp, void *priv);
};
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
@@ -444,7 +460,10 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum1_kvdl.c */
extern const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops;
int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
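As a usage illustration only (not part of the commit), a consumer of the generic KVDL API declared above could allocate and release a block of entries as follows; example_kvdl_user and the entry count of 4 are hypothetical, and error handling is trimmed to the essentials.

/* Hypothetical consumer sketch of the generic KVDL API. */
static int example_kvdl_user(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int alloc_size;
	u32 kvdl_index;
	int err;

	/* Ask which partition granularity a 4-entry request would get. */
	err = mlxsw_sp_kvdl_alloc_size_query(mlxsw_sp, 4, &alloc_size);
	if (err)
		return err;

	/* Allocate; kvdl_index returns the base index in the KVD linear area. */
	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 4, &kvdl_index);
	if (err)
		return err;

	/* ... program the HW with kvdl_index ... */

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return 0;
}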
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum1_kvdl.c
* Copyright (c) 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include "spectrum.h"
#define MLXSW_SP1_KVDL_SINGLE_BASE 0
#define MLXSW_SP1_KVDL_SINGLE_SIZE 16384
#define MLXSW_SP1_KVDL_SINGLE_END \
(MLXSW_SP1_KVDL_SINGLE_SIZE + MLXSW_SP1_KVDL_SINGLE_BASE - 1)
#define MLXSW_SP1_KVDL_CHUNKS_BASE \
(MLXSW_SP1_KVDL_SINGLE_BASE + MLXSW_SP1_KVDL_SINGLE_SIZE)
#define MLXSW_SP1_KVDL_CHUNKS_SIZE 49152
#define MLXSW_SP1_KVDL_CHUNKS_END \
(MLXSW_SP1_KVDL_CHUNKS_SIZE + MLXSW_SP1_KVDL_CHUNKS_BASE - 1)
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE \
(MLXSW_SP1_KVDL_CHUNKS_BASE + MLXSW_SP1_KVDL_CHUNKS_SIZE)
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE \
(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE)
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_END \
(MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP1_KVDL_LARGE_CHUNKS_BASE - 1)
#define MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE 1
#define MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE 32
#define MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
struct mlxsw_sp1_kvdl_part_info {
unsigned int part_index;
unsigned int start_index;
unsigned int end_index;
unsigned int alloc_size;
enum mlxsw_sp_resource_id resource_id;
};
enum mlxsw_sp1_kvdl_part_id {
MLXSW_SP1_KVDL_PART_ID_SINGLE,
MLXSW_SP1_KVDL_PART_ID_CHUNKS,
MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS,
};
#define MLXSW_SP1_KVDL_PART_INFO(id) \
[MLXSW_SP1_KVDL_PART_ID_##id] = { \
.start_index = MLXSW_SP1_KVDL_##id##_BASE, \
.end_index = MLXSW_SP1_KVDL_##id##_END, \
.alloc_size = MLXSW_SP1_KVDL_##id##_ALLOC_SIZE, \
.resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
}
static const struct mlxsw_sp1_kvdl_part_info mlxsw_sp1_kvdl_parts_info[] = {
MLXSW_SP1_KVDL_PART_INFO(SINGLE),
MLXSW_SP1_KVDL_PART_INFO(CHUNKS),
MLXSW_SP1_KVDL_PART_INFO(LARGE_CHUNKS),
};
#define MLXSW_SP1_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp1_kvdl_parts_info)
struct mlxsw_sp1_kvdl_part {
struct mlxsw_sp1_kvdl_part_info info;
unsigned long usage[0]; /* Entries */
};
struct mlxsw_sp1_kvdl {
struct mlxsw_sp1_kvdl_part *parts[MLXSW_SP1_KVDL_PARTS_INFO_LEN];
};
static struct mlxsw_sp1_kvdl_part *
mlxsw_sp1_kvdl_alloc_size_part(struct mlxsw_sp1_kvdl *kvdl,
unsigned int alloc_size)
{
struct mlxsw_sp1_kvdl_part *part, *min_part = NULL;
int i;
for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (alloc_size <= part->info.alloc_size &&
(!min_part ||
part->info.alloc_size <= min_part->info.alloc_size))
min_part = part;
}
return min_part ?: ERR_PTR(-ENOBUFS);
}
static struct mlxsw_sp1_kvdl_part *
mlxsw_sp1_kvdl_index_part(struct mlxsw_sp1_kvdl *kvdl, u32 kvdl_index)
{
struct mlxsw_sp1_kvdl_part *part;
int i;
for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (kvdl_index >= part->info.start_index &&
kvdl_index <= part->info.end_index)
return part;
}
return ERR_PTR(-EINVAL);
}
static u32
mlxsw_sp1_kvdl_to_kvdl_index(const struct mlxsw_sp1_kvdl_part_info *info,
unsigned int entry_index)
{
return info->start_index + entry_index * info->alloc_size;
}
static unsigned int
mlxsw_sp1_kvdl_to_entry_index(const struct mlxsw_sp1_kvdl_part_info *info,
u32 kvdl_index)
{
return (kvdl_index - info->start_index) / info->alloc_size;
}
static int mlxsw_sp1_kvdl_part_alloc(struct mlxsw_sp1_kvdl_part *part,
u32 *p_kvdl_index)
{
const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
unsigned int entry_index, nr_entries;
nr_entries = (info->end_index - info->start_index + 1) /
info->alloc_size;
entry_index = find_first_zero_bit(part->usage, nr_entries);
if (entry_index == nr_entries)
return -ENOBUFS;
__set_bit(entry_index, part->usage);
*p_kvdl_index = mlxsw_sp1_kvdl_to_kvdl_index(info, entry_index);
return 0;
}
static void mlxsw_sp1_kvdl_part_free(struct mlxsw_sp1_kvdl_part *part,
u32 kvdl_index)
{
const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
unsigned int entry_index;
entry_index = mlxsw_sp1_kvdl_to_entry_index(info, kvdl_index);
__clear_bit(entry_index, part->usage);
}
static int mlxsw_sp1_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, void *priv,
unsigned int entry_count,
u32 *p_entry_index)
{
struct mlxsw_sp1_kvdl *kvdl = priv;
struct mlxsw_sp1_kvdl_part *part;
/* Find partition with smallest allocation size satisfying the
* requested size.
*/
part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
if (IS_ERR(part))
return PTR_ERR(part);
return mlxsw_sp1_kvdl_part_alloc(part, p_entry_index);
}
static void mlxsw_sp1_kvdl_free(struct mlxsw_sp *mlxsw_sp, void *priv,
int entry_index)
{
struct mlxsw_sp1_kvdl *kvdl = priv;
struct mlxsw_sp1_kvdl_part *part;
part = mlxsw_sp1_kvdl_index_part(kvdl, entry_index);
if (IS_ERR(part))
return;
mlxsw_sp1_kvdl_part_free(part, entry_index);
}
static int mlxsw_sp1_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
void *priv, unsigned int entry_count,
unsigned int *p_alloc_size)
{
struct mlxsw_sp1_kvdl *kvdl = priv;
struct mlxsw_sp1_kvdl_part *part;
part = mlxsw_sp1_kvdl_alloc_size_part(kvdl, entry_count);
if (IS_ERR(part))
return PTR_ERR(part);
*p_alloc_size = part->info.alloc_size;
return 0;
}
static void mlxsw_sp1_kvdl_part_update(struct mlxsw_sp1_kvdl_part *part,
struct mlxsw_sp1_kvdl_part *part_prev,
unsigned int size)
{
if (!part_prev) {
part->info.end_index = size - 1;
} else {
part->info.start_index = part_prev->info.end_index + 1;
part->info.end_index = part->info.start_index + size - 1;
}
}
static struct mlxsw_sp1_kvdl_part *
mlxsw_sp1_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp1_kvdl_part_info *info,
struct mlxsw_sp1_kvdl_part *part_prev)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
size_t usage_size;
u64 resource_size;
int err;
err = devlink_resource_size_get(devlink, info->resource_id,
&resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
}
nr_entries = div_u64(resource_size, info->alloc_size);
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
memcpy(&part->info, info, sizeof(part->info));
if (need_update)
mlxsw_sp1_kvdl_part_update(part, part_prev, resource_size);
return part;
}
static void mlxsw_sp1_kvdl_part_fini(struct mlxsw_sp1_kvdl_part *part)
{
kfree(part);
}
static int mlxsw_sp1_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp1_kvdl *kvdl)
{
const struct mlxsw_sp1_kvdl_part_info *info;
struct mlxsw_sp1_kvdl_part *part_prev = NULL;
int err, i;
for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++) {
info = &mlxsw_sp1_kvdl_parts_info[i];
kvdl->parts[i] = mlxsw_sp1_kvdl_part_init(mlxsw_sp, info,
part_prev);
if (IS_ERR(kvdl->parts[i])) {
err = PTR_ERR(kvdl->parts[i]);
goto err_kvdl_part_init;
}
part_prev = kvdl->parts[i];
}
return 0;
err_kvdl_part_init:
for (i--; i >= 0; i--)
mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
return err;
}
static void mlxsw_sp1_kvdl_parts_fini(struct mlxsw_sp1_kvdl *kvdl)
{
int i;
for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
mlxsw_sp1_kvdl_part_fini(kvdl->parts[i]);
}
static u64 mlxsw_sp1_kvdl_part_occ(struct mlxsw_sp1_kvdl_part *part)
{
const struct mlxsw_sp1_kvdl_part_info *info = &part->info;
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
nr_entries = (info->end_index -
info->start_index + 1) /
info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
occ += info->alloc_size;
return occ;
}
static u64 mlxsw_sp1_kvdl_occ_get(void *priv)
{
const struct mlxsw_sp1_kvdl *kvdl = priv;
u64 occ = 0;
int i;
for (i = 0; i < MLXSW_SP1_KVDL_PARTS_INFO_LEN; i++)
occ += mlxsw_sp1_kvdl_part_occ(kvdl->parts[i]);
return occ;
}
static u64 mlxsw_sp1_kvdl_single_occ_get(void *priv)
{
const struct mlxsw_sp1_kvdl *kvdl = priv;
struct mlxsw_sp1_kvdl_part *part;
part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_SINGLE];
return mlxsw_sp1_kvdl_part_occ(part);
}
static u64 mlxsw_sp1_kvdl_chunks_occ_get(void *priv)
{
const struct mlxsw_sp1_kvdl *kvdl = priv;
struct mlxsw_sp1_kvdl_part *part;
part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_CHUNKS];
return mlxsw_sp1_kvdl_part_occ(part);
}
static u64 mlxsw_sp1_kvdl_large_chunks_occ_get(void *priv)
{
const struct mlxsw_sp1_kvdl *kvdl = priv;
struct mlxsw_sp1_kvdl_part *part;
part = kvdl->parts[MLXSW_SP1_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp1_kvdl_part_occ(part);
}
static int mlxsw_sp1_kvdl_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl *kvdl = priv;
int err;
err = mlxsw_sp1_kvdl_parts_init(mlxsw_sp, kvdl);
if (err)
return err;
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR,
mlxsw_sp1_kvdl_occ_get,
kvdl);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
mlxsw_sp1_kvdl_single_occ_get,
kvdl);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
mlxsw_sp1_kvdl_chunks_occ_get,
kvdl);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
mlxsw_sp1_kvdl_large_chunks_occ_get,
kvdl);
return 0;
}
static void mlxsw_sp1_kvdl_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp1_kvdl *kvdl = priv;
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp1_kvdl_parts_fini(kvdl);
}
const struct mlxsw_sp_kvdl_ops mlxsw_sp1_kvdl_ops = {
.priv_size = sizeof(struct mlxsw_sp1_kvdl),
.init = mlxsw_sp1_kvdl_init,
.fini = mlxsw_sp1_kvdl_fini,
.alloc = mlxsw_sp1_kvdl_alloc,
.free = mlxsw_sp1_kvdl_free,
.alloc_size_query = mlxsw_sp1_kvdl_alloc_size_query,
};
int mlxsw_sp1_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
static struct devlink_resource_size_params size_params;
u32 kvdl_max_size;
int err;
kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
MLXSW_SP1_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
MLXSW_SP1_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP1_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP1_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
return err;
}
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
* Copyright (c) 2016 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016-2018 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,422 +33,69 @@
*/
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "spectrum.h"
#define MLXSW_SP_KVDL_SINGLE_BASE 0
#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
#define MLXSW_SP_KVDL_SINGLE_END \
(MLXSW_SP_KVDL_SINGLE_SIZE + MLXSW_SP_KVDL_SINGLE_BASE - 1)
#define MLXSW_SP_KVDL_CHUNKS_BASE \
(MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
#define MLXSW_SP_KVDL_CHUNKS_SIZE 49152
#define MLXSW_SP_KVDL_CHUNKS_END \
(MLXSW_SP_KVDL_CHUNKS_SIZE + MLXSW_SP_KVDL_CHUNKS_BASE - 1)
#define MLXSW_SP_KVDL_LARGE_CHUNKS_BASE \
(MLXSW_SP_KVDL_CHUNKS_BASE + MLXSW_SP_KVDL_CHUNKS_SIZE)
#define MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE \
(MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_LARGE_CHUNKS_BASE)
#define MLXSW_SP_KVDL_LARGE_CHUNKS_END \
(MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE + MLXSW_SP_KVDL_LARGE_CHUNKS_BASE - 1)
#define MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE 1
#define MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE 32
#define MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE 512
struct mlxsw_sp_kvdl_part_info {
unsigned int part_index;
unsigned int start_index;
unsigned int end_index;
unsigned int alloc_size;
enum mlxsw_sp_resource_id resource_id;
};
enum mlxsw_sp_kvdl_part_id {
MLXSW_SP_KVDL_PART_ID_SINGLE,
MLXSW_SP_KVDL_PART_ID_CHUNKS,
MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS,
};
#define MLXSW_SP_KVDL_PART_INFO(id) \
[MLXSW_SP_KVDL_PART_ID_##id] = { \
.start_index = MLXSW_SP_KVDL_##id##_BASE, \
.end_index = MLXSW_SP_KVDL_##id##_END, \
.alloc_size = MLXSW_SP_KVDL_##id##_ALLOC_SIZE, \
.resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_##id, \
}
static const struct mlxsw_sp_kvdl_part_info mlxsw_sp_kvdl_parts_info[] = {
MLXSW_SP_KVDL_PART_INFO(SINGLE),
MLXSW_SP_KVDL_PART_INFO(CHUNKS),
MLXSW_SP_KVDL_PART_INFO(LARGE_CHUNKS),
};
#define MLXSW_SP_KVDL_PARTS_INFO_LEN ARRAY_SIZE(mlxsw_sp_kvdl_parts_info)
struct mlxsw_sp_kvdl_part {
struct mlxsw_sp_kvdl_part_info info;
unsigned long usage[0]; /* Entries */
};
struct mlxsw_sp_kvdl {
struct mlxsw_sp_kvdl_part *parts[MLXSW_SP_KVDL_PARTS_INFO_LEN];
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
unsigned long priv[0];
/* priv has to be always the last item */
};
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_alloc_size_part(struct mlxsw_sp_kvdl *kvdl,
unsigned int alloc_size)
{
struct mlxsw_sp_kvdl_part *part, *min_part = NULL;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (alloc_size <= part->info.alloc_size &&
(!min_part ||
part->info.alloc_size <= min_part->info.alloc_size))
min_part = part;
}
return min_part ?: ERR_PTR(-ENOBUFS);
}
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_index_part(struct mlxsw_sp_kvdl *kvdl, u32 kvdl_index)
{
struct mlxsw_sp_kvdl_part *part;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
part = kvdl->parts[i];
if (kvdl_index >= part->info.start_index &&
kvdl_index <= part->info.end_index)
return part;
}
return ERR_PTR(-EINVAL);
}
static u32
mlxsw_sp_entry_index_kvdl_index(const struct mlxsw_sp_kvdl_part_info *info,
unsigned int entry_index)
{
return info->start_index + entry_index * info->alloc_size;
}
static unsigned int
mlxsw_sp_kvdl_index_entry_index(const struct mlxsw_sp_kvdl_part_info *info,
u32 kvdl_index)
{
return (kvdl_index - info->start_index) / info->alloc_size;
}
static int mlxsw_sp_kvdl_part_alloc(struct mlxsw_sp_kvdl_part *part,
u32 *p_kvdl_index)
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index, nr_entries;
nr_entries = (info->end_index - info->start_index + 1) /
info->alloc_size;
entry_index = find_first_zero_bit(part->usage, nr_entries);
if (entry_index == nr_entries)
return -ENOBUFS;
__set_bit(entry_index, part->usage);
const struct mlxsw_sp_kvdl_ops *kvdl_ops = mlxsw_sp->kvdl_ops;
struct mlxsw_sp_kvdl *kvdl;
int err;
*p_kvdl_index = mlxsw_sp_entry_index_kvdl_index(info, entry_index);
kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl) + kvdl_ops->priv_size,
GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
kvdl->kvdl_ops = kvdl_ops;
mlxsw_sp->kvdl = kvdl;
err = kvdl_ops->init(mlxsw_sp, kvdl->priv);
if (err)
goto err_init;
return 0;
err_init:
kfree(kvdl);
return err;
}
static void mlxsw_sp_kvdl_part_free(struct mlxsw_sp_kvdl_part *part,
u32 kvdl_index)
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int entry_index;
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
entry_index = mlxsw_sp_kvdl_index_entry_index(info, kvdl_index);
__clear_bit(entry_index, part->usage);
kvdl->kvdl_ops->fini(mlxsw_sp, kvdl->priv);
kfree(kvdl);
}
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
u32 *p_entry_index)
{
struct mlxsw_sp_kvdl_part *part;
/* Find partition with smallest allocation size satisfying the
* requested size.
*/
part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
if (IS_ERR(part))
return PTR_ERR(part);
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
return mlxsw_sp_kvdl_part_alloc(part, p_entry_index);
return kvdl->kvdl_ops->alloc(mlxsw_sp, kvdl->priv,
entry_count, p_entry_index);
}
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
{
struct mlxsw_sp_kvdl_part *part;
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
part = mlxsw_sp_kvdl_index_part(mlxsw_sp->kvdl, entry_index);
if (IS_ERR(part))
return;
mlxsw_sp_kvdl_part_free(part, entry_index);
kvdl->kvdl_ops->free(mlxsw_sp, kvdl->priv, entry_index);
}
int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size)
{
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp_kvdl_alloc_size_part(mlxsw_sp->kvdl, entry_count);
if (IS_ERR(part))
return PTR_ERR(part);
*p_alloc_size = part->info.alloc_size;
return 0;
}
static void mlxsw_sp_kvdl_part_update(struct mlxsw_sp_kvdl_part *part,
struct mlxsw_sp_kvdl_part *part_prev,
unsigned int size)
{
if (!part_prev) {
part->info.end_index = size - 1;
} else {
part->info.start_index = part_prev->info.end_index + 1;
part->info.end_index = part->info.start_index + size - 1;
}
}
static struct mlxsw_sp_kvdl_part *
mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_kvdl_part_info *info,
struct mlxsw_sp_kvdl_part *part_prev)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl_part *part;
bool need_update = true;
unsigned int nr_entries;
size_t usage_size;
u64 resource_size;
int err;
err = devlink_resource_size_get(devlink, info->resource_id,
&resource_size);
if (err) {
need_update = false;
resource_size = info->end_index - info->start_index + 1;
}
nr_entries = div_u64(resource_size, info->alloc_size);
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return ERR_PTR(-ENOMEM);
memcpy(&part->info, info, sizeof(part->info));
if (need_update)
mlxsw_sp_kvdl_part_update(part, part_prev, resource_size);
return part;
}
static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp_kvdl_part *part)
{
kfree(part);
}
static int mlxsw_sp_kvdl_parts_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
const struct mlxsw_sp_kvdl_part_info *info;
struct mlxsw_sp_kvdl_part *part_prev = NULL;
int err, i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++) {
info = &mlxsw_sp_kvdl_parts_info[i];
kvdl->parts[i] = mlxsw_sp_kvdl_part_init(mlxsw_sp, info,
part_prev);
if (IS_ERR(kvdl->parts[i])) {
err = PTR_ERR(kvdl->parts[i]);
goto err_kvdl_part_init;
}
part_prev = kvdl->parts[i];
}
return 0;
err_kvdl_part_init:
for (i--; i >= 0; i--)
mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
return err;
}
static void mlxsw_sp_kvdl_parts_fini(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl = mlxsw_sp->kvdl;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
mlxsw_sp_kvdl_part_fini(kvdl->parts[i]);
}
static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
{
const struct mlxsw_sp_kvdl_part_info *info = &part->info;
unsigned int nr_entries;
int bit = -1;
u64 occ = 0;
nr_entries = (info->end_index -
info->start_index + 1) /
info->alloc_size;
while ((bit = find_next_bit(part->usage, nr_entries, bit + 1))
< nr_entries)
occ += info->alloc_size;
return occ;
}
static u64 mlxsw_sp_kvdl_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
u64 occ = 0;
int i;
for (i = 0; i < MLXSW_SP_KVDL_PARTS_INFO_LEN; i++)
occ += mlxsw_sp_kvdl_part_occ(mlxsw_sp->kvdl->parts[i]);
return occ;
}
static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
return mlxsw_sp_kvdl_part_occ(part);
}
static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
{
const struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_kvdl_part *part;
part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
return mlxsw_sp_kvdl_part_occ(part);
}
int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
static struct devlink_resource_size_params size_params;
u32 kvdl_max_size;
int err;
kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_SINGLE_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
MLXSW_SP_KVDL_SINGLE_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
MLXSW_SP_KVDL_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
if (err)
return err;
devlink_resource_size_params_init(&size_params, 0, kvdl_max_size,
MLXSW_SP_KVDL_LARGE_CHUNKS_ALLOC_SIZE,
DEVLINK_RESOURCE_UNIT_ENTRY);
err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
MLXSW_SP_RESOURCE_KVD_LINEAR,
&size_params);
return err;
}
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_kvdl *kvdl;
int err;
kvdl = kzalloc(sizeof(*mlxsw_sp->kvdl), GFP_KERNEL);
if (!kvdl)
return -ENOMEM;
mlxsw_sp->kvdl = kvdl;
err = mlxsw_sp_kvdl_parts_init(mlxsw_sp);
if (err)
goto err_kvdl_parts_init;
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR,
mlxsw_sp_kvdl_occ_get,
mlxsw_sp);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
mlxsw_sp_kvdl_single_occ_get,
mlxsw_sp);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
mlxsw_sp_kvdl_chunks_occ_get,
mlxsw_sp);
devlink_resource_occ_get_register(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
mlxsw_sp_kvdl_large_chunks_occ_get,
mlxsw_sp);
return 0;
err_kvdl_parts_init:
kfree(mlxsw_sp->kvdl);
return err;
}
void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
{
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_KVD_LINEAR);
mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
kfree(mlxsw_sp->kvdl);
return kvdl->kvdl_ops->alloc_size_query(mlxsw_sp, kvdl->priv,
entry_count, p_alloc_size);
}