Commit 71920a77 authored by Paolo Abeni's avatar Paolo Abeni

Merge branch 'net-ipa-validation-cleanup'

Alex Elder says:

====================
net: ipa: validation cleanup

This series gathers a set of IPA driver cleanups, mostly involving
code that ensures certain things are known to be correct *early*
(either at build or initialization time), so they can be assumed good
during normal operation.

The first removes three constant symbols, by making a (reasonable)
assumption that a routing table consists of entries for the modem
followed by entries for the AP, with no unused entries between them.

The second removes two checks that are redundant (they verify the
sizes of two memory regions are in range, which will have been done
earlier for all regions).

The third adds some new checks to routing and filter tables that
can be done at "init time" (without requiring any access to IPA
hardware).

The fourth moves a check that routing and filter table addresses can
be encoded within certain IPA immediate commands, so it's performed
earlier; the checks can be done without touching IPA hardware.  The
fifth moves some other command-related checks earlier, for the same
reason.

The sixth removes the definition ipa_table_valid(), because what it
does has become redundant.  Finally, the last patch moves two more
validation calls so they're done very early in the probe process.
This will be required by some upcoming patches, which will record
the size of the routing and filter tables at this time so they're
available for subsequent initialization.
====================

Link: https://lore.kernel.org/r/20221021191340.4187935-1-elder@linaro.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 34802d06 73da9cac
......@@ -171,7 +171,8 @@ static void ipa_cmd_validate_build(void)
}
/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route)
{
u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
......@@ -197,21 +198,11 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
return false;
}
/* Entire memory range must fit within IPA-local memory */
if (mem->offset > ipa->mem_size ||
mem->size > ipa->mem_size - mem->offset) {
dev_err(dev, "%s table region out of range\n", table);
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
mem->offset, mem->size, ipa->mem_size);
return false;
}
return true;
}
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
......@@ -257,15 +248,6 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
return false;
}
/* Make sure the entire combined area fits in IPA memory */
if (size > ipa->mem_size || offset > ipa->mem_size - size) {
dev_err(dev, "header table region out of range\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
offset, size, ipa->mem_size);
return false;
}
return true;
}
......@@ -336,26 +318,11 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
return true;
}
bool ipa_cmd_data_valid(struct ipa *ipa)
{
if (!ipa_cmd_header_valid(ipa))
return false;
if (!ipa_cmd_register_write_valid(ipa))
return false;
return true;
}
int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
/* Command payloads are allocated one at a time, but a single
* transaction can require up to the maximum supported by the
* channel; treat them as if they were allocated all at once.
......@@ -655,3 +622,17 @@ struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
tre_count, DMA_NONE);
}
/* Init function for immediate commands; there is no ipa_cmd_exit() */
int ipa_cmd_init(struct ipa *ipa)
{
	/* Check build-time constants related to immediate commands */
	ipa_cmd_validate_build();

	/* Validate memory-layout properties that the header-init-local
	 * and register-write immediate commands depend on; both must
	 * hold before any command can be issued.
	 */
	if (!ipa_cmd_header_init_local_valid(ipa) ||
	    !ipa_cmd_register_write_valid(ipa))
		return -EINVAL;

	return 0;
}
......@@ -47,15 +47,15 @@ enum ipa_cmd_opcode {
};
/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* ipa_cmd_table_init_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
* @route: - Whether the region holds a route or filter table
*
* Return: true if region is valid, false otherwise
*/
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route);
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route);
/**
* ipa_cmd_data_valid() - Validate command-related configuration
......@@ -162,4 +162,14 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
*/
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
/**
* ipa_cmd_init() - Initialize IPA immediate commands
* @ipa: - IPA pointer
*
* Return: 0 if successful, or a negative error code
*
* There is no need for a matching ipa_cmd_exit() function.
*/
int ipa_cmd_init(struct ipa *ipa);
#endif /* _IPA_CMD_H_ */
......@@ -366,14 +366,6 @@ int ipa_mem_config(struct ipa *ipa)
while (--canary_count);
}
/* Make sure filter and route table memory regions are valid */
if (!ipa_table_valid(ipa))
goto err_dma_free;
/* Validate memory-related properties relevant to immediate commands */
if (!ipa_cmd_data_valid(ipa))
goto err_dma_free;
/* Verify the microcontroller ring alignment (if defined) */
mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
if (mem && mem->offset % 1024) {
......@@ -625,6 +617,12 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_count = mem_data->local_count;
ipa->mem = mem_data->local;
/* Check the route and filter table memory regions */
if (!ipa_table_mem_valid(ipa, 0))
return -EINVAL;
if (!ipa_table_mem_valid(ipa, IPA_ROUTE_MODEM_COUNT))
return -EINVAL;
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "error %d setting DMA mask\n", ret);
......
......@@ -106,12 +106,6 @@
* ----------------------
*/
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
/* Filter or route rules consist of a set of 32-bit values followed by a
* 32-bit all-zero rule list terminator. The "zero rule" is simply an
* all-zero rule followed by the list terminator.
......@@ -140,63 +134,25 @@ static void ipa_table_validate_build(void)
BUILD_BUG_ON(IPA_ROUTE_COUNT_MAX > 32);
/* The modem must be allotted at least one route table entry */
BUILD_BUG_ON(!IPA_ROUTE_MODEM_COUNT);
/* But it can't have more than what is available */
BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT > IPA_ROUTE_COUNT_MAX);
/* AP must too, but we can't use more than what is available */
BUILD_BUG_ON(IPA_ROUTE_MODEM_COUNT >= IPA_ROUTE_COUNT_MAX);
}
static bool
ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
static const struct ipa_mem *
ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
{
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
struct device *dev = &ipa->pdev->dev;
u32 size;
if (route)
size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
if (!ipa_cmd_table_valid(ipa, mem, route))
return false;
/* mem->size >= size is sufficient, but we'll demand more */
if (mem->size == size)
return true;
/* Hashed table regions can be zero size if hashing is not supported */
if (ipa_table_hash_support(ipa) && !mem->size)
return true;
dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
route ? "route" : "filter", mem_id, mem->size, size);
return false;
}
/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
bool valid;
valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
if (!ipa_table_hash_support(ipa))
return valid;
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED,
false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED,
false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED,
true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED,
true);
return valid;
enum ipa_mem_id mem_id;
mem_id = filter ? hashed ? ipv6 ? IPA_MEM_V6_FILTER_HASHED
: IPA_MEM_V4_FILTER_HASHED
: ipv6 ? IPA_MEM_V6_FILTER
: IPA_MEM_V4_FILTER
: hashed ? ipv6 ? IPA_MEM_V6_ROUTE_HASHED
: IPA_MEM_V4_ROUTE_HASHED
: ipv6 ? IPA_MEM_V6_ROUTE
: IPA_MEM_V4_ROUTE;
return ipa_mem_find(ipa, mem_id);
}
bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_map)
......@@ -342,11 +298,11 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
}
if (modem) {
first = IPA_ROUTE_MODEM_MIN;
first = 0;
count = IPA_ROUTE_MODEM_COUNT;
} else {
first = IPA_ROUTE_AP_MIN;
count = IPA_ROUTE_AP_COUNT;
first = IPA_ROUTE_MODEM_COUNT;
count = IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT;
}
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
......@@ -561,8 +517,7 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
static bool ipa_route_id_modem(u32 route_id)
{
return route_id >= IPA_ROUTE_MODEM_MIN &&
route_id <= IPA_ROUTE_MODEM_MIN + IPA_ROUTE_MODEM_COUNT - 1;
return route_id < IPA_ROUTE_MODEM_COUNT;
}
/**
......@@ -611,8 +566,81 @@ void ipa_table_config(struct ipa *ipa)
ipa_route_config(ipa, true);
}
/*
* Initialize a coherent DMA allocation containing initialized filter and
/* Validate the memory regions backing one family of tables (filter or
 * route, both IP versions, hashed and non-hashed).
 *
 * Zero modem_route_count means filter table memory check; nonzero means
 * route table memory check.
 *
 * NOTE(review): modem_route_count is declared bool, yet the caller passes
 * IPA_ROUTE_MODEM_COUNT (see the ipa_mem_init() hunk); any nonzero count
 * converts to 1, so the "count < modem_route_count + 1" check below only
 * ever requires 2 route entries. Parameter likely should be u32 — confirm.
 */
bool ipa_table_mem_valid(struct ipa *ipa, bool modem_route_count)
{
bool hash_support = ipa_table_hash_support(ipa);
/* Filter check is selected by a zero route count */
bool filter = !modem_route_count;
const struct ipa_mem *mem_hashed;
const struct ipa_mem *mem_ipv4;
const struct ipa_mem *mem_ipv6;
u32 count;
/* IPv4 and IPv6 non-hashed tables are expected to be defined and
* have the same size. Both must have at least two entries (and
* would normally have more than that).
*/
mem_ipv4 = ipa_table_mem(ipa, filter, false, false);
if (!mem_ipv4)
return false;
mem_ipv6 = ipa_table_mem(ipa, filter, false, true);
if (!mem_ipv6)
return false;
if (mem_ipv4->size != mem_ipv6->size)
return false;
/* Table offset and size must fit in TABLE_INIT command fields */
if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter))
return false;
/* Make sure the regions are big enough */
count = mem_ipv4->size / sizeof(__le64);
if (count < 2)
return false;
if (filter) {
/* Filter tables must able to hold the endpoint bitmap plus
* an entry for each endpoint that supports filtering
*/
if (count < 1 + hweight32(ipa->filter_map))
return false;
} else {
/* Routing tables must be able to hold all modem entries,
* plus at least one entry for the AP.
*/
if (count < modem_route_count + 1)
return false;
}
/* If hashing is supported, hashed tables are expected to be defined,
* and have the same size as non-hashed tables. If hashing is not
* supported, hashed tables are expected to have zero size (or not
* be defined).
*/
mem_hashed = ipa_table_mem(ipa, filter, true, false);
if (hash_support) {
if (!mem_hashed || mem_hashed->size != mem_ipv4->size)
return false;
} else {
if (mem_hashed && mem_hashed->size)
return false;
}
/* Same check for IPv6 tables */
mem_hashed = ipa_table_mem(ipa, filter, true, true);
if (hash_support) {
if (!mem_hashed || mem_hashed->size != mem_ipv6->size)
return false;
} else {
if (mem_hashed && mem_hashed->size)
return false;
}
return true;
}
/* Initialize a coherent DMA allocation containing initialized filter and
* route table data. This is used when initializing or resetting the IPA
* filter or route table.
*
......
......@@ -19,14 +19,6 @@ struct ipa;
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
/**
* ipa_table_valid() - Validate route and filter table memory regions
* @ipa: IPA pointer
*
* Return: true if all regions are valid, false otherwise
*/
bool ipa_table_valid(struct ipa *ipa);
/**
* ipa_filter_map_valid() - Validate a filter table endpoint bitmap
* @ipa: IPA pointer
......@@ -86,4 +78,11 @@ int ipa_table_init(struct ipa *ipa);
*/
void ipa_table_exit(struct ipa *ipa);
/**
* ipa_table_mem_valid() - Validate sizes of table memory regions
* @ipa: IPA pointer
* @modem_route_count: Number of modem route table entries
*
* Return: true if all table memory regions are valid, false otherwise
*/
bool ipa_table_mem_valid(struct ipa *ipa, bool modem_route_count);
#endif /* _IPA_TABLE_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment