Commit eb288cbd authored by Jakub Kicinski

Merge branch 'net-ipa-start-adding-ipa-v5-0-functionality'

Alex Elder says:

====================
net: ipa: start adding IPA v5.0 functionality

The biggest change for IPA v5.0 is that it supports more than 32
endpoints.  However there are two other unrelated changes:
  - The STATS_TETHERING memory region is not required
  - Filter tables no longer support a "global" filter

Beyond this, refactoring some code makes supporting more than 32
endpoints (in an upcoming series) easier.  So this series includes
a few other changes (not necessarily in patch order):
  - The maximum endpoint ID in use is determined during config
  - Loops over all endpoints only involve those in use
  - Endpoint IDs and their directions are checked for validity
    differently to simplify comparison against the maximum
====================

Link: https://lore.kernel.org/r/20221027122632.488694-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 58ba4263 b7aaff0b
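
Most of the endpoint changes below converge on a common kernel idiom for visiting only the endpoints present in a bit mask: copy the mask to a local variable, then repeatedly extract the lowest set bit with __ffs() and clear it with an XOR, so the loop ends exactly when no endpoints remain. A minimal sketch of the pattern follows; walk_endpoints() is an illustrative name, not a driver function, while __ffs(), __fls(), and BIT() are real kernel APIs.

#include <linux/bitops.h>	/* __ffs(), __fls() */
#include <linux/bits.h>		/* BIT() */

/* Illustrative only: visit each endpoint ID with a set bit in mask */
static void walk_endpoints(u32 mask)
{
	/* The caller passes a copy; the loop consumes its argument */
	while (mask) {
		u32 endpoint_id = __ffs(mask);	/* lowest set bit */

		mask ^= BIT(endpoint_id);	/* clear that bit */

		/* ... operate on endpoint endpoint_id ... */
	}
}

Substituting __fls() visits endpoints from the highest ID down instead, which is how ipa_endpoint_exit() below tears endpoints down in the reverse of setup order.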
drivers/net/ipa/ipa.h
@@ -61,9 +61,10 @@ struct ipa_interrupt;
* @zero_addr: DMA address of preallocated zero-filled memory
* @zero_virt: Virtual address of preallocated zero-filled memory
* @zero_size: Size (bytes) of preallocated zero-filled memory
* @endpoint_count: Number of endpoints represented by bit masks below
* @defined: Bit mask indicating endpoints defined in config data
* @available: Bit mask indicating endpoints hardware supports
* @filter_map: Bit mask indicating endpoints that support filtering
* @initialized: Bit mask indicating endpoints initialized
* @set_up: Bit mask indicating endpoints set up
* @enabled: Bit mask indicating endpoints enabled
* @modem_tx_count: Number of defined modem TX endpoints
@@ -117,9 +118,10 @@ struct ipa {
size_t zero_size;
/* Bit masks indicating endpoint state */
u32 available; /* supported by hardware */
u32 endpoint_count;
u32 defined; /* Defined in configuration data */
u32 available; /* Supported by hardware */
u32 filter_map;
u32 initialized;
u32 set_up;
u32 enabled;
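
Because every one of these masks is a u32, the driver as refactored here still cannot describe more than 32 endpoints; endpoint_count records how many low-order bit positions are actually meaningful. A guard along the following lines is not in the driver, but it makes explicit the constraint the upcoming IPA v5.0 work must lift (IPA_ENDPOINT_MAX comes from ipa_endpoint.h; static_assert() and BITS_PER_TYPE() are kernel facilities):

#include <linux/build_bug.h>	/* static_assert() */
#include <linux/bits.h>		/* BITS_PER_TYPE() */

/* A u32 bit mask can represent endpoint IDs 0..31 only */
static_assert(IPA_ENDPOINT_MAX <= BITS_PER_TYPE(u32));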
drivers/net/ipa/ipa_endpoint.c
@@ -243,42 +243,47 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
/* Validate endpoint configuration data. Return max defined endpoint ID */
static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
u32 max;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
return false;
return 0;
}
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
return false;
return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
dev_err(dev, "LAN RX endpoint not defined\n");
return false;
return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
dev_err(dev, "AP->modem TX endpoint not defined\n");
return false;
return 0;
}
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
dev_err(dev, "AP<-modem RX endpoint not defined\n");
return false;
return 0;
}
for (name = 0; name < count; name++, dp++)
max = 0;
for (name = 0; name < count; name++, dp++) {
if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
return false;
return 0;
max = max_t(u32, max, dp->endpoint_id);
}
return true;
return max;
}
/* Allocate a transaction to use on a non-command endpoint */
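
Note that returning 0 to signal failure is unambiguous here: a valid configuration must define at least the four distinct AP endpoints checked above, so on success the maximum defined endpoint ID is always at least 3 and therefore nonzero.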
@@ -426,10 +431,10 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
*/
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
u32 endpoint_id;
u32 endpoint_id = 0;
for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
while (endpoint_id < ipa->endpoint_count) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
@@ -448,7 +453,7 @@ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
u32 initialized = ipa->initialized;
u32 defined = ipa->defined;
struct gsi_trans *trans;
u32 count;
@@ -463,13 +468,13 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
return -EBUSY;
}
while (initialized) {
u32 endpoint_id = __ffs(initialized);
while (defined) {
u32 endpoint_id = __ffs(defined);
struct ipa_endpoint *endpoint;
const struct ipa_reg *reg;
u32 offset;
initialized ^= BIT(endpoint_id);
defined ^= BIT(endpoint_id);
/* We only reset modem TX endpoints */
endpoint = &ipa->endpoint[endpoint_id];
@@ -1008,10 +1013,10 @@ static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
u32 i;
u32 endpoint_id = 0;
for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[i];
while (endpoint_id < ipa->endpoint_count) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
@@ -1812,13 +1817,13 @@ static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_setup(struct ipa *ipa)
{
u32 initialized = ipa->initialized;
u32 defined = ipa->defined;
ipa->set_up = 0;
while (initialized) {
u32 endpoint_id = __ffs(initialized);
while (defined) {
u32 endpoint_id = __ffs(defined);
initialized ^= BIT(endpoint_id);
defined ^= BIT(endpoint_id);
ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
}
@@ -1842,15 +1847,14 @@ int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
const struct ipa_reg *reg;
u32 initialized;
u32 tx_count;
u32 rx_count;
u32 rx_base;
u32 rx_mask;
u32 tx_mask;
int ret = 0;
u32 max;
u32 defined;
u32 limit;
u32 val;
/* Prior to IPAv3.5, the FLAVOR_0 register was not supported.
/* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
* Furthermore, the endpoints were not grouped such that TX
* endpoint numbers started with 0 and RX endpoints had numbers
* higher than all TX endpoints, so we can't do the simple
@@ -1866,51 +1870,59 @@ int ipa_endpoint_config(struct ipa *ipa)
}
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
* the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* Our RX is an IPA producer */
/* Our RX is an IPA producer; our TX is an IPA consumer. */
tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
limit = rx_base + rx_count;
if (limit > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints, %u > %u\n",
limit, IPA_ENDPOINT_MAX);
return -EINVAL;
}
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
tx_mask = GENMASK(max - 1, 0);
/* Mark all supported RX and TX endpoints as available */
ipa->available = GENMASK(limit - 1, rx_base) | GENMASK(tx_count - 1, 0);
ipa->available = rx_mask | tx_mask;
defined = ipa->defined;
while (defined) {
u32 endpoint_id = __ffs(defined);
struct ipa_endpoint *endpoint;
/* Check for initialized endpoints not supported by the hardware */
if (ipa->initialized & ~ipa->available) {
dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
ipa->initialized & ~ipa->available);
ret = -EINVAL; /* Report other errors too */
}
defined ^= BIT(endpoint_id);
initialized = ipa->initialized;
while (initialized) {
u32 endpoint_id = __ffs(initialized);
struct ipa_endpoint *endpoint;
if (endpoint_id >= limit) {
dev_err(dev, "invalid endpoint id, %u > %u\n",
endpoint_id, limit - 1);
return -EINVAL;
}
initialized ^= BIT(endpoint_id);
if (!(BIT(endpoint_id) & ipa->available)) {
dev_err(dev, "unavailable endpoint id %u\n",
endpoint_id);
return -EINVAL;
}
/* Make sure it's pointing in the right direction */
endpoint = &ipa->endpoint[endpoint_id];
if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
dev_err(dev, "endpoint id %u wrong direction\n",
endpoint_id);
ret = -EINVAL;
if (endpoint->toward_ipa) {
if (endpoint_id < tx_count)
continue;
} else if (endpoint_id >= rx_base) {
continue;
}
dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
return -EINVAL;
}
return ret;
return 0;
}
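
To make the new layout checks concrete: from IPA v3.5 on, TX endpoints (IPA consumers) are numbered 0 through tx_count - 1, and RX endpoints (IPA producers) occupy rx_base through rx_base + rx_count - 1. With hypothetical FLAVOR_0 values of tx_count = 12, rx_count = 8, and rx_base = 16, the code above computes:

	limit = 16 + 8;				/* 24 */
	ipa->available = GENMASK(23, 16)	/* RX IDs 16..23 */
		       | GENMASK(11, 0);	/* TX IDs  0..11 */

An endpoint defined as RX with ID 10 would then pass the availability test (bit 10 is set, but in the TX range) and fail the direction test, since an RX endpoint ID must be at least rx_base.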
void ipa_endpoint_deconfig(struct ipa *ipa)
@@ -1936,24 +1948,24 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->toward_ipa = data->toward_ipa;
endpoint->config = data->endpoint.config;
ipa->initialized |= BIT(endpoint->endpoint_id);
ipa->defined |= BIT(endpoint->endpoint_id);
}
static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
{
endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
endpoint->ipa->defined &= ~BIT(endpoint->endpoint_id);
memset(endpoint, 0, sizeof(*endpoint));
}
void ipa_endpoint_exit(struct ipa *ipa)
{
u32 initialized = ipa->initialized;
u32 defined = ipa->defined;
while (initialized) {
u32 endpoint_id = __fls(initialized);
while (defined) {
u32 endpoint_id = __fls(defined);
initialized ^= BIT(endpoint_id);
defined ^= BIT(endpoint_id);
ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
}
@@ -1970,10 +1982,12 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
if (!ipa_endpoint_data_valid(ipa, count, data))
/* Number of endpoints is one more than the maximum ID */
ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
if (!ipa->endpoint_count)
return 0; /* Error */
ipa->initialized = 0;
ipa->defined = 0;
filter_map = 0;
for (name = 0; name < count; name++, data++) {
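
As an example of the arithmetic: if the highest endpoint ID in the configuration data is 19, endpoint_count becomes 20, and the loops bounded by ipa->endpoint_count in the earlier hunks stop there instead of scanning all IPA_ENDPOINT_MAX slots.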
drivers/net/ipa/ipa_mem.c
@@ -198,9 +198,12 @@ static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
case IPA_MEM_PDN_CONFIG:
case IPA_MEM_STATS_QUOTA_MODEM:
case IPA_MEM_STATS_TETHERING:
return ipa->version >= IPA_VERSION_4_0;
case IPA_MEM_STATS_TETHERING:
return ipa->version >= IPA_VERSION_4_0 &&
ipa->version != IPA_VERSION_5_0;
default:
return false; /* Anything else is optional */
}
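
The rewritten expression encodes a one-version exception: STATS_TETHERING remains required for IPA v4.0 through v4.11, becomes optional on IPA v5.0, and, as written, would be required again for any version defined after v5.0.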
drivers/net/ipa/ipa_table.c
@@ -32,8 +32,8 @@
* endian 64-bit "slot" that holds the address of a rule definition. (The
* size of these slots is 64 bits regardless of the host DMA address size.)
*
* Separate tables (both filter and route) used for IPv4 and IPv6. There
* are normally another set of "hashed" filter and route tables, which are
* Separate tables (both filter and route) are used for IPv4 and IPv6. There
* is normally another set of "hashed" filter and route tables, which are
* used with a hash of message metadata. Hashed operation is not supported
* by all IPA hardware (IPA v4.2 doesn't support hashed tables).
*
@@ -51,19 +51,32 @@
* Each filter rule is associated with an AP or modem TX endpoint, though
* not all TX endpoints support filtering. The first 64-bit slot in a
* filter table is a bitmap indicating which endpoints have entries in
* the table. The low-order bit (bit 0) in this bitmap represents a
* special global filter, which applies to all traffic. This is not
* used in the current code. Bit 1, if set, indicates that there is an
* entry (i.e. slot containing a system address referring to a rule) for
* endpoint 0 in the table. Bit 3, if set, indicates there is an entry
* for endpoint 2, and so on. Space is set aside in IPA local memory to
* hold as many filter table entries as might be required, but typically
* they are not all used.
* the table. Each set bit in this bitmap indicates the presence of the
* address of a filter rule in the memory following the bitmap. Until IPA
* v5.0, the low-order bit (bit 0) in this bitmap represents a special
* global filter, which applies to all traffic. Otherwise the position of
* each set bit represents an endpoint for which a filter rule is defined.
*
* The global rule is not used in current code, and support for it is
* removed starting at IPA v5.0. For IPA v5.0+, the endpoint bitmap
* position defines the endpoint ID--i.e. if bit 1 is set in the endpoint
* bitmap, endpoint 1 has a filter rule. Older versions of IPA represent
* the presence of a filter rule for endpoint X by bit (X + 1) being set.
* I.e., bit 1 set indicates the presence of a filter rule for endpoint 0,
* and bit 3 set means there is a filter rule present for endpoint 2.
*
* Each filter table entry has the address of a set of equations that
* implement a filter rule. So following the endpoint bitmap there
* will be such an address/entry for each endpoint with a set bit in
* the bitmap.
*
* The AP initializes all entries in a filter table to refer to a "zero"
* entry. Once initialized the modem and AP update the entries for
* endpoints they "own" directly. Currently the AP does not use the
* IPA filtering functionality.
* rule. Once initialized, the modem and AP update the entries for
* endpoints they "own" directly. Currently the AP does not use the IPA
* filtering functionality.
*
* This diagram shows an example of a filter table with an endpoint
* bitmap as defined prior to IPA v5.0.
*
* IPA Filter Table
* ----------------------
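
The encoding difference is small but easy to get wrong, so a sketch may help; ipa_filter_bitmap() is an illustrative name, not a driver function. Prior to IPA v5.0 the in-memory bitmap is the driver's "soft" filter map shifted left one bit to leave room for the global-filter bit; from IPA v5.0 on the two are identical:

/* Illustrative only: soft endpoint filter map -> filter table bitmap */
static u64 ipa_filter_bitmap(enum ipa_version version, u32 filter_map)
{
	if (version < IPA_VERSION_5_0)
		return (u64)filter_map << 1;	/* bit 0: global filter */

	return filter_map;			/* bit N: endpoint N */
}

For example, if endpoints 0 and 2 support filtering (filter_map = 0x5), the bitmap slot holds 0xa before IPA v5.0 and 0x5 from v5.0 on, which is exactly the distinction the ipa_table_init() hunk below draws.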
@@ -658,12 +671,6 @@ bool ipa_table_mem_valid(struct ipa *ipa, bool filter)
* when a route table is initialized or reset, its entries are made to refer
* to the zero rule. The zero rule is shared for route and filter tables.
*
* Note that the IPA hardware requires a filter or route rule address to be
* aligned on a 128 byte boundary. The coherent DMA buffer we allocate here
* has a minimum alignment, and we place the zero rule at the base of that
* allocated space. In ipa_table_init() we verify the minimum DMA allocation
* meets our requirement.
*
* +-------------------+
* --> | zero rule |
* / |-------------------|
@@ -708,12 +715,16 @@ int ipa_table_init(struct ipa *ipa)
/* First slot is the zero rule */
*virt++ = 0;
/* Next is the filter table bitmap. The "soft" bitmap value
* must be converted to the hardware representation by shifting
* it left one position. (Bit 0 represents global filtering,
* which is possible but not used.)
/* Next is the filter table bitmap. The "soft" bitmap value might
* need to be converted to the hardware representation by shifting
* it left one position. Prior to IPA v5.0, bit 0 represents global
* filtering, which is possible but not used. IPA v5.0+ eliminated
* that option, so there's no shifting required.
*/
*virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
if (ipa->version < IPA_VERSION_5_0)
*virt++ = cpu_to_le64((u64)ipa->filter_map << 1);
else
*virt++ = cpu_to_le64((u64)ipa->filter_map);
/* All the rest contain the DMA address of the zero rule */
le_addr = cpu_to_le64(addr);
drivers/net/ipa/ipa_version.h
@@ -19,6 +19,7 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
* @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
* @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
@@ -36,6 +37,7 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
IPA_VERSION_5_0,
IPA_VERSION_COUNT, /* Last; not a version */
};
@@ -48,6 +50,7 @@ static inline bool ipa_version_supported(enum ipa_version version)
case IPA_VERSION_4_5:
case IPA_VERSION_4_9:
case IPA_VERSION_4_11:
case IPA_VERSION_5_0:
return true;
default:
return false;