Commit 76cf404c authored by David S. Miller

Merge branch 'ipa-mem-2'

Alex Elder says:

====================
net: ipa: memory region rework, part 2

This is the second portion of a set of patches updating the IPA
memory region code.

In this portion (part 2), the focus is on adjusting the code so that
it no longer assumes the memory region descriptor array is indexed
by the region identifier.  This brings with it some related cleanup.

Three loops are changed so their loop index variable is an unsigned
rather than an enumerated type.
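
As a rough illustration (simplified from the ipa_mem.c hunks shown
below), the pattern changes from this:

        enum ipa_mem_id mem_id;         /* loop index doubled as region ID */

        for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
                const struct ipa_mem *mem = &ipa->mem[mem_id];
                /* ... */
        }

to this:

        u32 i;                          /* plain index; ID lives in the entry */

        for (i = 0; i < ipa->mem_count; i++) {
                const struct ipa_mem *mem = &ipa->mem[i];
                /* ... use mem->id where the region ID is needed */
        }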

A set of functions is changed so a region identifier (rather than a
memory region descriptor pointer) is passed as an argument, to
simplify their call sites.  This isn't entirely related or required,
but I think it improves the code.
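
For example, ipa_mem_zero_region_add() now takes a region ID and
looks up the descriptor itself, so a call site shrinks from:

        ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);

to:

        ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);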

A validation function for filter and route table memory regions is
changed to take memory region IDs, rather than determining which
region to validate based on a set of Boolean flags.
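
Concretely, each flag-based call in ipa_table_valid() becomes a call
that names the region directly:

        /* Before: region selected by route/ipv6/hashed Boolean flags */
        valid = valid && ipa_table_valid_one(ipa, false, false, true);

        /* After: region named by its ID */
        valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, false);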

Finally, ipa_mem_find() is created to abstract getting a memory
descriptor based on its ID, and it is used everywhere rather than
indexing the array.  With that implemented, all of the memory
regions can be defined by arrays of entries without index
designators.
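
A minimal sketch of the resulting lookup pattern (note that
ipa_mem_find() returns NULL for a region the platform data does not
define, so some callers must check for that):

        /* Before: direct indexing assumed the entry always exists */
        size += ipa->mem[IPA_MEM_AP_HEADER].size;

        /* After: look the region up, allowing for its absence */
        mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
        if (mem)
                size += mem->size;
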
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 53f8b1b2 c61cfb94
@@ -200,41 +200,55 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
u32 offset;
u32 size;
/* In ipa_cmd_hdr_init_local_add() we record the offset and size
* of the header table memory area. Make sure the offset and size
* fit in the fields that need to hold them, and that the entire
* range is within the overall IPA memory range.
/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
* the header table memory area in an immediate command. Make sure
* the offset and size fit in the fields that need to hold them, and
* that the entire range is within the overall IPA memory range.
*/
offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
if (mem->offset > offset_max ||
ipa->mem_offset > offset_max - mem->offset) {
size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
/* The header memory area contains both the modem and AP header
* regions. The modem portion defines the address of the region.
*/
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
offset = mem->offset;
size = mem->size;
/* Make sure the offset fits in the IPA command */
if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "header table region offset too large\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
ipa->mem_offset, mem->offset, offset_max);
ipa->mem_offset, offset, offset_max);
return false;
}
size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
size += ipa->mem[IPA_MEM_AP_HEADER].size;
/* Add the size of the AP portion (if defined) to the combined size */
mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
if (mem)
size += mem->size;
/* Make sure the combined size fits in the IPA command */
if (size > size_max) {
dev_err(dev, "header table region size too large\n");
dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
return false;
}
if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
/* Make sure the entire combined area fits in IPA memory */
if (size > ipa->mem_size || offset > ipa->mem_size - size) {
dev_err(dev, "header table region out of range\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
mem->offset, size, ipa->mem_size);
offset, size, ipa->mem_size);
return false;
}
@@ -271,91 +271,91 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
[IPA_MEM_UC_INFO] = {
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_V4_FILTER_HASHED] = {
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_FILTER] = {
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER_HASHED] = {
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER] = {
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE_HASHED] = {
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE] = {
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE_HASHED] = {
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE] = {
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_MODEM_HEADER] = {
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
[IPA_MEM_MODEM_PROC_CTX] = {
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
[IPA_MEM_AP_PROC_CTX] = {
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_MODEM] = {
{
.id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1024,
.canary_count = 0,
},
[IPA_MEM_UC_EVENT_RING] = {
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x1c00,
.size = 0x0400,
@@ -220,133 +220,133 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.11 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
[IPA_MEM_UC_INFO] = {
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_V4_FILTER_HASHED] = {
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_FILTER] = {
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER_HASHED] = {
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER] = {
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE_HASHED] = {
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE] = {
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE_HASHED] = {
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE] = {
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_MODEM_HEADER] = {
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
[IPA_MEM_AP_HEADER] = {
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_MODEM_PROC_CTX] = {
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
[IPA_MEM_AP_PROC_CTX] = {
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_NAT_TABLE] = {
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
[IPA_MEM_PDN_CONFIG] = {
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
[IPA_MEM_STATS_QUOTA_MODEM] = {
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
[IPA_MEM_STATS_QUOTA_AP] = {
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
[IPA_MEM_STATS_TETHERING] = {
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
[IPA_MEM_STATS_DROP] = {
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
[IPA_MEM_MODEM] = {
{
.id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
[IPA_MEM_END_MARKER] = {
{
.id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
@@ -219,109 +219,109 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
[IPA_MEM_UC_INFO] = {
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_V4_FILTER_HASHED] = {
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0,
.canary_count = 2,
},
[IPA_MEM_V4_FILTER] = {
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0290,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER_HASHED] = {
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0310,
.size = 0,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER] = {
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0318,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE_HASHED] = {
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0398,
.size = 0,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE] = {
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x03a0,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE_HASHED] = {
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0420,
.size = 0,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE] = {
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0428,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_MODEM_HEADER] = {
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x04a8,
.size = 0x0140,
.canary_count = 2,
},
[IPA_MEM_MODEM_PROC_CTX] = {
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x05f0,
.size = 0x0200,
.canary_count = 2,
},
[IPA_MEM_AP_PROC_CTX] = {
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x07f0,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_PDN_CONFIG] = {
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x09f8,
.size = 0x0050,
.canary_count = 2,
},
[IPA_MEM_STATS_QUOTA_MODEM] = {
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
},
[IPA_MEM_STATS_TETHERING] = {
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x0ab0,
.size = 0x0140,
.canary_count = 0,
},
[IPA_MEM_MODEM] = {
{
.id = IPA_MEM_MODEM,
.offset = 0x0bf0,
.size = 0x140c,
.canary_count = 0,
},
[IPA_MEM_END_MARKER] = {
{
.id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
@@ -265,139 +265,139 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.5 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
[IPA_MEM_UC_INFO] = {
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_V4_FILTER_HASHED] = {
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_FILTER] = {
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER_HASHED] = {
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER] = {
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE_HASHED] = {
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE] = {
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE_HASHED] = {
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE] = {
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_MODEM_HEADER] = {
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
[IPA_MEM_AP_HEADER] = {
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_MODEM_PROC_CTX] = {
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
[IPA_MEM_AP_PROC_CTX] = {
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_NAT_TABLE] = {
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
[IPA_MEM_STATS_QUOTA_MODEM] = {
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
[IPA_MEM_STATS_QUOTA_AP] = {
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
[IPA_MEM_STATS_TETHERING] = {
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
[IPA_MEM_STATS_FILTER_ROUTE] = {
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
[IPA_MEM_STATS_DROP] = {
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
[IPA_MEM_MODEM] = {
{
.id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
[IPA_MEM_UC_EVENT_RING] = {
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
[IPA_MEM_PDN_CONFIG] = {
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
@@ -263,139 +263,139 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.9 */
static const struct ipa_mem ipa_mem_local_data[] = {
[IPA_MEM_UC_SHARED] = {
{
.id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
[IPA_MEM_UC_INFO] = {
{
.id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_V4_FILTER_HASHED] = {
{
.id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_FILTER] = {
{
.id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER_HASHED] = {
{
.id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_FILTER] = {
{
.id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE_HASHED] = {
{
.id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V4_ROUTE] = {
{
.id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE_HASHED] = {
{
.id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_V6_ROUTE] = {
{
.id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
[IPA_MEM_MODEM_HEADER] = {
{
.id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
[IPA_MEM_AP_HEADER] = {
{
.id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_MODEM_PROC_CTX] = {
{
.id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
[IPA_MEM_AP_PROC_CTX] = {
{
.id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
[IPA_MEM_NAT_TABLE] = {
{
.id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
[IPA_MEM_STATS_QUOTA_MODEM] = {
{
.id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
[IPA_MEM_STATS_QUOTA_AP] = {
{
.id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
[IPA_MEM_STATS_TETHERING] = {
{
.id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
[IPA_MEM_STATS_FILTER_ROUTE] = {
{
.id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
[IPA_MEM_STATS_DROP] = {
{
.id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
[IPA_MEM_MODEM] = {
{
.id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
[IPA_MEM_UC_EVENT_RING] = {
{
.id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
[IPA_MEM_PDN_CONFIG] = {
{
.id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
@@ -26,11 +26,26 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
u32 i;
for (i = 0; i < ipa->mem_count; i++) {
const struct ipa_mem *mem = &ipa->mem[i];
if (mem->id == mem_id)
return mem;
}
return NULL;
}
/* Add an immediate command to a transaction that zeroes a memory region */
static void
ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr = ipa->zero_addr;
if (!mem->size)
@@ -60,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
u16 size;
@@ -74,25 +90,28 @@ int ipa_mem_setup(struct ipa *ipa)
return -EBUSY;
}
/* Initialize IPA-local header memory. The modem and AP header
* regions are contiguous, and initialized together.
/* Initialize IPA-local header memory. The AP header region, if
* present, is contiguous with and follows the modem header region,
* and they are initialized together.
*/
offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
size += ipa->mem[IPA_MEM_AP_HEADER].size;
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
offset = mem->offset;
size = mem->size;
mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
if (mem)
size += mem->size;
ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
val = proc_cntxt_base_addr_encoded(ipa->version, offset);
iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
@@ -196,6 +215,11 @@ static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
return false;
}
if (!mem->size && !mem->canary_count) {
dev_err(dev, "empty memory region %u\n", mem_id);
return false;
}
/* Other than modem memory, sizes must be a multiple of 8 */
size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
if (mem->size % size_multiple)
@@ -220,6 +244,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id;
u32 i;
if (mem_data->local_count > IPA_MEM_COUNT) {
dev_err(dev, "too many memory regions (%u > %u)\n",
@@ -227,11 +252,8 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
return false;
}
for (mem_id = 0; mem_id < mem_data->local_count; mem_id++) {
const struct ipa_mem *mem = &mem_data->local[mem_id];
if (mem_id == IPA_MEM_UNDEFINED)
continue;
for (i = 0; i < mem_data->local_count; i++) {
const struct ipa_mem *mem = &mem_data->local[i];
if (__test_and_set_bit(mem->id, regions)) {
dev_err(dev, "duplicate memory region %u\n", mem->id);
@@ -239,18 +261,10 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
}
/* Defined regions have non-zero size and/or canary count */
if (mem->size || mem->canary_count) {
if (ipa_mem_valid_one(ipa, mem))
continue;
if (!ipa_mem_valid_one(ipa, mem))
return false;
}
/* It's harmless, but warn if an offset is provided */
if (mem->offset)
dev_warn(dev, "empty region %u has non-zero offset\n",
mem_id);
}
/* Now see if any required regions are not defined */
for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
mem_id < IPA_MEM_COUNT;
@@ -268,16 +282,16 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
u32 limit = ipa->mem_size;
enum ipa_mem_id mem_id;
u32 i;
for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
const struct ipa_mem *mem = &ipa->mem[mem_id];
for (i = 0; i < ipa->mem_count; i++) {
const struct ipa_mem *mem = &ipa->mem[i];
if (mem->offset + mem->size <= limit)
continue;
dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
mem_id, limit);
mem->id, limit);
return false;
}
@@ -294,11 +308,12 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
enum ipa_mem_id mem_id;
const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
void *virt;
u32 val;
u32 i;
/* Check the advertised location and size of the shared memory area */
val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
@@ -330,24 +345,18 @@ int ipa_mem_config(struct ipa *ipa)
ipa->zero_virt = virt;
ipa->zero_size = IPA_MEM_MAX;
/* For each region, write "canary" values in the space prior to
* the region's base address if indicated.
/* For each defined region, write "canary" values in the
* space prior to the region's base address if indicated.
*/
for (mem_id = 0; mem_id < ipa->mem_count; mem_id++) {
const struct ipa_mem *mem = &ipa->mem[mem_id];
u16 canary_count;
for (i = 0; i < ipa->mem_count; i++) {
u16 canary_count = ipa->mem[i].canary_count;
__le32 *canary;
/* Skip over undefined regions */
if (!mem->offset && !mem->size)
continue;
canary_count = mem->canary_count;
if (!canary_count)
continue;
/* Write canary values in the space before the region */
canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
do
*--canary = IPA_MEM_CANARY_VAL;
while (--canary_count);
@@ -361,8 +370,9 @@ int ipa_mem_config(struct ipa *ipa)
if (!ipa_cmd_data_valid(ipa))
goto err_dma_free;
/* Verify the microcontroller ring alignment (0 is OK too) */
if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
/* Verify the microcontroller ring alignment (if defined) */
mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
if (mem && mem->offset % 1024) {
dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
goto err_dma_free;
}
@@ -410,11 +420,9 @@ int ipa_mem_zero_modem(struct ipa *ipa)
return -EBUSY;
}
ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
@@ -529,7 +537,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* (in this case, the modem). An allocation from SMEM is persistent
* until the AP reboots; there is no way to free an allocated SMEM
* region. Allocation only reserves the space; to use it you need
* to "get" a pointer it (this implies no reference counting).
* to "get" a pointer it (this does not imply reference counting).
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
@@ -43,7 +43,6 @@ struct ipa_mem_data;
/* IPA-resident memory region ids */
enum ipa_mem_id {
IPA_MEM_UNDEFINED = 0, /* undefined region */
IPA_MEM_UC_SHARED, /* 0 canaries */
IPA_MEM_UC_INFO, /* 0 canaries */
IPA_MEM_V4_FILTER_HASHED, /* 2 canaries */
@@ -90,6 +89,8 @@ struct ipa_mem {
u16 canary_count;
};
const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);
@@ -298,32 +298,32 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.platform_type_valid = 1;
req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
if (mem->size) {
req.hdr_tbl_info_valid = 1;
req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
}
mem = &ipa->mem[IPA_MEM_V4_ROUTE];
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
mem = &ipa->mem[IPA_MEM_V6_ROUTE];
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
mem = &ipa->mem[IPA_MEM_V4_FILTER];
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
mem = &ipa->mem[IPA_MEM_V6_FILTER];
mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
req.v6_filter_tbl_start_valid = 1;
req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
mem = &ipa->mem[IPA_MEM_MODEM];
mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
if (mem->size) {
req.modem_mem_info_valid = 1;
req.modem_mem_info.start = ipa->mem_offset + mem->offset;
@@ -336,7 +336,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* skip_uc_load_valid and skip_uc_load are set above */
mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
if (mem->size) {
req.hdr_proc_ctx_tbl_info_valid = 1;
req.hdr_proc_ctx_tbl_info.start =
@@ -347,7 +347,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* Nothing to report for the compression table (zip_tbl_info) */
mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
if (mem->size) {
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
@@ -355,7 +355,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
if (mem->size) {
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
@@ -363,22 +363,21 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
if (mem->size) {
req.v4_hash_filter_tbl_start_valid = 1;
req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
if (mem->size) {
req.v6_hash_filter_tbl_start_valid = 1;
req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
/* None of the stats fields are valid (IPA v4.0 and above) */
/* The stats fields are only valid for IPA v4.0+ */
if (ipa->version >= IPA_VERSION_4_0) {
mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM];
mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
@@ -387,8 +386,9 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.hw_stats_quota_size = ipa->mem_offset + mem->size;
}
mem = &ipa->mem[IPA_MEM_STATS_DROP];
if (mem->size) {
/* If the DROP stats region is defined, include it */
mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
if (mem && mem->size) {
req.hw_stats_drop_base_addr_valid = 1;
req.hw_stats_drop_base_addr =
ipa->mem_offset + mem->offset;
@@ -150,29 +150,16 @@ static void ipa_table_validate_build(void)
}
static bool
ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
{
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
struct device *dev = &ipa->pdev->dev;
const struct ipa_mem *mem;
u32 size;
if (route) {
if (ipv6)
mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
: &ipa->mem[IPA_MEM_V6_ROUTE];
else
mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
: &ipa->mem[IPA_MEM_V4_ROUTE];
if (route)
size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
} else {
if (ipv6)
mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
: &ipa->mem[IPA_MEM_V6_FILTER];
else
mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
: &ipa->mem[IPA_MEM_V4_FILTER];
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
}
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
return false;
@@ -185,9 +172,8 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
if (hashed && !mem->size)
return true;
dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
ipv6 ? '6' : '4', hashed ? "hashed " : "",
route ? "route" : "filter", mem->size, size);
dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
route ? "route" : "filter", mem_id, mem->size, size);
return false;
}
@@ -195,16 +181,16 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
bool valid = true;
bool valid;
valid = valid && ipa_table_valid_one(ipa, false, false, false);
valid = valid && ipa_table_valid_one(ipa, false, false, true);
valid = valid && ipa_table_valid_one(ipa, false, true, false);
valid = valid && ipa_table_valid_one(ipa, false, true, true);
valid = valid && ipa_table_valid_one(ipa, true, false, false);
valid = valid && ipa_table_valid_one(ipa, true, false, true);
valid = valid && ipa_table_valid_one(ipa, true, true, false);
valid = valid && ipa_table_valid_one(ipa, true, true, true);
valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, false);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, true);
return valid;
}
@@ -256,14 +242,15 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
u16 first, u16 count, const struct ipa_mem *mem)
u16 first, u16 count, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr;
u32 offset;
u16 size;
/* Nothing to do if the table memory regions is empty */
/* Nothing to do if the table memory region is empty */
if (!mem->size)
return;
@@ -282,16 +269,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/
static int
ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
{
u32 ep_mask = ipa->filter_map;
u32 count = hweight32(ep_mask);
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
if (!mem->size)
return 0;
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -311,7 +295,7 @@ ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
if (endpoint->ee_id != ee_id)
continue;
ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
}
gsi_trans_commit_wait(trans);
@@ -327,20 +311,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
modem);
ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
if (ret)
return ret;
ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
modem);
ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
return ret;
}
@@ -371,15 +353,13 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
count = IPA_ROUTE_AP_COUNT;
}
ipa_table_reset_add(trans, false, first, count,
&ipa->mem[IPA_MEM_V4_ROUTE]);
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
ipa_table_reset_add(trans, false, first, count,
&ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
ipa_table_reset_add(trans, false, first, count,
IPA_MEM_V4_ROUTE_HASHED);
ipa_table_reset_add(trans, false, first, count,
&ipa->mem[IPA_MEM_V6_ROUTE]);
ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
ipa_table_reset_add(trans, false, first, count,
&ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
ipa_table_reset_add(trans, false, first, count,
IPA_MEM_V6_ROUTE_HASHED);
gsi_trans_commit_wait(trans);
@@ -433,10 +413,12 @@ int ipa_table_hash_flush(struct ipa *ipa)
static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
enum ipa_cmd_opcode opcode,
const struct ipa_mem *mem,
const struct ipa_mem *hash_mem)
enum ipa_mem_id mem_id,
enum ipa_mem_id hash_mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
u16 hash_count;
@@ -477,20 +459,16 @@ int ipa_table_setup(struct ipa *ipa)
}
ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
&ipa->mem[IPA_MEM_V4_ROUTE],
&ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);
ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
&ipa->mem[IPA_MEM_V6_ROUTE],
&ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
&ipa->mem[IPA_MEM_V4_FILTER],
&ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
&ipa->mem[IPA_MEM_V6_FILTER],
&ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
gsi_trans_commit_wait(trans);
@@ -116,7 +116,8 @@ enum ipa_uc_event {
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
{
u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
u32 offset = ipa->mem_offset + mem->offset;
return ipa->mem_virt + offset;
}