Commit 44b958a6 authored by David S. Miller's avatar David S. Miller

Merge branch 'ipa-32bit'

Alex Elder says:

====================
net: ipa: support 32-bit targets

There is currently a configuration dependency that restricts IPA to
be supported only on 64-bit machines.  There are only a few things
that really require that, and those are fixed in this series.  The
last patch in the series removes the CONFIG_64BIT build dependency
for IPA.

Version 2 of this series uses upper_32_bits() rather than creating
a new function to extract bits out of a DMA address.  Version 3 of
the series uses lower_32_bits() as well.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5108802a 99e75a37
config QCOM_IPA config QCOM_IPA
tristate "Qualcomm IPA support" tristate "Qualcomm IPA support"
depends on 64BIT && NET && QCOM_SMEM depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST) depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
select QCOM_MDT_LOADER if ARCH_QCOM select QCOM_MDT_LOADER if ARCH_QCOM
......
...@@ -351,7 +351,7 @@ void *gsi_ring_virt(struct gsi_ring *ring, u32 index) ...@@ -351,7 +351,7 @@ void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
/* Return the 32-bit DMA address associated with a ring index */ /* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index) static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{ {
return (ring->addr & GENMASK(31, 0)) + index * GSI_RING_ELEMENT_SIZE; return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
} }
/* Return the ring index of a 32-bit ring offset */ /* Return the ring index of a 32-bit ring offset */
...@@ -708,10 +708,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id) ...@@ -708,10 +708,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
* high-order 32 bits of the address of the event ring, * high-order 32 bits of the address of the event ring,
* respectively. * respectively.
*/ */
val = evt_ring->ring.addr & GENMASK(31, 0); val = lower_32_bits(evt_ring->ring.addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id)); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
val = upper_32_bits(evt_ring->ring.addr);
val = evt_ring->ring.addr >> 32;
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id)); iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */ /* Enable interrupt moderation by setting the moderation delay */
...@@ -816,10 +815,9 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell) ...@@ -816,10 +815,9 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
* high-order 32 bits of the address of the channel ring, * high-order 32 bits of the address of the channel ring,
* respectively. * respectively.
*/ */
val = channel->tre_ring.addr & GENMASK(31, 0); val = lower_32_bits(channel->tre_ring.addr);
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id)); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
val = upper_32_bits(channel->tre_ring.addr);
val = channel->tre_ring.addr >> 32;
iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id)); iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
/* Command channel gets low weighted round-robin priority */ /* Command channel gets low weighted round-robin priority */
...@@ -1365,7 +1363,7 @@ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel, ...@@ -1365,7 +1363,7 @@ static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
u32 tre_index; u32 tre_index;
/* Event xfer_ptr records the TRE it's associated with */ /* Event xfer_ptr records the TRE it's associated with */
tre_offset = le64_to_cpu(event->xfer_ptr) & GENMASK(31, 0); tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset); tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
return gsi_channel_trans_mapped(channel, tre_index); return gsi_channel_trans_mapped(channel, tre_index);
......
...@@ -735,8 +735,14 @@ MODULE_DEVICE_TABLE(of, ipa_match); ...@@ -735,8 +735,14 @@ MODULE_DEVICE_TABLE(of, ipa_match);
static void ipa_validate_build(void) static void ipa_validate_build(void)
{ {
#ifdef IPA_VALIDATE #ifdef IPA_VALIDATE
/* We assume we're working on 64-bit hardware */ /* At one time we assumed a 64-bit build, allowing some do_div()
BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT)); * calls to be replaced by simple division or modulo operations.
* We currently only perform divide and modulo operations on u32,
* u16, or size_t objects, and of those only size_t has any chance
* of being a 64-bit value. (It should be guaranteed 32 bits wide
* on a 32-bit build, but there is no harm in verifying that.)
*/
BUILD_BUG_ON(!IS_ENABLED(CONFIG_64BIT) && sizeof(size_t) != 4);
/* Code assumes the EE ID for the AP is 0 (zeroed structure field) */ /* Code assumes the EE ID for the AP is 0 (zeroed structure field) */
BUILD_BUG_ON(GSI_EE_AP != 0); BUILD_BUG_ON(GSI_EE_AP != 0);
......
...@@ -118,21 +118,15 @@ ...@@ -118,21 +118,15 @@
/* Check things that can be validated at build time. */ /* Check things that can be validated at build time. */
static void ipa_table_validate_build(void) static void ipa_table_validate_build(void)
{ {
/* IPA hardware accesses memory 128 bytes at a time. Addresses /* Filter and route tables contain DMA addresses that refer
* referred to by entries in filter and route tables must be * to filter or route rules. But the size of a table entry
* aligned on 128-byte byte boundaries. The only rule address * is 64 bits regardless of what the size of an AP DMA address
* ever used is the "zero rule", and it's aligned at the base * is. A fixed constant defines the size of an entry, and
* of a coherent DMA allocation. * code in ipa_table_init() uses a pointer to __le64 to
* initialize tables.
*/ */
BUILD_BUG_ON(ARCH_DMA_MINALIGN % IPA_TABLE_ALIGN); BUILD_BUG_ON(sizeof(dma_addr_t) > IPA_TABLE_ENTRY_SIZE);
BUILD_BUG_ON(sizeof(__le64) != IPA_TABLE_ENTRY_SIZE);
/* Filter and route tables contain DMA addresses that refer to
* filter or route rules. We use a fixed constant to represent
* the size of either type of table entry. Code in ipa_table_init()
* uses a pointer to __le64 to initialize table entries. * initialize tables.
*/
BUILD_BUG_ON(IPA_TABLE_ENTRY_SIZE != sizeof(dma_addr_t));
BUILD_BUG_ON(sizeof(dma_addr_t) != sizeof(__le64));
/* A "zero rule" is used to represent no filtering or no routing. /* A "zero rule" is used to represent no filtering or no routing.
* It is a 64-bit block of zeroed memory. Code in ipa_table_init() * It is a 64-bit block of zeroed memory. Code in ipa_table_init()
...@@ -663,6 +657,18 @@ int ipa_table_init(struct ipa *ipa) ...@@ -663,6 +657,18 @@ int ipa_table_init(struct ipa *ipa)
if (!virt) if (!virt)
return -ENOMEM; return -ENOMEM;
/* We put the "zero rule" at the base of our table area. The IPA
* hardware requires rules to be aligned on a 128-byte boundary.
* Make sure the allocation satisfies this constraint.
*/
if (addr % IPA_TABLE_ALIGN) {
dev_err(dev, "table address %pad not %u-byte aligned\n",
&addr, IPA_TABLE_ALIGN);
dma_free_coherent(dev, size, virt, addr);
return -ERANGE;
}
ipa->table_virt = virt; ipa->table_virt = virt;
ipa->table_addr = addr; ipa->table_addr = addr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment