Commit 8024edf3 authored by David S. Miller

Merge branch 'net-ipa-GSI-regs'

Alex Elder says:

====================
net: ipa: determine GSI register offsets differently

This series changes the way GSI register offsets are specified, using
the "reg" mechanism currently used for IPA registers.  A follow-on
series will extend this work so fields within GSI registers are also
specified this way.
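
As a rough sketch of the pattern (helper and register names here are
taken from the patches below, so treat it as illustrative), an access
that used to be coded against a fixed offset macro, for example:

    iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

instead looks up a register descriptor by ID and asks it for its offset:

    const struct reg *reg = gsi_reg(gsi, CNTXT_INTSET);

    iowrite32(1, gsi->virt + reg_offset(reg));

Registers with one instance per channel or event ring use
reg_n_offset(reg, n) rather than reg_offset(reg).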

The first patch rearranges the GSI register initialization code so
it is similar to the way it's done for the IPA registers.  The
second identifies all the GSI registers in an enumerated type.
The third introduces "gsi_reg-v3.1.c" and uses the "reg" code to
define one GSI register offset.  The second-to-last patch just
adds "gsi_reg-v3.5.1.c", because that version introduces a new
register not previously defined.  All the rest just define the
rest of the GSI register offsets using the "reg" mechanism.

Note that, to keep continuation lines aligned with an open parenthesis,
the new files created in this series trigger some checkpatch warnings.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4fab6412 5791a73c
@@ -4,15 +4,20 @@
IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11

+# Some IPA versions can reuse another set of GSI register definitions.
+GSI_IPA_VERSIONS := 3.1 3.5.1

obj-$(CONFIG_QCOM_IPA) += ipa.o

ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
-ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
-ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ipa_table.o ipa_interrupt.o gsi.o gsi_reg.o \
+gsi_trans.o ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o

+ipa-y += $(GSI_IPA_VERSIONS:%=reg/gsi_reg-v%.o)
ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
 */

#include <linux/types.h>
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>

#include "gsi.h"
+#include "reg.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
@@ -220,8 +221,10 @@ static u32 ev_ch_e_cntxt_1_length_encode(enum ipa_version version, u32 length)
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
+const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);

gsi->type_enabled_bitmap = val;
-iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+iowrite32(val, gsi->virt + reg_offset(reg));
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
@@ -242,22 +245,29 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val = BIT(evt_ring_id);
+const struct reg *reg;

/* There's a small chance that a previous command completed
 * after the interrupt was disabled, so make sure we have no
 * pending interrupts before we enable them.
 */
-iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+iowrite32(~0, gsi->virt + reg_offset(reg));

-iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+iowrite32(val, gsi->virt + reg_offset(reg));

gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
+const struct reg *reg;

gsi_irq_type_disable(gsi, GSI_EV_CTRL);
-iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Channel commands are performed one at a time.  Their completion is
@@ -268,32 +278,43 @@ static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
u32 val = BIT(channel_id);
+const struct reg *reg;

/* There's a small chance that a previous command completed
 * after the interrupt was disabled, so make sure we have no
 * pending interrupts before we enable them.
 */
-iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+iowrite32(~0, gsi->virt + reg_offset(reg));

-iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+iowrite32(val, gsi->virt + reg_offset(reg));

gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
+const struct reg *reg;

gsi_irq_type_disable(gsi, GSI_CH_CTRL);
-iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+iowrite32(0, gsi->virt + reg_offset(reg));
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
+const struct reg *reg;
u32 val;

gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);

+reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
-iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+iowrite32(val, gsi->virt + reg_offset(reg));

/* Enable the interrupt type if this is the first channel enabled */
if (enable_ieob)
@@ -302,6 +323,7 @@ static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
+const struct reg *reg;
u32 val;

gsi->ieob_enabled_bitmap &= ~event_mask;
@@ -310,8 +332,9 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
if (!gsi->ieob_enabled_bitmap)
gsi_irq_type_disable(gsi, GSI_IEOB);

+reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
-iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
@@ -322,12 +345,15 @@ static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
/* Enable all GSI_interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
+const struct reg *reg;
u32 val;

/* Global interrupts include hardware error reports.  Enable
 * that so we can at least report the error should it occur.
 */
-iowrite32(ERROR_INT, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));

gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);

/* General GSI interrupts are reported to all EEs; if they occur
@@ -335,21 +361,28 @@ static void gsi_irq_enable(struct gsi *gsi)
 * also exists, but we don't support that.  We want to be notified
 * of errors so we can report them, even if they can't be handled.
 */
+reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
val = BUS_ERROR;
val |= CMD_FIFO_OVRFLOW;
val |= MCS_STACK_OVRFLOW;
-iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+iowrite32(val, gsi->virt + reg_offset(reg));

gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
+const struct reg *reg;

gsi_irq_type_update(gsi, 0);

/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
-iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
-iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+iowrite32(0, gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+iowrite32(0, gsi->virt + reg_offset(reg));
}

/* Return the virtual address associated with a ring index */
@@ -391,9 +424,10 @@ static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
+const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
u32 val;

-val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));

return u32_get_bits(val, EV_CHSTATE_FMASK);
}
@@ -403,16 +437,18 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
struct device *dev = gsi->dev;
+const struct reg *reg;
bool timeout;
u32 val;

/* Enable the completion interrupt for the command */
gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

+reg = gsi_reg(gsi, EV_CH_CMD);
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
-timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
+timeout = !gsi_command(gsi, reg_offset(reg), val);

gsi_irq_ev_ctrl_disable(gsi);
@@ -499,11 +535,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
+const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0);
u32 channel_id = gsi_channel_id(channel);
-void __iomem *virt = channel->gsi->virt;
+struct gsi *gsi = channel->gsi;
+void __iomem *virt = gsi->virt;
u32 val;

-val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+reg = gsi_reg(gsi, CH_C_CNTXT_0);
+
+val = ioread32(virt + reg_n_offset(reg, channel_id));

return u32_get_bits(val, CHSTATE_FMASK);
}
@@ -515,15 +554,18 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
+const struct reg *reg;
bool timeout;
u32 val;

/* Enable the completion interrupt for the command */
gsi_irq_ch_ctrl_enable(gsi, channel_id);

+reg = gsi_reg(gsi, CH_CMD);
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
-timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
+timeout = !gsi_command(gsi, reg_offset(reg), val);

gsi_irq_ch_ctrl_disable(gsi);
@@ -686,6 +728,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
+const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
u32 val;
@@ -693,7 +736,7 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(ring, (index - 1) % ring->count);
-iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
}
/* Program an event ring for use */
@@ -701,41 +744,56 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
+const struct reg *reg;
size_t size;
u32 val;

+reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
/* We program all event rings as GPI type/protocol */
val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
val |= EV_INTYPE_FMASK;
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
-iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

+reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
size = ring->count * GSI_RING_ELEMENT_SIZE;
val = ev_ch_e_cntxt_1_length_encode(gsi->version, size);
-iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

/* The context 2 and 3 registers store the low-order and
 * high-order 32 bits of the address of the event ring,
 * respectively.
 */
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
val = lower_32_bits(ring->addr);
-iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

+reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
val = upper_32_bits(ring->addr);
-iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

/* Enable interrupt moderation by setting the moderation delay */
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
-iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));

/* No MSI write data, and MSI address high and low address is 0 */
-iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
-iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
-iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
+iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
+iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
+iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

/* We don't need to get event read pointer updates */
-iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
-iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
+iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
+iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));

/* Finally, tell the hardware our "last processed" event (arbitrary) */
gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
@@ -796,28 +854,38 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
union gsi_channel_scratch scr = { };
struct gsi_channel_scratch_gpi *gpi;
struct gsi *gsi = channel->gsi;
+const struct reg *reg;
u32 wrr_weight = 0;
+u32 offset;
u32 val;

+reg = gsi_reg(gsi, CH_C_CNTXT_0);
/* We program all channels as GPI type/protocol */
val = ch_c_cntxt_0_type_encode(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
val |= CHTYPE_DIR_FMASK;
val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
-iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

+reg = gsi_reg(gsi, CH_C_CNTXT_1);
val = ch_c_cntxt_1_length_encode(gsi->version, size);
-iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

/* The context 2 and 3 registers store the low-order and
 * high-order 32 bits of the address of the channel ring,
 * respectively.
 */
+reg = gsi_reg(gsi, CH_C_CNTXT_2);
val = lower_32_bits(channel->tre_ring.addr);
-iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

+reg = gsi_reg(gsi, CH_C_CNTXT_3);
val = upper_32_bits(channel->tre_ring.addr);
-iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

+reg = gsi_reg(gsi, CH_C_QOS);
+
/* Command channel gets low weighted round-robin priority */
if (channel->command)
@@ -845,7 +913,7 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
if (gsi->version >= IPA_VERSION_4_9)
val |= DB_IN_BYTES;

-iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
@@ -853,22 +921,27 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

+reg = gsi_reg(gsi, CH_C_SCRATCH_0);
val = scr.data.word1;
-iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

+reg = gsi_reg(gsi, CH_C_SCRATCH_1);
val = scr.data.word2;
-iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

+reg = gsi_reg(gsi, CH_C_SCRATCH_2);
val = scr.data.word3;
-iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));

/* We must preserve the upper 16 bits of the last scratch register.
 * The next sequence assumes those bits remain unchanged between the
 * read and the write.
 */
-val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+reg = gsi_reg(gsi, CH_C_SCRATCH_3);
+offset = reg_n_offset(reg, channel_id);
+val = ioread32(gsi->virt + offset);
val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
-iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+iowrite32(val, gsi->virt + offset);

/* All done! */
}
@@ -1084,10 +1157,14 @@ static void gsi_trans_tx_completed(struct gsi_trans *trans)
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
+const struct reg *reg;
u32 channel_mask;

-channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
-iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
+channel_mask = ioread32(gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+iowrite32(channel_mask, gsi->virt + reg_offset(reg));

while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
@@ -1101,10 +1178,14 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
+const struct reg *reg;
u32 event_mask;

-event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
-iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
+event_mask = ioread32(gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+iowrite32(event_mask, gsi->virt + reg_offset(reg));

while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1154,15 +1235,22 @@ static void gsi_isr_glob_err(struct gsi *gsi)
{
enum gsi_err_type type;
enum gsi_err_code code;
+const struct reg *reg;
+u32 offset;
u32 which;
u32 val;
u32 ee;

/* Get the logged error, then reinitialize the log */
-val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
-iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
-iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
+reg = gsi_reg(gsi, ERROR_LOG);
+offset = reg_offset(reg);
+val = ioread32(gsi->virt + offset);
+iowrite32(0, gsi->virt + offset);
+
+reg = gsi_reg(gsi, ERROR_LOG_CLR);
+iowrite32(~0, gsi->virt + reg_offset(reg));

+/* Parse the error value */
ee = u32_get_bits(val, ERR_EE_FMASK);
type = u32_get_bits(val, ERR_TYPE_FMASK);
which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
@@ -1179,6 +1267,7 @@ static void gsi_isr_glob_err(struct gsi *gsi)
/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
+const struct reg *reg;
u32 result;
u32 val;
@@ -1201,7 +1290,8 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
 * In either case, we silently ignore a INCORRECT_CHANNEL_STATE
 * error if we receive it.
 */
-val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+val = ioread32(gsi->virt + reg_offset(reg));
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

switch (result) {
@@ -1226,14 +1316,17 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
+const struct reg *reg;
u32 val;

-val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
+val = ioread32(gsi->virt + reg_offset(reg));

if (val & ERROR_INT)
gsi_isr_glob_err(gsi);

-iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
+iowrite32(val, gsi->virt + reg_offset(reg));

val &= ~ERROR_INT;
@@ -1249,11 +1342,16 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
+const struct reg *reg;
u32 event_mask;

-event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
+event_mask = ioread32(gsi->virt + reg_offset(reg));
gsi_irq_ieob_disable(gsi, event_mask);
-iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
+
+reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
+iowrite32(event_mask, gsi->virt + reg_offset(reg));

while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1268,10 +1366,14 @@ static void gsi_isr_ieob(struct gsi *gsi)
static void gsi_isr_general(struct gsi *gsi)
{
struct device *dev = gsi->dev;
+const struct reg *reg;
u32 val;

-val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
-iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
+val = ioread32(gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
+iowrite32(val, gsi->virt + reg_offset(reg));

dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
@@ -1287,17 +1389,25 @@ static void gsi_isr_general(struct gsi *gsi)
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
struct gsi *gsi = dev_id;
+const struct reg *reg;
u32 intr_mask;
u32 cnt = 0;
+u32 offset;
+
+reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
+offset = reg_offset(reg);

/* enum gsi_irq_type_id defines GSI interrupt types */
-while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
+while ((intr_mask = ioread32(gsi->virt + offset))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
u32 gsi_intr = BIT(__ffs(intr_mask));

intr_mask ^= gsi_intr;

+/* Note: the IRQ condition for each type is cleared
+ * when the type-specific register is updated.
+ */
switch (gsi_intr) {
case GSI_CH_CTRL:
gsi_isr_chan_ctrl(gsi);
@@ -1502,11 +1612,13 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
struct gsi_ring *tre_ring = &channel->tre_ring;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
+const struct reg *reg;
u32 val;

+reg = gsi_reg(gsi, CH_C_DOORBELL_0);
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
-iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
+iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
}

/* Consult hardware, move newly completed transactions to completed state */
@@ -1517,6 +1629,7 @@ void gsi_channel_update(struct gsi_channel *channel)
struct gsi_evt_ring *evt_ring;
struct gsi_trans *trans;
struct gsi_ring *ring;
+const struct reg *reg;
u32 offset;
u32 index;
@@ -1526,7 +1639,8 @@ void gsi_channel_update(struct gsi_channel *channel)
/* See if there's anything new to process; if not, we're done.  Note
 * that index always refers to an entry *within* the event ring.
 */
-offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
+reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
+offset = reg_n_offset(reg, evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
return;
@@ -1677,7 +1791,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode,
u8 params)
{
+const struct reg *reg;
bool timeout;
+u32 offset;
u32 val;

/* The error global interrupt type is always enabled (until we tear
@@ -1689,25 +1805,31 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
 * channel), and only from this function.  So we enable the GP_INT1
 * IRQ type here, and disable it again after the command completes.
 */
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
val = ERROR_INT | GP_INT1;
-iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+iowrite32(val, gsi->virt + reg_offset(reg));

/* First zero the result code field */
-val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+offset = reg_offset(reg);
+val = ioread32(gsi->virt + offset);
val &= ~GENERIC_EE_RESULT_FMASK;
-iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+iowrite32(val, gsi->virt + offset);

/* Now issue the command */
+reg = gsi_reg(gsi, GENERIC_CMD);
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
if (gsi->version >= IPA_VERSION_4_11)
val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);

-timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
+timeout = !gsi_command(gsi, reg_offset(reg), val);

/* Disable the GP_INT1 IRQ type again */
-iowrite32(ERROR_INT, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));

if (!timeout)
return gsi->result;
@@ -1864,32 +1986,41 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
+const struct reg *reg;
int ret;

/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
-iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+reg = gsi_reg(gsi, CNTXT_INTSET);
+iowrite32(1, gsi->virt + reg_offset(reg));

/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);

/* Clear all type-specific interrupt masks */
-iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
-iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+iowrite32(0, gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+iowrite32(0, gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+iowrite32(0, gsi->virt + reg_offset(reg));
+
+reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
+iowrite32(0, gsi->virt + reg_offset(reg));

/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
if (gsi->version > IPA_VERSION_3_1) {
-u32 offset;
/* These registers are in the non-adjusted address range */
-offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
-iowrite32(0, gsi->virt_raw + offset);
-offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
-iowrite32(0, gsi->virt_raw + offset);
+reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
+iowrite32(0, gsi->virt_raw + reg_offset(reg));
+
+reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
+iowrite32(0, gsi->virt_raw + reg_offset(reg));
}

-iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+iowrite32(0, gsi->virt + reg_offset(reg));

ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
if (ret)
@@ -1907,6 +2038,7 @@ static void gsi_irq_teardown(struct gsi *gsi)
static int gsi_ring_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
+const struct reg *reg;
u32 count;
u32 val;
@@ -1918,7 +2050,8 @@ static int gsi_ring_setup(struct gsi *gsi)
return 0;
}

-val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+reg = gsi_reg(gsi, HW_PARAM_2);
+val = ioread32(gsi->virt + reg_offset(reg));
count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
if (!count) {
@@ -1951,11 +2084,13 @@ static int gsi_ring_setup(struct gsi *gsi)
/* Setup function for GSI.  GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
+const struct reg *reg;
u32 val;
int ret;

/* Here is where we first touch the GSI hardware */
-val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
+reg = gsi_reg(gsi, GSI_STATUS);
+val = ioread32(gsi->virt + reg_offset(reg));
if (!(val & ENABLED_FMASK)) {
dev_err(gsi->dev, "GSI has not been enabled\n");
return -EIO;
@@ -1970,7 +2105,8 @@ int gsi_setup(struct gsi *gsi)
goto err_irq_teardown;

/* Initialize the error log */
-iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+reg = gsi_reg(gsi, ERROR_LOG);
+iowrite32(0, gsi->virt + reg_offset(reg));

ret = gsi_channel_setup(gsi);
if (ret)
@@ -2241,67 +2377,37 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
enum ipa_version version, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
-struct device *dev = &pdev->dev;
-struct resource *res;
-resource_size_t size;
-u32 adjust;
int ret;

gsi_validate_build();

-gsi->dev = dev;
+gsi->dev = &pdev->dev;
gsi->version = version;

/* GSI uses NAPI on all channels.  Create a dummy network device
 * for the channel NAPI contexts to be associated with.
 */
init_dummy_netdev(&gsi->dummy_dev);

-/* Get GSI memory range and map it */
-res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
-if (!res) {
-dev_err(dev, "DT error getting \"gsi\" memory property\n");
-return -ENODEV;
-}
-size = resource_size(res);
-if (res->start > U32_MAX || size > U32_MAX - res->start) {
-dev_err(dev, "DT memory resource \"gsi\" out of range\n");
-return -EINVAL;
-}
-/* Make sure we can make our pointer adjustment if necessary */
-adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
-if (res->start < adjust) {
-dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
-adjust);
-return -EINVAL;
-}
-gsi->virt_raw = ioremap(res->start, size);
-if (!gsi->virt_raw) {
-dev_err(dev, "unable to remap \"gsi\" memory\n");
-return -ENOMEM;
-}
-/* Most registers are accessed using an adjusted register range */
-gsi->virt = gsi->virt_raw - adjust;

init_completion(&gsi->completion);

+ret = gsi_reg_init(gsi, pdev);
+if (ret)
+return ret;
+
ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
-goto err_iounmap;
+goto err_reg_exit;

ret = gsi_channel_init(gsi, count, data);
if (ret)
-goto err_iounmap;
+goto err_reg_exit;

mutex_init(&gsi->mutex);

return 0;

-err_iounmap:
-iounmap(gsi->virt_raw);
+err_reg_exit:
+gsi_reg_exit(gsi);

return ret;
}
@@ -2311,7 +2417,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
-iounmap(gsi->virt_raw);
+gsi_reg_exit(gsi);
}

/* The maximum number of outstanding TREs on a channel.  This limits
...
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
 */
#ifndef _GSI_H_
#define _GSI_H_
@@ -142,6 +142,8 @@ struct gsi {
enum ipa_version version;
void __iomem *virt_raw; /* I/O mapped address range */
void __iomem *virt; /* Adjusted for most registers */
+const struct regs *regs;
u32 irq;
u32 channel_count;
u32 evt_ring_count;
...
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/platform_device.h>
#include <linux/io.h>
#include "gsi.h"
#include "reg.h"
#include "gsi_reg.h"
/* GSI EE registers as a group are shifted downward by a fixed constant amount
* for IPA versions 4.5 and beyond. This applies to all GSI registers we use
* *except* the ones that disable inter-EE interrupts for channels and event
* channels.
*
* The "raw" (not adjusted) GSI register range is mapped, and a pointer to
* the mapped range is held in gsi->virt_raw. The inter-EE interrupt
* registers are accessed using that pointer.
*
* Most registers are accessed using gsi->virt, which is a copy of the "raw"
* pointer, adjusted downward by the fixed amount.
*/
#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
/* Is this register ID valid for the current GSI version? */
static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
{
switch (reg_id) {
case INTER_EE_SRC_CH_IRQ_MSK:
case INTER_EE_SRC_EV_CH_IRQ_MSK:
case CH_C_CNTXT_0:
case CH_C_CNTXT_1:
case CH_C_CNTXT_2:
case CH_C_CNTXT_3:
case CH_C_QOS:
case CH_C_SCRATCH_0:
case CH_C_SCRATCH_1:
case CH_C_SCRATCH_2:
case CH_C_SCRATCH_3:
case EV_CH_E_CNTXT_0:
case EV_CH_E_CNTXT_1:
case EV_CH_E_CNTXT_2:
case EV_CH_E_CNTXT_3:
case EV_CH_E_CNTXT_4:
case EV_CH_E_CNTXT_8:
case EV_CH_E_CNTXT_9:
case EV_CH_E_CNTXT_10:
case EV_CH_E_CNTXT_11:
case EV_CH_E_CNTXT_12:
case EV_CH_E_CNTXT_13:
case EV_CH_E_SCRATCH_0:
case EV_CH_E_SCRATCH_1:
case CH_C_DOORBELL_0:
case EV_CH_E_DOORBELL_0:
case GSI_STATUS:
case CH_CMD:
case EV_CH_CMD:
case GENERIC_CMD:
case HW_PARAM_2:
case CNTXT_TYPE_IRQ:
case CNTXT_TYPE_IRQ_MSK:
case CNTXT_SRC_CH_IRQ:
case CNTXT_SRC_CH_IRQ_MSK:
case CNTXT_SRC_CH_IRQ_CLR:
case CNTXT_SRC_EV_CH_IRQ:
case CNTXT_SRC_EV_CH_IRQ_MSK:
case CNTXT_SRC_EV_CH_IRQ_CLR:
case CNTXT_SRC_IEOB_IRQ:
case CNTXT_SRC_IEOB_IRQ_MSK:
case CNTXT_SRC_IEOB_IRQ_CLR:
case CNTXT_GLOB_IRQ_STTS:
case CNTXT_GLOB_IRQ_EN:
case CNTXT_GLOB_IRQ_CLR:
case CNTXT_GSI_IRQ_STTS:
case CNTXT_GSI_IRQ_EN:
case CNTXT_GSI_IRQ_CLR:
case CNTXT_INTSET:
case ERROR_LOG:
case ERROR_LOG_CLR:
case CNTXT_SCRATCH_0:
return true;
default:
return false;
}
}
const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)
{
if (WARN(!gsi_reg_id_valid(gsi, reg_id), "invalid reg %u\n", reg_id))
return NULL;
return reg(gsi->regs, reg_id);
}
static const struct regs *gsi_regs(struct gsi *gsi)
{
switch (gsi->version) {
case IPA_VERSION_3_1:
return &gsi_regs_v3_1;
case IPA_VERSION_3_5_1:
case IPA_VERSION_4_2:
case IPA_VERSION_4_5:
case IPA_VERSION_4_7:
case IPA_VERSION_4_9:
case IPA_VERSION_4_11:
return &gsi_regs_v3_5_1;
default:
return NULL;
}
}
/* Sets gsi->virt_raw and gsi->virt, and I/O maps the "gsi" memory range */
int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
resource_size_t size;
u32 adjust;
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
return -ENODEV;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
return -EINVAL;
}
/* Make sure we can make our pointer adjustment if necessary */
adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
if (res->start < adjust) {
dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
adjust);
return -EINVAL;
}
gsi->regs = gsi_regs(gsi);
if (!gsi->regs) {
dev_err(dev, "unsupported IPA version %u (?)\n", gsi->version);
return -EINVAL;
}
gsi->virt_raw = ioremap(res->start, size);
if (!gsi->virt_raw) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
return -ENOMEM;
}
/* Most registers are accessed using an adjusted register range */
gsi->virt = gsi->virt_raw - adjust;
return 0;
}
/* Inverse of gsi_reg_init() */
void gsi_reg_exit(struct gsi *gsi)
{
gsi->virt = NULL;
iounmap(gsi->virt_raw);
gsi->virt_raw = NULL;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
 */
#ifndef _GSI_REG_H_
#define _GSI_REG_H_

-/* === Only "gsi.c" should include this file === */
+/* === Only "gsi.c" and "gsi_reg.c" should include this file === */

#include <linux/bits.h>
@@ -38,32 +38,64 @@
 * (though the actual limit is hardware-dependent).
 */

-/* GSI EE registers as a group are shifted downward by a fixed constant amount
- * for IPA versions 4.5 and beyond.  This applies to all GSI registers we use
- * *except* the ones that disable inter-EE interrupts for channels and event
- * channels.
- *
- * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
- * the mapped range is held in gsi->virt_raw.  The inter-EE interrupt
- * registers are accessed using that pointer.
- *
- * Most registers are accessed using gsi->virt, which is a copy of the "raw"
- * pointer, adjusted downward by the fixed amount.
- */
-#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
-
-/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
-
-#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
-(0x0000c020 + 0x1000 * GSI_EE_AP)
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
-(0x0000c024 + 0x1000 * GSI_EE_AP)
-
-/* All other register offsets are relative to gsi->virt */
+/* enum gsi_reg_id - GSI register IDs */
+enum gsi_reg_id {
+INTER_EE_SRC_CH_IRQ_MSK, /* IPA v3.5+ */
+INTER_EE_SRC_EV_CH_IRQ_MSK, /* IPA v3.5+ */
+CH_C_CNTXT_0,
+CH_C_CNTXT_1,
+CH_C_CNTXT_2,
+CH_C_CNTXT_3,
+CH_C_QOS,
+CH_C_SCRATCH_0,
+CH_C_SCRATCH_1,
+CH_C_SCRATCH_2,
+CH_C_SCRATCH_3,
+EV_CH_E_CNTXT_0,
+EV_CH_E_CNTXT_1,
+EV_CH_E_CNTXT_2,
+EV_CH_E_CNTXT_3,
+EV_CH_E_CNTXT_4,
+EV_CH_E_CNTXT_8,
+EV_CH_E_CNTXT_9,
+EV_CH_E_CNTXT_10,
+EV_CH_E_CNTXT_11,
+EV_CH_E_CNTXT_12,
+EV_CH_E_CNTXT_13,
+EV_CH_E_SCRATCH_0,
+EV_CH_E_SCRATCH_1,
+CH_C_DOORBELL_0,
+EV_CH_E_DOORBELL_0,
+GSI_STATUS,
+CH_CMD,
+EV_CH_CMD,
+GENERIC_CMD,
+HW_PARAM_2, /* IPA v3.5.1+ */
+CNTXT_TYPE_IRQ,
+CNTXT_TYPE_IRQ_MSK,
+CNTXT_SRC_CH_IRQ,
+CNTXT_SRC_CH_IRQ_MSK,
+CNTXT_SRC_CH_IRQ_CLR,
+CNTXT_SRC_EV_CH_IRQ,
+CNTXT_SRC_EV_CH_IRQ_MSK,
+CNTXT_SRC_EV_CH_IRQ_CLR,
+CNTXT_SRC_IEOB_IRQ,
+CNTXT_SRC_IEOB_IRQ_MSK,
+CNTXT_SRC_IEOB_IRQ_CLR,
+CNTXT_GLOB_IRQ_STTS,
+CNTXT_GLOB_IRQ_EN,
+CNTXT_GLOB_IRQ_CLR,
+CNTXT_GSI_IRQ_STTS,
+CNTXT_GSI_IRQ_EN,
+CNTXT_GSI_IRQ_CLR,
+CNTXT_INTSET,
+ERROR_LOG,
+ERROR_LOG_CLR,
+CNTXT_SCRATCH_0,
+GSI_REG_ID_COUNT, /* Last; not an ID */
+};

-#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
-(0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
+/* CH_C_CNTXT_0 register */
#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
@@ -88,17 +120,7 @@ enum gsi_channel_type {
GSI_CHANNEL_TYPE_11AD = 0x9,
};

-#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
-(0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
-(0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
-(0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_CH_C_QOS_OFFSET(ch) \
-(0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
+/* CH_C_QOS register */
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
@@ -118,20 +140,7 @@ enum gsi_prefetch_mode {
GSI_FREE_PREFETCH = 0x3,
};

-#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
-(0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
-(0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
-(0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
-(0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
-(0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
+/* EV_CH_E_CNTXT_0 register */
/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define EV_CHTYPE_FMASK GENMASK(3, 0)
#define EV_EE_FMASK GENMASK(7, 4)
@@ -140,57 +149,15 @@ enum gsi_prefetch_mode {
#define EV_CHSTATE_FMASK GENMASK(23, 20)
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \ /* EV_CH_E_CNTXT_8 register */
(0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
(0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
(0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
(0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
(0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define MODT_FMASK GENMASK(15, 0) #define MODT_FMASK GENMASK(15, 0)
#define MODC_FMASK GENMASK(23, 16) #define MODC_FMASK GENMASK(23, 16)
#define MOD_CNT_FMASK GENMASK(31, 24) #define MOD_CNT_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \ /* GSI_STATUS register */
(0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
(0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
(0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
(0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
(0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
(0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
(0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
(0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
(0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
#define GSI_GSI_STATUS_OFFSET \
(0x0001f000 + 0x4000 * GSI_EE_AP)
#define ENABLED_FMASK GENMASK(0, 0) #define ENABLED_FMASK GENMASK(0, 0)
#define GSI_CH_CMD_OFFSET \ /* CH_CMD register */
(0x0001f008 + 0x4000 * GSI_EE_AP)
#define CH_CHID_FMASK GENMASK(7, 0) #define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24) #define CH_OPCODE_FMASK GENMASK(31, 24)
...@@ -204,8 +171,7 @@ enum gsi_ch_cmd_opcode { ...@@ -204,8 +171,7 @@ enum gsi_ch_cmd_opcode {
GSI_CH_DB_STOP = 0xb, GSI_CH_DB_STOP = 0xb,
}; };
#define GSI_EV_CH_CMD_OFFSET \ /* EV_CH_CMD register */
(0x0001f010 + 0x4000 * GSI_EE_AP)
#define EV_CHID_FMASK GENMASK(7, 0) #define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24) #define EV_OPCODE_FMASK GENMASK(31, 24)
...@@ -216,8 +182,7 @@ enum gsi_evt_cmd_opcode { ...@@ -216,8 +182,7 @@ enum gsi_evt_cmd_opcode {
GSI_EVT_DE_ALLOC = 0xa, GSI_EVT_DE_ALLOC = 0xa,
}; };
#define GSI_GENERIC_CMD_OFFSET \ /* GENERIC_CMD register */
(0x0001f018 + 0x4000 * GSI_EE_AP)
#define GENERIC_OPCODE_FMASK GENMASK(4, 0) #define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5) #define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10) #define GENERIC_EE_FMASK GENMASK(13, 10)
...@@ -232,9 +197,7 @@ enum gsi_generic_cmd_opcode { ...@@ -232,9 +197,7 @@ enum gsi_generic_cmd_opcode {
GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};
/* The next register is present for IPA v3.5.1 and above */
#define GSI_GSI_HW_PARAM_2_OFFSET \
(0x0001f040 + 0x4000 * GSI_EE_AP)
/* HW_PARAM_2 register */ /* IPA v3.5.1+ */
#define IRAM_SIZE_FMASK GENMASK(2, 0)
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
...@@ -261,12 +224,6 @@ enum gsi_iram_size {
IRAM_SIZE_FOUR_KB = 0x5,
};
/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
(0x0001f080 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
(0x0001f088 + 0x4000 * GSI_EE_AP)
/**
 * enum gsi_irq_type_id: GSI IRQ types
 * @GSI_CH_CTRL: Channel allocation, deallocation, etc.
...@@ -288,40 +245,6 @@ enum gsi_irq_type_id {
/* IRQ types 7-31 (and their bit values) are reserved */
};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
(0x0001f090 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
(0x0001f094 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
(0x0001f098 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
(0x0001f09c + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
(0x0001f0a0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
(0x0001f0a4 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
(0x0001f0b0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
(0x0001f0b8 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
(0x0001f0c0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
(0x0001f100 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
(0x0001f108 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
(0x0001f110 + 0x4000 * GSI_EE_AP)
/** enum gsi_global_irq_id: Global GSI interrupt events */
enum gsi_global_irq_id {
ERROR_INT = BIT(0),
...@@ -331,13 +254,6 @@ enum gsi_global_irq_id {
/* Global IRQ types 4-31 (and their bit values) are reserved */
};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
(0x0001f118 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
(0x0001f120 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
(0x0001f128 + 0x4000 * GSI_EE_AP)
/** enum gsi_general_irq_id: GSI general IRQ conditions */
enum gsi_general_irq_id {
BREAK_POINT = BIT(0),
...@@ -347,13 +263,10 @@ enum gsi_general_irq_id {
/* General IRQ types 4-31 (and their bit values) are reserved */
};
#define GSI_CNTXT_INTSET_OFFSET \
(0x0001f180 + 0x4000 * GSI_EE_AP)
/* CNTXT_INTSET register */
#define INTYPE_FMASK GENMASK(0, 0)
#define GSI_ERROR_LOG_OFFSET \
(0x0001f200 + 0x4000 * GSI_EE_AP)
/* ERROR_LOG register */
#define ERR_ARG3_FMASK GENMASK(3, 0)
#define ERR_ARG2_FMASK GENMASK(7, 4)
#define ERR_ARG1_FMASK GENMASK(11, 8)
...@@ -381,11 +294,7 @@ enum gsi_err_type {
GSI_ERR_TYPE_EVT = 0x3,
};
#define GSI_ERROR_LOG_CLR_OFFSET \
(0x0001f210 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SCRATCH_0_OFFSET \
(0x0001f400 + 0x4000 * GSI_EE_AP)
/* CNTXT_SCRATCH_0 register */
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
...@@ -400,4 +309,31 @@ enum gsi_generic_ee_result {
GENERIC_EE_NO_RESOURCES = 0x7,
};
extern const struct regs gsi_regs_v3_1;
extern const struct regs gsi_regs_v3_5_1;
/**
* gsi_reg() - Return the structure describing a GSI register
* @gsi: GSI pointer
* @reg_id: GSI register ID
*/
const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id);
/**
* gsi_reg_init() - Perform GSI register initialization
* @gsi: GSI pointer
* @pdev: GSI (IPA) platform device
*
* Initialize GSI registers, including looking up and I/O mapping
* the "gsi" memory space. This function sets gsi->virt_raw and
* gsi->virt.
*/
int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev);
/**
* gsi_reg_exit() - Inverse of gsi_reg_init()
* @gsi: GSI pointer
*/
void gsi_reg_exit(struct gsi *gsi);
#endif /* _GSI_REG_H_ */
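The declarations above replace the per-register *_OFFSET macros this diff removes: callers look up a register descriptor by ID and derive the offset from it. A minimal sketch of that calling pattern follows; the helper names gsi_status_read() and gsi_evt_ring_doorbell() are invented for illustration, and reg_offset()/reg_n_offset() are assumed to be the inline helpers the driver's "reg.h" already provides for IPA registers.

/* Sketch only: illustrates the lookup-then-offset pattern for GSI registers.
 * Assumes reg_offset()/reg_n_offset() from "reg.h"; not part of this patch.
 */
static u32 gsi_status_read(struct gsi *gsi)
{
	const struct reg *reg = gsi_reg(gsi, GSI_STATUS);

	/* Non-strided register: the offset comes straight from the descriptor */
	return ioread32(gsi->virt + reg_offset(reg));
}

static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 val)
{
	const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);

	/* Strided register: reg_n_offset() applies the per-instance stride */
	iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
}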
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
/* All other register offsets are relative to gsi->virt */
REG_STRIDE(CH_C_CNTXT_0, ch_c_cntxt_0, 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_1, ch_c_cntxt_1, 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
REG(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
REG(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
REG(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
REG(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
REG(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x0001f098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001f09c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x0001f0a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x0001f0a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x0001f0b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x0001f0c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
REG(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v3_1 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
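The reg_array/struct regs layout above is what gsi_reg() presumably indexes at run time. A hedged sketch of such a lookup, assuming struct gsi carries a pointer to the per-version table (the gsi->regs field name and the error handling are assumptions, not taken from this patch):

/* Hypothetical sketch of the run-time lookup gsi_reg() could perform against
 * a per-version table such as gsi_regs_v3_1 above; the real gsi_reg.c may
 * validate its argument differently.
 */
const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)
{
	const struct regs *regs = gsi->regs;	/* chosen by gsi_reg_init() (assumed field) */

	if (WARN(reg_id >= regs->reg_count, "unsupported GSI register %u\n", reg_id))
		return NULL;

	/* Entries a given GSI version does not define are simply NULL */
	return regs->reg[reg_id];
}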
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2023 Linaro Ltd. */
#include <linux/types.h>
#include "../gsi.h"
#include "../reg.h"
#include "../gsi_reg.h"
/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
0x0000c024 + 0x1000 * GSI_EE_AP);
/* All other register offsets are relative to gsi->virt */
REG_STRIDE(CH_C_CNTXT_0, ch_c_cntxt_0, 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_1, ch_c_cntxt_1, 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
REG(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
REG(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
REG(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
REG(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
REG(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
REG(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
0x0001f098 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
0x0001f09c + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
0x0001f0a0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
0x0001f0a4 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
0x0001f0b8 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
0x0001f0c0 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
REG(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
REG(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
static const struct reg *reg_array[] = {
[INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
[INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
[CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
[CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
[CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
[CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
[CH_C_QOS] = &reg_ch_c_qos,
[CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
[CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
[CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
[CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
[EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
[EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
[EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
[EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
[EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
[EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
[EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
[EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
[EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
[EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
[EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
[EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
[EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
[CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
[EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
[GSI_STATUS] = &reg_gsi_status,
[CH_CMD] = &reg_ch_cmd,
[EV_CH_CMD] = &reg_ev_ch_cmd,
[GENERIC_CMD] = &reg_generic_cmd,
[HW_PARAM_2] = &reg_hw_param_2,
[CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
[CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
[CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
[CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
[CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
[CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
[CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
[CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
[CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
[CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
[CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
[CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
[CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
[CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
[CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
[CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
[CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
[CNTXT_INTSET] = &reg_cntxt_intset,
[ERROR_LOG] = &reg_error_log,
[ERROR_LOG_CLR] = &reg_error_log_clr,
[CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
};
const struct regs gsi_regs_v3_5_1 = {
.reg_count = ARRAY_SIZE(reg_array),
.reg = reg_array,
};
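The only difference from the v3.1 table is the HW_PARAM_2 entry, which exists only for IPA v3.5.1 and later. A hedged sketch of reading it through the new mechanism, reusing the field masks still defined in gsi_reg.h (the helper name gsi_hw_param_2_log() is invented for illustration):

/* Sketch only: read HW_PARAM_2 (IPA v3.5.1+) via the register lookup and
 * decode two of its fields with the existing GENMASK-based field masks.
 */
#include <linux/bitfield.h>

static void gsi_hw_param_2_log(struct gsi *gsi)
{
	const struct reg *reg = gsi_reg(gsi, HW_PARAM_2);
	u32 val = ioread32(gsi->virt + reg_offset(reg));

	dev_dbg(gsi->dev, "channels %u, event rings %u\n",
		u32_get_bits(val, NUM_CH_PER_EE_FMASK),
		u32_get_bits(val, NUM_EV_PER_EE_FMASK));
}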