Commit c4af8e3f authored by Mario Limonciello's avatar Mario Limonciello Committed by Mika Westerberg

thunderbolt: Clear registers properly when auto clear isn't in use

When `QUIRK_AUTO_CLEAR_INT` isn't set, interrupt masking should be
cleared by writing to Interrupt Mask Clear (IMR) and interrupt
status should be cleared properly at shutdown/init.

This fixes an error where interrupts are left enabled during resume
from hibernation with `CONFIG_USB4=y`.

Fixes: 468c49f4 ("thunderbolt: Disable interrupt auto clear for rings")
Cc: stable@vger.kernel.org # v6.3
Reported-by: Takashi Iwai <tiwai@suse.de>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217343
Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
parent ac9a7868
...@@ -54,6 +54,21 @@ static int ring_interrupt_index(const struct tb_ring *ring) ...@@ -54,6 +54,21 @@ static int ring_interrupt_index(const struct tb_ring *ring)
return bit; return bit;
} }
/*
 * Mask the given ring interrupt bits by writing @mask to the Interrupt
 * Mask Clear register. Hardware with the auto-clear quirk manages masking
 * itself, so this is a no-op there.
 */
static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{
	if (!(nhi->quirks & QUIRK_AUTO_CLEAR_INT))
		iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
}
/*
 * Clear pending interrupt status for @ring. With the auto-clear quirk a
 * read of the notify register clears the status; otherwise the status
 * bits must be cleared explicitly through the interrupt clear register.
 */
static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
{
	if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
		/* Read-to-clear semantics on quirky hardware */
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
		return;
	}

	iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
}
/* /*
* ring_interrupt_active() - activate/deactivate interrupts for a single ring * ring_interrupt_active() - activate/deactivate interrupts for a single ring
* *
...@@ -61,8 +76,8 @@ static int ring_interrupt_index(const struct tb_ring *ring) ...@@ -61,8 +76,8 @@ static int ring_interrupt_index(const struct tb_ring *ring)
*/ */
static void ring_interrupt_active(struct tb_ring *ring, bool active) static void ring_interrupt_active(struct tb_ring *ring, bool active)
{ {
int reg = REG_RING_INTERRUPT_BASE + int index = ring_interrupt_index(ring) / 32 * 4;
ring_interrupt_index(ring) / 32 * 4; int reg = REG_RING_INTERRUPT_BASE + index;
int interrupt_bit = ring_interrupt_index(ring) & 31; int interrupt_bit = ring_interrupt_index(ring) & 31;
int mask = 1 << interrupt_bit; int mask = 1 << interrupt_bit;
u32 old, new; u32 old, new;
...@@ -123,7 +138,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) ...@@ -123,7 +138,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
"interrupt for %s %d is already %s\n", "interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop, RING_TYPE(ring), ring->hop,
active ? "enabled" : "disabled"); active ? "enabled" : "disabled");
if (active)
iowrite32(new, ring->nhi->iobase + reg); iowrite32(new, ring->nhi->iobase + reg);
else
nhi_mask_interrupt(ring->nhi, mask, index);
} }
/* /*
...@@ -136,11 +155,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi) ...@@ -136,11 +155,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
int i = 0; int i = 0;
/* disable interrupts */ /* disable interrupts */
for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); nhi_mask_interrupt(nhi, ~0, 4 * i);
/* clear interrupt status bits */ /* clear interrupt status bits */
for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); nhi_clear_interrupt(nhi, 4 * i);
} }
/* ring helper methods */ /* ring helper methods */
......
...@@ -93,6 +93,8 @@ struct ring_desc { ...@@ -93,6 +93,8 @@ struct ring_desc {
#define REG_RING_INTERRUPT_BASE 0x38200 #define REG_RING_INTERRUPT_BASE 0x38200
#define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32) #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
#define REG_RING_INTERRUPT_MASK_CLEAR_BASE 0x38208
#define REG_INT_THROTTLING_RATE 0x38c00 #define REG_INT_THROTTLING_RATE 0x38c00
/* Interrupt Vector Allocation */ /* Interrupt Vector Allocation */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment