Commit 72e1dcc4 authored by Joerg Roedel

iommu/amd: Implement notifier for PPR faults

Add a notifier to which a module can attach to get informed
about incoming PPR faults.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 5abcdba4

@@ -17,6 +17,7 @@
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>

@@ -28,6 +29,8 @@
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>

@@ -59,6 +62,8 @@ static struct protection_domain *pt_domain;
static struct iommu_ops amd_iommu_ops;

static ATOMIC_NOTIFIER_HEAD(ppr_notifier);

/*
 * general struct to manage commands send to an IOMMU
 */

@@ -488,12 +493,82 @@ static void iommu_poll_events(struct amd_iommu *iommu)
        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
{
        struct amd_iommu_fault fault;
        volatile u64 *raw;
        int i;

        raw = (u64 *)(iommu->ppr_log + head);

        /*
         * Hardware bug: Interrupt may arrive before the entry is written to
         * memory. If this happens we need to wait for the entry to arrive.
         */
        for (i = 0; i < LOOP_TIMEOUT; ++i) {
                if (PPR_REQ_TYPE(raw[0]) != 0)
                        break;
                udelay(1);
        }

        if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
                pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
                return;
        }

        fault.address   = raw[1];
        fault.pasid     = PPR_PASID(raw[0]);
        fault.device_id = PPR_DEVID(raw[0]);
        fault.tag       = PPR_TAG(raw[0]);
        fault.flags     = PPR_FLAGS(raw[0]);

        /*
         * To detect the hardware bug we need to clear the entry
         * back to zero.
         */
        raw[0] = raw[1] = 0;

        atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}

static void iommu_poll_ppr_log(struct amd_iommu *iommu)
{
        unsigned long flags;
        u32 head, tail;

        if (iommu->ppr_log == NULL)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);

        while (head != tail) {
                /* Handle PPR entry */
                iommu_handle_ppr_entry(iommu, head);

                /* Update and refresh ring-buffer state */
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
                tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
        }

        /* enable ppr interrupts again */
        writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
        struct amd_iommu *iommu;

        for_each_iommu(iommu) {
                iommu_poll_events(iommu);
                iommu_poll_ppr_log(iommu);
        }

        return IRQ_HANDLED;
}

@@ -2888,3 +2963,16 @@ int __init amd_iommu_init_passthrough(void)
        return 0;
}

/* IOMMUv2 specific functions */
int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);

int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&ppr_notifier, nb);
}
EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
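
For reference, here is a minimal sketch of how an in-tree consumer module might attach to this chain. The module and callback names are hypothetical and not part of the patch; only amd_iommu_register_ppr_notifier()/amd_iommu_unregister_ppr_notifier() and the fault data passed to the chain come from it. Because ppr_notifier is an atomic notifier chain, the callback runs in atomic context and must not sleep.

/* Hypothetical consumer sketch - not part of this patch. Assumes it is
 * built in-tree with access to the driver-internal header that declares
 * the register/unregister functions. */
#include <linux/module.h>
#include <linux/notifier.h>

static int ppr_fault_cb(struct notifier_block *nb, unsigned long e, void *data)
{
        /* 'data' points to the struct amd_iommu_fault filled in by
         * iommu_handle_ppr_entry(); 'e' is always 0 in this patch. */
        return NOTIFY_OK;
}

static struct notifier_block ppr_nb = {
        .notifier_call = ppr_fault_cb,
};

static int __init ppr_consumer_init(void)
{
        return amd_iommu_register_ppr_notifier(&ppr_nb);
}

static void __exit ppr_consumer_exit(void)
{
        amd_iommu_unregister_ppr_notifier(&ppr_nb);
}

module_init(ppr_consumer_init);
module_exit(ppr_consumer_exit);
MODULE_LICENSE("GPL");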

@@ -32,7 +32,10 @@ extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);

/* IOMMUv2 specific functions */
extern bool amd_iommu_v2_supported(void);
extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);

#ifndef CONFIG_AMD_IOMMU_STATS
...

@@ -94,7 +94,8 @@
#define FEATURE_PASID_MASK		(0x1fULL << FEATURE_PASID_SHIFT)

/* MMIO status bits */
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK (1 << 6)

/* event logging constants */
#define EVENT_ENTRY_SIZE		0x10

@@ -180,6 +181,16 @@
#define PPR_ENTRY_SIZE			16
#define PPR_LOG_SIZE			(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x) ((x) & 0xffffULL)
#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x) (((x) >> 42) & 0xfULL)
#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
#define PPR_REQ_FAULT 0x01
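
For illustration only (not part of the patch), here is how these accessors decode a made-up first 64-bit word of a PPR request entry; note how the 20-bit PASID is reassembled from the two raw fields:

/* Illustration with a made-up raw value - not part of this patch. */
u64 raw0 = 0x1020040523450010ULL;

/* PPR_REQ_TYPE(raw0) == 0x1    -> PPR_REQ_FAULT              */
/* PPR_DEVID(raw0)    == 0x0010 -> PCI requester 00:02.0      */
/* PPR_TAG(raw0)      == 0x005                                */
/* PPR_FLAGS(raw0)    == 0x020  -> PPR_FAULT_WRITE (below)    */
/* PPR_PASID1(raw0)   == 0x2345, PPR_PASID2(raw0) == 0x1      */
/* PPR_PASID(raw0)    == (0x1 << 16) | 0x2345 == 0x12345      */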

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02

@@ -300,6 +311,27 @@ extern bool amd_iommu_iotlb_sup;
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)

/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
        u64 address;    /* IO virtual address of the fault */
        u32 pasid;      /* Address space identifier */
        u16 device_id;  /* Originating PCI device id */
        u16 tag;        /* PPR tag */
        u16 flags;      /* Fault flags */
};

#define PPR_FAULT_EXEC (1 << 1)
#define PPR_FAULT_READ (1 << 2)
#define PPR_FAULT_WRITE (1 << 5)
#define PPR_FAULT_USER (1 << 6)
#define PPR_FAULT_RSVD (1 << 7)
#define PPR_FAULT_GN (1 << 8)
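
To show how these flag bits and struct amd_iommu_fault fit together, a hedged callback fragment follows; the function name is hypothetical, and reading PPR_FAULT_WRITE as a write-access indicator is an assumption based on the bit name, not something this patch states.

/* Hypothetical notifier callback fragment - not part of this patch. */
static int ppr_fault_report(struct notifier_block *nb, unsigned long e,
                            void *data)
{
        struct amd_iommu_fault *fault = data;   /* passed by the PPR chain */

        pr_debug("PPR fault: dev %04x pasid %x addr %llx flags %03x%s\n",
                 fault->device_id, fault->pasid,
                 (unsigned long long)fault->address, fault->flags,
                 (fault->flags & PPR_FAULT_WRITE) ? " (write)" : "");

        return NOTIFY_OK;
}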

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
...