Commit 6ba6c3a4 authored by Yu Zhao, committed by David Woodhouse

VT-d: add device IOTLB invalidation support

Add support for device IOTLB invalidation, used to flush translations cached in the endpoint device.
Signed-off-by: Yu Zhao <yu.zhao@intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent aa5d2b51
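
The patch exports a new helper, qi_flush_dev_iotlb(), which the IOMMU driver can call to flush a translation cached by an ATS-capable endpoint. A minimal sketch of a call site (illustrative only; the source-id, queue depth and address below are made-up values and not part of this patch):

	/*
	 * Flush the single 4KB translation for IOVA 0x1000 cached by the
	 * endpoint at 00:02.0.  sid is the PCI source-id (bus << 8 | devfn),
	 * qdep is the invalidate queue depth reported by the device's ATS
	 * capability, and mask is the size order (0 = exactly one page).
	 */
	u16 sid  = (0x00 << 8) | PCI_DEVFN(2, 0);
	u16 qdep = 4;		/* hypothetical ATS queue depth */

	qi_flush_dev_iotlb(iommu, sid, qdep, 0x1000, 0);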
drivers/pci/dmar.c
@@ -699,7 +699,8 @@ void free_iommu(struct intel_iommu *iommu)
  */
 static inline void reclaim_free_desc(struct q_inval *qi)
 {
-	while (qi->desc_status[qi->free_tail] == QI_DONE) {
+	while (qi->desc_status[qi->free_tail] == QI_DONE ||
+	       qi->desc_status[qi->free_tail] == QI_ABORT) {
 		qi->desc_status[qi->free_tail] = QI_FREE;
 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
 		qi->free_cnt++;
@@ -709,10 +710,13 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 static int qi_check_fault(struct intel_iommu *iommu, int index)
 {
 	u32 fault;
-	int head;
+	int head, tail;
 	struct q_inval *qi = iommu->qi;
 	int wait_index = (index + 1) % QI_LENGTH;
 
+	if (qi->desc_status[wait_index] == QI_ABORT)
+		return -EAGAIN;
+
 	fault = readl(iommu->reg + DMAR_FSTS_REG);
 
 	/*
@@ -722,7 +726,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 	 */
 	if (fault & DMA_FSTS_IQE) {
 		head = readl(iommu->reg + DMAR_IQH_REG);
-		if ((head >> 4) == index) {
+		if ((head >> DMAR_IQ_SHIFT) == index) {
+			printk(KERN_ERR "VT-d detected invalid descriptor: "
+				"low=%llx, high=%llx\n",
+				(unsigned long long)qi->desc[index].low,
+				(unsigned long long)qi->desc[index].high);
 			memcpy(&qi->desc[index], &qi->desc[wait_index],
 					sizeof(struct qi_desc));
 			__iommu_flush_cache(iommu, &qi->desc[index],
@@ -732,6 +740,32 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
 		}
 	}
 
+	/*
+	 * If ITE happens, all pending wait_desc commands are aborted.
+	 * No new descriptors are fetched until the ITE is cleared.
+	 */
+	if (fault & DMA_FSTS_ITE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+		head |= 1;
+		tail = readl(iommu->reg + DMAR_IQT_REG);
+		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
+
+		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
+
+		do {
+			if (qi->desc_status[head] == QI_IN_USE)
+				qi->desc_status[head] = QI_ABORT;
+			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
+		} while (head != tail);
+
+		if (qi->desc_status[wait_index] == QI_ABORT)
+			return -EAGAIN;
+	}
+
+	if (fault & DMA_FSTS_ICE)
+		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
+
 	return 0;
 }
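
For context on the `head |= 1` and the backward step of two in the ITE path: qi_submit_sync() always claims two consecutive ring slots, the caller's descriptor immediately followed by its invalidation-wait descriptor, so wait descriptors always land on odd indices. A sketch of that slot accounting (paraphrased from the existing allocation code, not part of this patch):

	/* Inside qi_submit_sync(), under qi->q_lock: the work descriptor
	 * takes an even slot and its wait descriptor the odd slot right
	 * after it, which is what the ITE walk above relies on. */
	index = qi->free_head;					/* even slot: caller's descriptor */
	wait_index = (index + 1) % QI_LENGTH;			/* odd slot: its wait descriptor */
	qi->free_head = (qi->free_head + 2) % QI_LENGTH;	/* two slots per submission */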
@@ -741,7 +775,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
  */
 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
-	int rc = 0;
+	int rc;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
@@ -752,6 +786,9 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw = qi->desc;
 
+restart:
+	rc = 0;
+
 	spin_lock_irqsave(&qi->q_lock, flags);
 	while (qi->free_cnt < 3) {
 		spin_unlock_irqrestore(&qi->q_lock, flags);
@@ -782,7 +819,7 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
-	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
+	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -794,18 +831,21 @@ int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 */
 		rc = qi_check_fault(iommu, index);
 		if (rc)
-			goto out;
+			break;
 
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-out:
-	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
+
+	qi->desc_status[index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
 
+	if (rc == -EAGAIN)
+		goto restart;
+
 	return rc;
 }
@@ -857,6 +897,27 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	qi_submit_sync(&desc, iommu);
 }
 
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+			u64 addr, unsigned mask)
+{
+	struct qi_desc desc;
+
+	if (mask) {
+		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
+		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
+	} else
+		desc.high = QI_DEV_IOTLB_ADDR(addr);
+
+	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
+		qdep = 0;
+
+	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+		   QI_DIOTLB_TYPE;
+
+	qi_submit_sync(&desc, iommu);
+}
+
 /*
  * Disable Queued Invalidation interface.
  */
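
In the new qi_flush_dev_iotlb() above, a non-zero mask requests a multi-page invalidation: addr must be aligned to 2^mask pages, the helper then sets the address bits below bit VTD_PAGE_SHIFT + mask - 1 to one, and the S bit (QI_DEV_IOTLB_SIZE) tells the endpoint to decode the range from that run of ones. A worked example of the arithmetic with made-up values (not part of the patch):

	/* Illustration only: addr = 0x40000, mask = 2, i.e. 2^2 = 4 pages (16KB). */
	u64 addr = 0x40000;
	unsigned mask = 2;

	/* Alignment check: the low VTD_PAGE_SHIFT + mask = 14 bits must be clear. */
	BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));	/* 0x40000 & 0x3fff == 0 */

	/* Set the ones below the size boundary: 0x40000 | 0x1fff = 0x41fff. */
	addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;

	/* Page-aligned address plus the S bit: 0x41000 | 1 = 0x41001. */
	u64 high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;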
include/linux/intel-iommu.h
@@ -53,6 +53,7 @@
 #define	DMAR_PHMLIMIT_REG 0x78	/* pmrr high limit */
 #define DMAR_IQH_REG	0x80	/* Invalidation queue head register */
 #define DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
+#define DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
 #define DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
 #define DMAR_ICS_REG	0x98	/* Invalidation complete status register */
 #define DMAR_IRTA_REG	0xb8	/* Interrupt remapping table addr register */
@@ -198,6 +199,8 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_FSTS_PPF ((u32)2)
 #define DMA_FSTS_PFO ((u32)1)
 #define DMA_FSTS_IQE (1 << 4)
+#define DMA_FSTS_ICE (1 << 5)
+#define DMA_FSTS_ITE (1 << 6)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
@@ -226,7 +229,8 @@ do {					\
 enum {
 	QI_FREE,
 	QI_IN_USE,
-	QI_DONE
+	QI_DONE,
+	QI_ABORT
 };
 
 #define QI_CC_TYPE		0x1
@@ -255,6 +259,12 @@ enum {
 #define QI_CC_DID(did)		(((u64)did) << 16)
 #define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
 
+#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
+#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
+#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_SIZE	1
+#define QI_DEV_IOTLB_MAX_INVS	32
+
 struct qi_desc {
 	u64 low, high;
 };
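
Read together, these macros fix the field positions inside the device IOTLB invalidate descriptor. A hand-assembled descriptor using them (a sketch restating what qi_flush_dev_iotlb() builds; the comments just decode the shifts above):

	struct qi_desc desc;

	desc.low  = QI_DEV_IOTLB_SID(sid)	/* source-id in bits 47:32 of the low qword */
		  | QI_DEV_IOTLB_QDEP(qdep)	/* invalidate queue depth in bits 20:16 */
		  | QI_DIOTLB_TYPE;		/* pre-existing descriptor-type value */
	desc.high = QI_DEV_IOTLB_ADDR(addr)	/* address, page-aligned via VTD_PAGE_MASK */
		  | QI_DEV_IOTLB_SIZE;		/* S bit: range larger than a single page */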
@@ -344,6 +354,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
 			     u8 fm, u64 type);
 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+			       u64 addr, unsigned mask);
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);