Commit 0bbcdb43 authored by Alexey Kardashevskiy, committed by Michael Ellerman

powerpc/powernv/npu: TCE Kill helpers cleanup

The NPU PHB TCE Kill register is exactly the same as on the rest of
POWER8, so reuse the existing code for NPU. The only missing piece is
a helper to reset the entire TCE cache, so move that helper out of the
NPU code and rename it.

Since pnv_npu_tce_invalidate() really does invalidate the entire cache,
use pnv_pci_ioda2_tce_invalidate_entire() directly for NPU. Also add
an explicit comment on the workaround for invalidating the NPU TCE
cache.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent bef9253f
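Before the diff: the TCE Kill register encodes its invalidation scope in the top bits of the 64-bit value written to it, which this commit gathers in one place in pci-ioda.c. A minimal sketch of the selectors and the whole-cache kill sequence, assuming the MSB-first PPC_BIT() numbering used by arch/powerpc; tce_kill_all_sketch() is illustrative only and not a function from this patch:

	/* Scope selectors for the TCE Kill register; PPC_BIT(n) sets bit n
	 * counting from the most significant bit of the 64-bit value. */
	#define TCE_KILL_INVAL_ALL	PPC_BIT(0)	/* flush the entire TCE cache */
	#define TCE_KILL_INVAL_PE	PPC_BIT(1)	/* flush all TCEs of one PE */
	#define TCE_KILL_INVAL_TCE	PPC_BIT(2)	/* flush selected TCE entries */

	/* Hypothetical helper showing the canonical whole-cache kill */
	static void tce_kill_all_sketch(struct pnv_phb *phb)
	{
		mb();	/* make prior TCE table stores visible to the hardware */
		__raw_writeq(cpu_to_be64(TCE_KILL_INVAL_ALL),
			     phb->ioda.tce_inval_reg);
	}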
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -25,8 +25,6 @@
  * Other types of TCE cache invalidation are not functional in the
  * hardware.
  */
-#define TCE_KILL_INVAL_ALL PPC_BIT(0)
-
 static struct pci_dev *get_pci_dev(struct device_node *dn)
 {
 	return PCI_DN(dn)->pcidev;
@@ -161,45 +159,6 @@ static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
 	return pe;
 }
 
-void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe)
-{
-	struct pnv_phb *phb = npe->phb;
-
-	if (WARN_ON(phb->type != PNV_PHB_NPU ||
-		    !phb->ioda.tce_inval_reg ||
-		    !(npe->flags & PNV_IODA_PE_DEV)))
-		return;
-
-	mb(); /* Ensure previous TCE table stores are visible */
-	__raw_writeq(cpu_to_be64(TCE_KILL_INVAL_ALL),
-		phb->ioda.tce_inval_reg);
-}
-
-void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
-			struct iommu_table *tbl,
-			unsigned long index,
-			unsigned long npages,
-			bool rm)
-{
-	struct pnv_phb *phb = npe->phb;
-
-	/* We can only invalidate the whole cache on NPU */
-	unsigned long val = TCE_KILL_INVAL_ALL;
-
-	if (WARN_ON(phb->type != PNV_PHB_NPU ||
-		    !phb->ioda.tce_inval_reg ||
-		    !(npe->flags & PNV_IODA_PE_DEV)))
-		return;
-
-	mb(); /* Ensure previous TCE table stores are visible */
-	if (rm)
-		__raw_rm_writeq(cpu_to_be64(val),
-			(__be64 __iomem *) phb->ioda.tce_inval_reg_phys);
-	else
-		__raw_writeq(cpu_to_be64(val),
-			phb->ioda.tce_inval_reg);
-}
-
 void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
 {
 	struct pnv_ioda_pe *gpe;
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1840,9 +1840,23 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
 	.get = pnv_tce_get,
 };
 
+#define TCE_KILL_INVAL_ALL	PPC_BIT(0)
 #define TCE_KILL_INVAL_PE	PPC_BIT(1)
 #define TCE_KILL_INVAL_TCE	PPC_BIT(2)
 
+void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
+{
+	const unsigned long val = TCE_KILL_INVAL_ALL;
+
+	mb(); /* Ensure previous TCE table stores are visible */
+	if (rm)
+		__raw_rm_writeq(cpu_to_be64(val),
+				(__be64 __iomem *)
+				phb->ioda.tce_inval_reg_phys);
+	else
+		__raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+}
+
 static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
 {
 	/* 01xb - invalidate TCEs that match the specified PE# */
@@ -1863,7 +1877,7 @@ static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
 			if (!npe || npe->phb->type != PNV_PHB_NPU)
 				continue;
 
-			pnv_npu_tce_invalidate_entire(npe);
+			pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
 		}
 }
 
@@ -1912,14 +1926,19 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
 			index, npages);
 
 		if (pe->flags & PNV_IODA_PE_PEER)
-			/* Invalidate PEs using the same TCE table */
+			/*
+			 * The NVLink hardware does not support TCE kill
+			 * per TCE entry so we have to invalidate
+			 * the entire cache for it.
+			 */
 			for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
 				npe = pe->peers[i];
-				if (!npe || npe->phb->type != PNV_PHB_NPU)
+				if (!npe || npe->phb->type != PNV_PHB_NPU ||
+				    !npe->phb->ioda.tce_inval_reg)
 					continue;
 
-				pnv_npu_tce_invalidate(npe, tbl, index,
-						npages, rm);
+				pnv_pci_ioda2_tce_invalidate_entire(npe->phb,
+						rm);
 			}
 	}
 }
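A note on the peers[] walk in the two hunks above: an NPU PE is linked to its GPU PE via PNV_IODA_PE_PEER and shares the GPU's TCE table (the old comment said as much: "Invalidate PEs using the same TCE table"). Whenever the GPU's table changes, each peer NPU's TCE cache has to be flushed as well, and since the NVLink hardware cannot kill individual TCEs, the only available flush is the whole-cache one now performed by pnv_pci_ioda2_tce_invalidate_entire().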
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -218,15 +218,10 @@ extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 
 /* Nvlink functions */
-extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe);
-extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
-			struct iommu_table *tbl,
-			unsigned long index,
-			unsigned long npages,
-			bool rm);
 extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
 extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
 extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled);
 extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);
+extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
 
 #endif /* __POWERNV_PCI_H */
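One design note on the helper's bool rm argument: TCE invalidation can be requested from real mode (for example from KVM's real-mode TCE hcall paths), where the MMU is off and the ioremapped tce_inval_reg virtual address cannot be used, so the helper instead writes through the register's physical address with __raw_rm_writeq(). Hypothetical call sites, purely for illustration:

	pnv_pci_ioda2_tce_invalidate_entire(phb, false);	/* virtual mode: MMIO via ioremapped reg */
	pnv_pci_ioda2_tce_invalidate_entire(phb, true);		/* real mode: store to tce_inval_reg_phys */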