Commit f87a8864 authored by Alexey Kardashevskiy's avatar Alexey Kardashevskiy Committed by Michael Ellerman

vfio: powerpc/spapr/iommu/powernv/ioda2: Rework IOMMU ownership control

This adds tce_iommu_take_ownership() and tce_iommu_release_ownership()
which call in a loop iommu_take_ownership()/iommu_release_ownership()
for every table on the group. As there is just one now, no change in
behaviour is expected.

At the moment the iommu_table struct has a set_bypass() which enables/
disables DMA bypass on IODA2 PHB. This is exposed to POWERPC IOMMU code
which calls this callback when external IOMMU users such as VFIO are
about to get over a PHB.

The set_bypass() callback is not really an iommu_table function but
IOMMU/PE function. This introduces an iommu_table_group_ops struct and
adds take_ownership()/release_ownership() callbacks to it which are
called when an external user takes/releases control over the IOMMU.

This replaces set_bypass() with ownership callbacks as it is not
necessarily just bypass enabling — it can be something else or more —
so let's give it a more generic name.

The callbacks are implemented for IODA2 only. Other platforms (P5IOC2,
IODA1) will use the old iommu_take_ownership/iommu_release_ownership API.
The following patches will replace iommu_take_ownership/
iommu_release_ownership calls in IODA2 with full IOMMU table release/
create.

While we are here touching bypass control, this removes
pnv_pci_ioda2_setup_bypass_pe() as it does not do much
more compared to pnv_pci_ioda2_set_bypass. This moves tce_bypass_base
initialization to pnv_pci_ioda2_setup_dma_pe.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: for the vfio related changes]
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 0eaf4def
...@@ -93,7 +93,6 @@ struct iommu_table { ...@@ -93,7 +93,6 @@ struct iommu_table {
unsigned long it_page_shift;/* table iommu page size */ unsigned long it_page_shift;/* table iommu page size */
struct list_head it_group_list;/* List of iommu_table_group_link */ struct list_head it_group_list;/* List of iommu_table_group_link */
struct iommu_table_ops *it_ops; struct iommu_table_ops *it_ops;
void (*set_bypass)(struct iommu_table *tbl, bool enable);
}; };
/* Pure 2^n version of get_order */ /* Pure 2^n version of get_order */
...@@ -126,6 +125,15 @@ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl, ...@@ -126,6 +125,15 @@ extern struct iommu_table *iommu_init_table(struct iommu_table * tbl,
int nid); int nid);
#define IOMMU_TABLE_GROUP_MAX_TABLES 1 #define IOMMU_TABLE_GROUP_MAX_TABLES 1
struct iommu_table_group;
/*
 * Per-group ownership callbacks, invoked when an external user (e.g. VFIO)
 * takes or releases control over all DMA windows of an iommu_table_group.
 * Replaces the old per-table set_bypass() hook.
 */
struct iommu_table_group_ops {
/* Switch ownership from platform code to external user (e.g. VFIO) */
void (*take_ownership)(struct iommu_table_group *table_group);
/* Switch ownership from external user (e.g. VFIO) back to core */
void (*release_ownership)(struct iommu_table_group *table_group);
};
struct iommu_table_group_link { struct iommu_table_group_link {
struct list_head next; struct list_head next;
struct rcu_head rcu; struct rcu_head rcu;
...@@ -135,6 +143,7 @@ struct iommu_table_group_link { ...@@ -135,6 +143,7 @@ struct iommu_table_group_link {
struct iommu_table_group { struct iommu_table_group {
struct iommu_group *group; struct iommu_group *group;
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES]; struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
struct iommu_table_group_ops *ops;
}; };
#ifdef CONFIG_IOMMU_API #ifdef CONFIG_IOMMU_API
......
...@@ -1047,14 +1047,6 @@ int iommu_take_ownership(struct iommu_table *tbl) ...@@ -1047,14 +1047,6 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz); memset(tbl->it_map, 0xff, sz);
/*
* Disable iommu bypass, otherwise the user can DMA to all of
* our physical memory via the bypass window instead of just
* the pages that has been explicitly mapped into the iommu
*/
if (tbl->set_bypass)
tbl->set_bypass(tbl, false);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(iommu_take_ownership); EXPORT_SYMBOL_GPL(iommu_take_ownership);
...@@ -1068,10 +1060,6 @@ void iommu_release_ownership(struct iommu_table *tbl) ...@@ -1068,10 +1060,6 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */ /* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0) if (tbl->it_offset == 0)
set_bit(0, tbl->it_map); set_bit(0, tbl->it_map);
/* The kernel owns the device now, we can restore the iommu bypass */
if (tbl->set_bypass)
tbl->set_bypass(tbl, true);
} }
EXPORT_SYMBOL_GPL(iommu_release_ownership); EXPORT_SYMBOL_GPL(iommu_release_ownership);
......
...@@ -1919,13 +1919,8 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb, ...@@ -1919,13 +1919,8 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
} }
} }
static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable)
{ {
struct iommu_table_group_link *tgl = list_first_entry_or_null(
&tbl->it_group_list, struct iommu_table_group_link,
next);
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
uint16_t window_id = (pe->pe_number << 1 ) + 1; uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc; int64_t rc;
...@@ -1952,19 +1947,31 @@ static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable) ...@@ -1952,19 +1947,31 @@ static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
pe->tce_bypass_enabled = enable; pe->tce_bypass_enabled = enable;
} }
static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb, #ifdef CONFIG_IOMMU_API
struct pnv_ioda_pe *pe) static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
{ {
/* TVE #1 is selected by PCI address bit 59 */ struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
pe->tce_bypass_base = 1ull << 59; table_group);
iommu_take_ownership(table_group->tables[0]);
pnv_pci_ioda2_set_bypass(pe, false);
}
/* Install set_bypass callback for VFIO */ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
pe->table_group.tables[0]->set_bypass = pnv_pci_ioda2_set_bypass; {
struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
table_group);
/* Enable bypass by default */ iommu_release_ownership(table_group->tables[0]);
pnv_pci_ioda2_set_bypass(pe->table_group.tables[0], true); pnv_pci_ioda2_set_bypass(pe, true);
} }
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
.take_ownership = pnv_ioda2_take_ownership,
.release_ownership = pnv_ioda2_release_ownership,
};
#endif
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe) struct pnv_ioda_pe *pe)
{ {
...@@ -1979,6 +1986,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, ...@@ -1979,6 +1986,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
if (WARN_ON(pe->tce32_seg >= 0)) if (WARN_ON(pe->tce32_seg >= 0))
return; return;
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
tbl = pnv_pci_table_alloc(phb->hose->node); tbl = pnv_pci_table_alloc(phb->hose->node);
iommu_register_group(&pe->table_group, phb->hose->global_number, iommu_register_group(&pe->table_group, phb->hose->global_number,
pe->pe_number); pe->pe_number);
...@@ -2033,6 +2043,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, ...@@ -2033,6 +2043,9 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
} }
tbl->it_ops = &pnv_ioda2_iommu_ops; tbl->it_ops = &pnv_ioda2_iommu_ops;
iommu_init_table(tbl, phb->hose->node); iommu_init_table(tbl, phb->hose->node);
#ifdef CONFIG_IOMMU_API
pe->table_group.ops = &pnv_pci_ioda2_ops;
#endif
if (pe->flags & PNV_IODA_PE_DEV) { if (pe->flags & PNV_IODA_PE_DEV) {
/* /*
...@@ -2047,7 +2060,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, ...@@ -2047,7 +2060,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
/* Also create a bypass window */ /* Also create a bypass window */
if (!pnv_iommu_bypass_disabled) if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_setup_bypass_pe(phb, pe); pnv_pci_ioda2_set_bypass(pe, true);
return; return;
fail: fail:
......
...@@ -486,6 +486,61 @@ static long tce_iommu_ioctl(void *iommu_data, ...@@ -486,6 +486,61 @@ static long tce_iommu_ioctl(void *iommu_data,
return -ENOTTY; return -ENOTTY;
} }
/*
 * Non-DDW path: hand every table of the group back from the VFIO
 * container to the platform code, clearing any user mappings first.
 */
static void tce_iommu_release_ownership(struct tce_container *container,
struct iommu_table_group *table_group)
{
int i;
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbl = table_group->tables[i];
if (!tbl)
continue;
/* Unmap anything the user left mapped before releasing the table */
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
/* Only tables with an allocation bitmap went through iommu_take_ownership() */
if (tbl->it_map)
iommu_release_ownership(tbl);
}
}
/*
 * Non-DDW path: take control of every table in the group for the VFIO
 * container. On failure, roll back the tables already taken so the
 * group is left fully owned by the platform again.
 *
 * Returns 0 on success or the error from iommu_take_ownership().
 */
static int tce_iommu_take_ownership(struct tce_container *container,
struct iommu_table_group *table_group)
{
int i, j, rc = 0;
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
struct iommu_table *tbl = table_group->tables[i];
/* Skip empty slots and tables without an allocation bitmap */
if (!tbl || !tbl->it_map)
continue;
rc = iommu_take_ownership(tbl);
if (rc) {
/* Undo ownership of tables [0, i) taken before the failure */
for (j = 0; j < i; ++j)
iommu_release_ownership(
table_group->tables[j]);
return rc;
}
}
return 0;
}
/*
 * DDW path (_ddw: presumably "dynamic DMA windows" — confirm against
 * callers): delegate the release to the platform's table_group callback
 * (e.g. pnv_ioda2_release_ownership on IODA2).
 */
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
table_group->ops->release_ownership(table_group);
}
/*
 * DDW path: delegate the take-over to the platform's table_group
 * callback. The callback returns void, so this always reports success.
 */
static long tce_iommu_take_ownership_ddw(struct tce_container *container,
struct iommu_table_group *table_group)
{
table_group->ops->take_ownership(table_group);
return 0;
}
static int tce_iommu_attach_group(void *iommu_data, static int tce_iommu_attach_group(void *iommu_data,
struct iommu_group *iommu_group) struct iommu_group *iommu_group)
{ {
...@@ -518,7 +573,12 @@ static int tce_iommu_attach_group(void *iommu_data, ...@@ -518,7 +573,12 @@ static int tce_iommu_attach_group(void *iommu_data,
goto unlock_exit; goto unlock_exit;
} }
ret = iommu_take_ownership(table_group->tables[0]); if (!table_group->ops || !table_group->ops->take_ownership ||
!table_group->ops->release_ownership)
ret = tce_iommu_take_ownership(container, table_group);
else
ret = tce_iommu_take_ownership_ddw(container, table_group);
if (!ret) if (!ret)
container->grp = iommu_group; container->grp = iommu_group;
...@@ -533,7 +593,6 @@ static void tce_iommu_detach_group(void *iommu_data, ...@@ -533,7 +593,6 @@ static void tce_iommu_detach_group(void *iommu_data,
{ {
struct tce_container *container = iommu_data; struct tce_container *container = iommu_data;
struct iommu_table_group *table_group; struct iommu_table_group *table_group;
struct iommu_table *tbl;
mutex_lock(&container->lock); mutex_lock(&container->lock);
if (iommu_group != container->grp) { if (iommu_group != container->grp) {
...@@ -556,9 +615,10 @@ static void tce_iommu_detach_group(void *iommu_data, ...@@ -556,9 +615,10 @@ static void tce_iommu_detach_group(void *iommu_data,
table_group = iommu_group_get_iommudata(iommu_group); table_group = iommu_group_get_iommudata(iommu_group);
BUG_ON(!table_group); BUG_ON(!table_group);
tbl = table_group->tables[0]; if (!table_group->ops || !table_group->ops->release_ownership)
tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); tce_iommu_release_ownership(container, table_group);
iommu_release_ownership(tbl); else
tce_iommu_release_ownership_ddw(container, table_group);
unlock_exit: unlock_exit:
mutex_unlock(&container->lock); mutex_unlock(&container->lock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.