Commit 507e4c9d authored by Robin Murphy, committed by Will Deacon

iommu/io-pgtable: Add helper functions for TLB ops

Add some simple wrappers to avoid having the guts of the TLB operations
spilled all over the page table implementations, and to provide a point
to implement extra common functionality.

Acked-by: Will Deacon <will.deacon@arm.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent e5fc9753
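For context, the io_pgtable_tlb_*() wrappers added below simply route through the iommu_gather_ops callbacks and cookie that an IOMMU driver registers in its io_pgtable_cfg. The following is a minimal sketch of the driver side of that contract; the foo_* names, register offsets and configuration values are illustrative only and are not taken from this patch.

/*
 * Illustrative sketch: a hypothetical IOMMU driver supplying the
 * iommu_gather_ops callbacks that the new io_pgtable_tlb_* helpers
 * dispatch to. The foo_* identifiers and register offsets are made up.
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include "io-pgtable.h"

/* hypothetical per-domain state, handed back to the callbacks as the cookie */
struct foo_smmu_domain {
	void __iomem		*tlb_regs;
	struct io_pgtable_ops	*pgtbl_ops;
};

static void foo_tlb_flush_all(void *cookie)
{
	struct foo_smmu_domain *dom = cookie;

	/* invalidate the whole TLB for this domain (hardware-specific) */
	writel_relaxed(0, dom->tlb_regs + 0x0);
}

static void foo_tlb_add_flush(unsigned long iova, size_t size,
			      size_t granule, bool leaf, void *cookie)
{
	struct foo_smmu_domain *dom = cookie;
	size_t off;

	/* queue one invalidation per granule across [iova, iova + size) */
	for (off = 0; off < size; off += granule)
		writel_relaxed(iova + off, dom->tlb_regs + 0x8);
}

static void foo_tlb_sync(void *cookie)
{
	struct foo_smmu_domain *dom = cookie;

	/* wait for the queued invalidations to complete (polling simplified) */
	while (readl_relaxed(dom->tlb_regs + 0x10) & 1)
		;
}

static const struct iommu_gather_ops foo_gather_ops = {
	.tlb_flush_all	= foo_tlb_flush_all,
	.tlb_add_flush	= foo_tlb_add_flush,
	.tlb_sync	= foo_tlb_sync,
};

static int foo_domain_init_pgtable(struct foo_smmu_domain *dom)
{
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
		.ias		= 48,
		.oas		= 48,
		.tlb		= &foo_gather_ops,
	};

	/* the cookie passed here is what the helpers hand back above */
	dom->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, dom);
	return dom->pgtbl_ops ? 0 : -ENOMEM;
}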
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -417,9 +417,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
 			phys_addr_t paddr, size_t size, int prot)
 {
 	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;
-	const struct iommu_gather_ops *tlb = cfg->tlb;
-	void *cookie = data->iop.cookie;
+	struct io_pgtable *iop = &data->iop;
 	int ret;
 
 	/* If no access, then nothing to do */
@@ -431,10 +429,10 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
 	 * Synchronise all PTE updates for the new mapping before there's
 	 * a chance for anything to kick off a table walk for the new iova.
 	 */
-	if (cfg->quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
-		tlb->tlb_add_flush(iova, size, ARM_V7S_BLOCK_SIZE(2), false,
-				   cookie);
-		tlb->tlb_sync(cookie);
+	if (iop->cfg.quirks & IO_PGTABLE_QUIRK_TLBI_ON_MAP) {
+		io_pgtable_tlb_add_flush(iop, iova, size,
+					 ARM_V7S_BLOCK_SIZE(2), false);
+		io_pgtable_tlb_sync(iop);
 	} else {
 		wmb();
 	}
@@ -462,8 +460,7 @@ static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 			       unsigned long iova, int idx, int lvl,
 			       arm_v7s_iopte *ptep)
 {
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;
-	void *cookie = data->iop.cookie;
+	struct io_pgtable *iop = &data->iop;
 	arm_v7s_iopte pte;
 	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
 	int i;
@@ -475,11 +472,11 @@ static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
 		pte += size;
 	}
 
-	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, cfg);
+	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);
 
 	size *= ARM_V7S_CONT_PAGES;
-	cfg->tlb->tlb_add_flush(iova, size, size, true, cookie);
-	cfg->tlb->tlb_sync(cookie);
+	io_pgtable_tlb_add_flush(iop, iova, size, size, true);
+	io_pgtable_tlb_sync(iop);
 }
 
 static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
@@ -489,7 +486,6 @@ static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
 	unsigned long blk_start, blk_end, blk_size;
 	phys_addr_t blk_paddr;
 	arm_v7s_iopte table = 0;
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 	int prot = arm_v7s_pte_to_prot(*ptep, 1);
 
 	blk_size = ARM_V7S_BLOCK_SIZE(1);
@@ -517,9 +513,9 @@ static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
 		}
 	}
 
-	__arm_v7s_set_pte(ptep, table, 1, cfg);
+	__arm_v7s_set_pte(ptep, table, 1, &data->iop.cfg);
 	iova &= ~(blk_size - 1);
-	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
 	return size;
 }
 
@@ -528,9 +524,7 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 			   arm_v7s_iopte *ptep)
 {
 	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;
-	const struct iommu_gather_ops *tlb = cfg->tlb;
-	void *cookie = data->iop.cookie;
+	struct io_pgtable *iop = &data->iop;
 	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);
 
 	/* Something went horribly wrong and we ran out of page table */
@@ -556,20 +550,19 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 	if (num_entries) {
 		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);
 
-		__arm_v7s_set_pte(ptep, 0, num_entries, cfg);
+		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);
 
 		for (i = 0; i < num_entries; i++) {
 			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
 				/* Also flush any partial walks */
-				tlb->tlb_add_flush(iova, blk_size,
-						   ARM_V7S_BLOCK_SIZE(lvl + 1),
-						   false, cookie);
-				tlb->tlb_sync(cookie);
+				io_pgtable_tlb_add_flush(iop, iova, blk_size,
+					ARM_V7S_BLOCK_SIZE(lvl + 1), false);
+				io_pgtable_tlb_sync(iop);
 				ptep = iopte_deref(pte[i], lvl);
 				__arm_v7s_free_table(ptep, lvl + 1, data);
 			} else {
-				tlb->tlb_add_flush(iova, blk_size, blk_size,
-						   true, cookie);
+				io_pgtable_tlb_add_flush(iop, iova, blk_size,
+							 blk_size, true);
 			}
 			iova += blk_size;
 		}
@@ -590,13 +583,12 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
 static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			 size_t size)
 {
-	size_t unmapped;
 	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	struct io_pgtable *iop = &data->iop;
+	size_t unmapped;
 
 	unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
 	if (unmapped)
-		iop->cfg.tlb->tlb_sync(iop->cookie);
+		io_pgtable_tlb_sync(&data->iop);
 
 	return unmapped;
 }
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -446,7 +446,6 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 	unsigned long blk_start, blk_end;
 	phys_addr_t blk_paddr;
 	arm_lpae_iopte table = 0;
-	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
 	blk_start = iova & ~(blk_size - 1);
 	blk_end = blk_start + blk_size;
@@ -472,9 +471,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 		}
 	}
 
-	__arm_lpae_set_pte(ptep, table, cfg);
+	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
 	iova &= ~(blk_size - 1);
-	cfg->tlb->tlb_add_flush(iova, blk_size, blk_size, true, data->iop.cookie);
+	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
 	return size;
 }
 
@@ -483,8 +482,7 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			    arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte pte;
-	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
-	void *cookie = data->iop.cookie;
+	struct io_pgtable *iop = &data->iop;
 	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 
 	/* Something went horribly wrong and we ran out of page table */
@@ -498,17 +496,17 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
 	/* If the size matches this level, we're in the right place */
 	if (size == blk_size) {
-		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
+		__arm_lpae_set_pte(ptep, 0, &iop->cfg);
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			tlb->tlb_add_flush(iova, size, ARM_LPAE_GRANULE(data),
-					   false, cookie);
-			tlb->tlb_sync(cookie);
+			io_pgtable_tlb_add_flush(iop, iova, size,
+						 ARM_LPAE_GRANULE(data), false);
+			io_pgtable_tlb_sync(iop);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
 		} else {
-			tlb->tlb_add_flush(iova, size, size, true, cookie);
+			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 		}
 
 		return size;
@@ -532,13 +530,12 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 {
 	size_t unmapped;
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	struct io_pgtable *iop = &data->iop;
 	arm_lpae_iopte *ptep = data->pgd;
 	int lvl = ARM_LPAE_START_LVL(data);
 
 	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
 	if (unmapped)
-		iop->cfg.tlb->tlb_sync(iop->cookie);
+		io_pgtable_tlb_sync(&data->iop);
 
 	return unmapped;
 }
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -75,6 +75,6 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
 		return;
 
 	iop = container_of(ops, struct io_pgtable, ops);
-	iop->cfg.tlb->tlb_flush_all(iop->cookie);
+	io_pgtable_tlb_flush_all(iop);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -144,6 +144,22 @@ struct io_pgtable {
 
 #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)
 
+static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
+{
+	iop->cfg.tlb->tlb_flush_all(iop->cookie);
+}
+
+static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
+		unsigned long iova, size_t size, size_t granule, bool leaf)
+{
+	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
+}
+
+static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
+{
+	iop->cfg.tlb->tlb_sync(iop->cookie);
+}
+
 /**
  * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
  *                              particular format.