Commit 79319a05 authored by Linus Torvalds

Merge tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "Not much this time, but the changes include:

   - moving domain allocation into the iommu drivers to prepare for the
     introduction of default domains for devices

   - fixing the IO page-table code in the AMD IOMMU driver to correctly
     encode large page sizes

   - extension of the PCI support in the ARM-SMMU driver

   - various fixes and cleanups"
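Most of the per-driver churn below is one mechanical pattern: instead of the IOMMU core allocating a bare struct iommu_domain and calling the driver's domain_init() on it, each driver now embeds the generic domain inside its private domain structure, allocates both together in domain_alloc(), and recovers the private pointer with container_of(). A minimal sketch of that pattern, with illustrative "mydrv" names rather than any one driver's:

	struct mydrv_domain {
		u32 *pgtable;			/* driver-private state (illustrative) */
		struct iommu_domain domain;	/* embedded generic handle */
	};

	static struct mydrv_domain *to_mydrv_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct mydrv_domain, domain);
	}

	static struct iommu_domain *mydrv_domain_alloc(unsigned type)
	{
		struct mydrv_domain *priv;

		if (type != IOMMU_DOMAIN_UNMANAGED)
			return NULL;

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return NULL;

		/* the core only ever sees the embedded handle */
		return &priv->domain;
	}

	static void mydrv_domain_free(struct iommu_domain *dom)
	{
		kfree(to_mydrv_domain(dom));
	}

This removes the domain->priv indirection and lets the core ask a driver for a domain of a given type, which is what default domains will need.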

* tag 'iommu-updates-v4.1' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (34 commits)
  iommu/amd: Correctly encode huge pages in iommu page tables
  iommu/amd: Optimize amd_iommu_iova_to_phys for new fetch_pte interface
  iommu/amd: Optimize alloc_new_range for new fetch_pte interface
  iommu/amd: Optimize iommu_unmap_page for new fetch_pte interface
  iommu/amd: Return the pte page-size in fetch_pte
  iommu/amd: Add support for contiguous dma allocator
  iommu/amd: Don't allocate with __GFP_ZERO in alloc_coherent
  iommu/amd: Ignore BUS_NOTIFY_UNBOUND_DRIVER event
  iommu/amd: Use BUS_NOTIFY_REMOVED_DEVICE
  iommu/tegra-smmu: Compute PFN mask at runtime
  iommu/tegra-gart: Set aperture at domain initialization time
  iommu/tegra: Setup aperture
  iommu: Remove domain_init and domain_free iommu_ops
  iommu/fsl: Make use of domain_alloc and domain_free
  iommu/rockchip: Make use of domain_alloc and domain_free
  iommu/ipmmu-vmsa: Make use of domain_alloc and domain_free
  iommu/shmobile: Make use of domain_alloc and domain_free
  iommu/msm: Make use of domain_alloc and domain_free
  iommu/tegra-gart: Make use of domain_alloc and domain_free
  iommu/tegra-smmu: Make use of domain_alloc and domain_free
  ...
parents 6496edfc 7f65ef01
This diff is collapsed.
drivers/iommu/amd_iommu_types.h:
@@ -282,6 +282,12 @@
 #define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))
 
+/*
+ * Takes a page-table level and returns the default page-size for this level
+ */
+#define PTE_LEVEL_PAGE_SIZE(level)	\
+	(1ULL << (12 + (9 * (level))))
+
 #define IOMMU_PTE_P  (1ULL << 0)
 #define IOMMU_PTE_TV (1ULL << 1)
 #define IOMMU_PTE_U  (1ULL << 59)
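PTE_LEVEL_PAGE_SIZE() makes the level-to-size relation explicit: with 512 entries (9 address bits) per table level and a 4 KiB base page, 1ULL << (12 + 9 * level) works out to 4 KiB at level 0, 2 MiB at level 1, 1 GiB at level 2, and so on up the table — exactly the sizes the huge-page encoding fix in this series has to emit.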
@@ -400,6 +406,8 @@ struct iommu_domain;
 struct protection_domain {
	struct list_head list;  /* for list of all protection domains */
	struct list_head dev_list; /* List of all devices in this domain */
+	struct iommu_domain domain; /* generic domain handle used by
+				       iommu core code */
	spinlock_t lock;	/* mostly used to lock the page table*/
	struct mutex api_lock;	/* protect page tables in the iommu-api path */
	u16 id;			/* the domain id written to the device table */
@@ -411,10 +419,7 @@ struct protection_domain {
	bool updated;		/* complete domain flush required */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
	void *priv;		/* private data */
-	struct iommu_domain *iommu_domain; /* Pointer to generic
-					      domain structure */
 };
 
 /*
...
drivers/iommu/amd_iommu_v2.c:
@@ -417,7 +417,7 @@ static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
	dev_state    = pasid_state->device_state;
	run_inv_ctx_cb = !pasid_state->invalid;
 
-	if (run_inv_ctx_cb && pasid_state->device_state->inv_ctx_cb)
+	if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
		dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
 
	unbind_pasid(pasid_state);
...
This diff is collapsed.
drivers/iommu/exynos-iommu.c:
@@ -200,6 +200,7 @@ struct exynos_iommu_domain {
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
+	struct iommu_domain domain; /* generic domain data structure */
 };
 
 struct sysmmu_drvdata {
@@ -214,6 +215,11 @@ struct sysmmu_drvdata {
	phys_addr_t pgtable;
 };
 
+static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct exynos_iommu_domain, domain);
+}
+
 static bool set_sysmmu_active(struct sysmmu_drvdata *data)
 {
	/* return true if the System MMU was not active previously
@@ -696,58 +702,60 @@ static inline void pgtable_flush(void *vastart, void *vaend)
			   virt_to_phys(vaend));
 }
 
-static int exynos_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 {
-	struct exynos_iommu_domain *priv;
+	struct exynos_iommu_domain *exynos_domain;
	int i;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	exynos_domain = kzalloc(sizeof(*exynos_domain), GFP_KERNEL);
+	if (!exynos_domain)
+		return NULL;
 
-	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
-	if (!priv->pgtable)
+	exynos_domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
+	if (!exynos_domain->pgtable)
		goto err_pgtable;
 
-	priv->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-	if (!priv->lv2entcnt)
+	exynos_domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
+	if (!exynos_domain->lv2entcnt)
		goto err_counter;
 
	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
-		priv->pgtable[i + 0] = ZERO_LV2LINK;
-		priv->pgtable[i + 1] = ZERO_LV2LINK;
-		priv->pgtable[i + 2] = ZERO_LV2LINK;
-		priv->pgtable[i + 3] = ZERO_LV2LINK;
-		priv->pgtable[i + 4] = ZERO_LV2LINK;
-		priv->pgtable[i + 5] = ZERO_LV2LINK;
-		priv->pgtable[i + 6] = ZERO_LV2LINK;
-		priv->pgtable[i + 7] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 0] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 1] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 2] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 3] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 4] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 5] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 6] = ZERO_LV2LINK;
+		exynos_domain->pgtable[i + 7] = ZERO_LV2LINK;
	}
 
-	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
+	pgtable_flush(exynos_domain->pgtable, exynos_domain->pgtable + NUM_LV1ENTRIES);
 
-	spin_lock_init(&priv->lock);
-	spin_lock_init(&priv->pgtablelock);
-	INIT_LIST_HEAD(&priv->clients);
+	spin_lock_init(&exynos_domain->lock);
+	spin_lock_init(&exynos_domain->pgtablelock);
+	INIT_LIST_HEAD(&exynos_domain->clients);
 
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end   = ~0UL;
-	domain->geometry.force_aperture = true;
+	exynos_domain->domain.geometry.aperture_start = 0;
+	exynos_domain->domain.geometry.aperture_end   = ~0UL;
+	exynos_domain->domain.geometry.force_aperture = true;
 
-	domain->priv = priv;
-	return 0;
+	return &exynos_domain->domain;
 
 err_counter:
-	free_pages((unsigned long)priv->pgtable, 2);
+	free_pages((unsigned long)exynos_domain->pgtable, 2);
 err_pgtable:
-	kfree(priv);
-	return -ENOMEM;
+	kfree(exynos_domain);
+	return NULL;
 }
 
-static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
+static void exynos_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;
@@ -773,15 +781,14 @@ static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
-	kfree(domain->priv);
-	domain->priv = NULL;
+	kfree(priv);
 }
 
 static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
 {
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;
@@ -812,7 +819,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
 {
	struct exynos_iommu_owner *owner;
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
@@ -988,7 +995,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			    phys_addr_t paddr, size_t size, int prot)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
@@ -1042,7 +1049,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *priv,
 static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				 unsigned long l_iova, size_t size)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
@@ -1119,7 +1126,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
 {
-	struct exynos_iommu_domain *priv = domain->priv;
+	struct exynos_iommu_domain *priv = to_exynos_domain(domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;
@@ -1171,8 +1178,8 @@ static void exynos_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops exynos_iommu_ops = {
-	.domain_init = exynos_iommu_domain_init,
-	.domain_destroy = exynos_iommu_domain_destroy,
+	.domain_alloc = exynos_iommu_domain_alloc,
+	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
...
drivers/iommu/fsl_pamu_domain.c:
@@ -33,6 +33,11 @@ static struct kmem_cache *fsl_pamu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 static DEFINE_SPINLOCK(device_domain_lock);
 
+static struct fsl_dma_domain *to_fsl_dma_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct fsl_dma_domain, iommu_domain);
+}
+
 static int __init iommu_init_mempool(void)
 {
	fsl_pamu_domain_cache = kmem_cache_create("fsl_pamu_domain",
@@ -65,7 +70,7 @@ static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t i
	struct dma_window *win_ptr = &dma_domain->win_arr[0];
	struct iommu_domain_geometry *geom;
 
-	geom = &dma_domain->iommu_domain->geometry;
+	geom = &dma_domain->iommu_domain.geometry;
	if (!win_cnt || !dma_domain->geom_size) {
		pr_debug("Number of windows/geometry not configured for the domain\n");
@@ -123,7 +128,7 @@ static int map_win(int liodn, struct fsl_dma_domain *dma_domain)
 {
	int ret;
	struct dma_window *wnd = &dma_domain->win_arr[0];
-	phys_addr_t wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+	phys_addr_t wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
	unsigned long flags;
 
	spin_lock_irqsave(&iommu_lock, flags);
@@ -172,7 +177,7 @@ static int update_liodn(int liodn, struct fsl_dma_domain *dma_domain, u32 wnd_nr
	} else {
		phys_addr_t wnd_addr;
 
-		wnd_addr = dma_domain->iommu_domain->geometry.aperture_start;
+		wnd_addr = dma_domain->iommu_domain.geometry.aperture_start;
		ret = pamu_config_ppaace(liodn, wnd_addr,
					 wnd->size,
@@ -384,7 +389,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
	if (iova < domain->geometry.aperture_start ||
	    iova > domain->geometry.aperture_end)
@@ -398,11 +403,9 @@ static bool fsl_pamu_capable(enum iommu_cap cap)
	return cap == IOMMU_CAP_CACHE_COHERENCY;
 }
 
-static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
+static void fsl_pamu_domain_free(struct iommu_domain *domain)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
-
-	domain->priv = NULL;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
	/* remove all the devices from the device list */
	detach_device(NULL, dma_domain);
@@ -413,23 +416,24 @@ static void fsl_pamu_domain_destroy(struct iommu_domain *domain)
	kmem_cache_free(fsl_pamu_domain_cache, dma_domain);
 }
 
-static int fsl_pamu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
 {
	struct fsl_dma_domain *dma_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
	dma_domain = iommu_alloc_dma_domain();
	if (!dma_domain) {
		pr_debug("dma_domain allocation failed\n");
-		return -ENOMEM;
+		return NULL;
	}
-	domain->priv = dma_domain;
-	dma_domain->iommu_domain = domain;
	/* defaul geometry 64 GB i.e. maximum system address */
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end = (1ULL << 36) - 1;
-	domain->geometry.force_aperture = true;
+	dma_domain->iommu_domain. geometry.aperture_start = 0;
+	dma_domain->iommu_domain.geometry.aperture_end = (1ULL << 36) - 1;
+	dma_domain->iommu_domain.geometry.force_aperture = true;
 
-	return 0;
+	return &dma_domain->iommu_domain;
 }
 
 /* Configure geometry settings for all LIODNs associated with domain */
@@ -499,7 +503,7 @@ static int disable_domain_win(struct fsl_dma_domain *dma_domain, u32 wnd_nr)
 static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;
@@ -530,7 +534,7 @@ static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr)
 static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				  phys_addr_t paddr, u64 size, int prot)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	struct dma_window *wnd;
	int pamu_prot = 0;
	int ret;
@@ -607,7 +611,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
				int num)
 {
	unsigned long flags;
-	struct iommu_domain *domain = dma_domain->iommu_domain;
+	struct iommu_domain *domain = &dma_domain->iommu_domain;
	int ret = 0;
	int i;
@@ -653,7 +657,7 @@ static int handle_attach_device(struct fsl_dma_domain *dma_domain,
 static int fsl_pamu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *liodn;
	u32 liodn_cnt;
	int len, ret = 0;
@@ -691,7 +695,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
 static void fsl_pamu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	const u32 *prop;
	int len;
	struct pci_dev *pdev = NULL;
@@ -723,7 +727,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
 static int configure_domain_geometry(struct iommu_domain *domain, void *data)
 {
	struct iommu_domain_geometry *geom_attr = data;
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	dma_addr_t geom_size;
	unsigned long flags;
@@ -813,7 +817,7 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;
 
	switch (attr_type) {
@@ -838,7 +842,7 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
 static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
				    enum iommu_attr attr_type, void *data)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	int ret = 0;
 
	switch (attr_type) {
@@ -999,7 +1003,7 @@ static void fsl_pamu_remove_device(struct device *dev)
 static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
	unsigned long flags;
	int ret;
@@ -1048,15 +1052,15 @@ static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
 static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
 {
-	struct fsl_dma_domain *dma_domain = domain->priv;
+	struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
 
	return dma_domain->win_cnt;
 }
 
 static const struct iommu_ops fsl_pamu_ops = {
	.capable	= fsl_pamu_capable,
-	.domain_init	= fsl_pamu_domain_init,
-	.domain_destroy = fsl_pamu_domain_destroy,
+	.domain_alloc	= fsl_pamu_domain_alloc,
+	.domain_free	= fsl_pamu_domain_free,
	.attach_dev	= fsl_pamu_attach_device,
	.detach_dev	= fsl_pamu_detach_device,
	.domain_window_enable = fsl_pamu_window_enable,
...
drivers/iommu/fsl_pamu_domain.h:
@@ -71,7 +71,7 @@ struct fsl_dma_domain {
	u32 stash_id;
	struct pamu_stash_attribute dma_stash;
	u32 snoop_id;
-	struct iommu_domain *iommu_domain;
+	struct iommu_domain iommu_domain;
	spinlock_t domain_lock;
 };
...
drivers/iommu/intel-iommu.c:
@@ -339,7 +339,7 @@ struct dmar_domain {
	DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
					/* bitmap of iommus this domain uses*/
 
	struct list_head devices;	/* all devices' list */
-	struct iova_domain iovad;	/* iova's that belong to this domain */
+	struct iova_domain iovad;	/* iova's that belong to this domain */
 
	struct dma_pte	*pgd;		/* virtual address */
@@ -358,6 +358,9 @@ struct dmar_domain {
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	spinlock_t	iommu_lock;	/* protect iommu set in domain */
	u64		max_addr;	/* maximum mapped address */
+
+	struct iommu_domain domain;	/* generic domain data structure for
+					   iommu core */
 };
 
 /* PCI domain-device relationship */
@@ -449,6 +452,12 @@ static LIST_HEAD(device_domain_list);
 static const struct iommu_ops intel_iommu_ops;
 
+/* Convert generic 'struct iommu_domain to private struct dmar_domain */
+static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct dmar_domain, domain);
+}
+
 static int __init intel_iommu_setup(char *str)
 {
	if (!str)
@@ -595,12 +604,13 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
-	int i, found = 0;
+	bool found = false;
+	int i;
 
	domain->iommu_coherency = 1;
 
	for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
-		found = 1;
+		found = true;
		if (!ecap_coherent(g_iommus[i]->ecap)) {
			domain->iommu_coherency = 0;
			break;
@@ -1267,7 +1277,7 @@ static struct device_domain_info *
 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
			 u8 bus, u8 devfn)
 {
-	int found = 0;
+	bool found = false;
	unsigned long flags;
	struct device_domain_info *info;
	struct pci_dev *pdev;
@@ -1282,7 +1292,7 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
	list_for_each_entry(info, &domain->devices, link)
		if (info->iommu == iommu && info->bus == bus &&
		    info->devfn == devfn) {
-			found = 1;
+			found = true;
			break;
		}
	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4269,7 +4279,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
	struct device_domain_info *info, *tmp;
	struct intel_iommu *iommu;
	unsigned long flags;
-	int found = 0;
+	bool found = false;
	u8 bus, devfn;
 
	iommu = device_to_iommu(dev, &bus, &devfn);
@@ -4301,7 +4311,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
		 * update iommu count and coherency
		 */
		if (info->iommu == iommu)
-			found = 1;
+			found = true;
	}
	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4339,44 +4349,45 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
	return 0;
 }
 
-static int intel_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 {
	struct dmar_domain *dmar_domain;
+	struct iommu_domain *domain;
+
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
 
	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		printk(KERN_ERR
			"intel_iommu_domain_init: dmar_domain == NULL\n");
-		return -ENOMEM;
+		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		printk(KERN_ERR
			"intel_iommu_domain_init() failed\n");
		domain_exit(dmar_domain);
-		return -ENOMEM;
+		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);
-	domain->priv = dmar_domain;
 
+	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;
 
-	return 0;
+	return domain;
 }
 
-static void intel_iommu_domain_destroy(struct iommu_domain *domain)
+static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
-
-	domain->priv = NULL;
-	domain_exit(dmar_domain);
+	domain_exit(to_dmar_domain(domain));
 }
 
 static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;
@@ -4441,16 +4452,14 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
-
-	domain_remove_one_dev_info(dmar_domain, dev);
+	domain_remove_one_dev_info(to_dmar_domain(domain), dev);
 }
 
 static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;
@@ -4487,7 +4496,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
 static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
@@ -4535,7 +4544,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
 {
-	struct dmar_domain *dmar_domain = domain->priv;
+	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;
@@ -4594,8 +4603,8 @@ static void intel_iommu_remove_device(struct device *dev)
 static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
-	.domain_init	= intel_iommu_domain_init,
-	.domain_destroy = intel_iommu_domain_destroy,
+	.domain_alloc	= intel_iommu_domain_alloc,
+	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
...
drivers/iommu/intel_irq_remapping.c:
@@ -631,7 +631,7 @@ static int __init intel_enable_irq_remapping(void)
 {
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
-	int setup = 0;
+	bool setup = false;
	int eim = 0;
 
	if (x2apic_supported()) {
@@ -697,7 +697,7 @@ static int __init intel_enable_irq_remapping(void)
	 */
	for_each_iommu(iommu, drhd) {
		iommu_set_irq_remapping(iommu, eim);
-		setup = 1;
+		setup = true;
	}
 
	if (!setup)
@@ -856,7 +856,7 @@ static int __init parse_ioapics_under_ir(void)
 {
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
-	int ir_supported = 0;
+	bool ir_supported = false;
	int ioapic_idx;
 
	for_each_iommu(iommu, drhd)
@@ -864,7 +864,7 @@ static int __init parse_ioapics_under_ir(void)
		if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
			return -1;
 
-		ir_supported = 1;
+		ir_supported = true;
	}
 
	if (!ir_supported)
@@ -917,7 +917,7 @@ static void disable_irq_remapping(void)
 static int reenable_irq_remapping(int eim)
 {
	struct dmar_drhd_unit *drhd;
-	int setup = 0;
+	bool setup = false;
	struct intel_iommu *iommu = NULL;
 
	for_each_iommu(iommu, drhd)
@@ -933,7 +933,7 @@ static int reenable_irq_remapping(int eim)
		/* Set up interrupt remapping for iommu.*/
		iommu_set_irq_remapping(iommu, eim);
-		setup = 1;
+		setup = true;
	}
 
	if (!setup)
...
drivers/iommu/io-pgtable-arm.c:
@@ -116,6 +116,8 @@
 #define ARM_32_LPAE_TCR_EAE		(1 << 31)
 #define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)
 
+#define ARM_LPAE_TCR_EPD1		(1 << 23)
+
 #define ARM_LPAE_TCR_TG0_4K		(0 << 14)
 #define ARM_LPAE_TCR_TG0_64K		(1 << 14)
 #define ARM_LPAE_TCR_TG0_16K		(2 << 14)
@@ -621,6 +623,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
	}
 
	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
+
+	/* Disable speculative walks through TTBR1 */
+	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;
 
	/* MAIRs */
...
drivers/iommu/iommu.c:
@@ -901,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
 {
	struct iommu_domain *domain;
-	int ret;
 
	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;
 
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+	domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED);
	if (!domain)
		return NULL;
 
	domain->ops = bus->iommu_ops;
+	domain->type = IOMMU_DOMAIN_UNMANAGED;
 
-	ret = domain->ops->domain_init(domain);
-	if (ret)
-		goto out_free;
-
	return domain;
-
-out_free:
-	kfree(domain);
-
-	return NULL;
 }
 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
-	if (likely(domain->ops->domain_destroy != NULL))
-		domain->ops->domain_destroy(domain);
-
-	kfree(domain);
+	domain->ops->domain_free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -1049,6 +1037,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
		      domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;
 
+	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+		return -EINVAL;
+
	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
@@ -1100,6 +1091,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
		      domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;
 
+	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+		return -EINVAL;
+
	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
...
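With the core hunks above, allocation and teardown are fully delegated to the driver, and the new __IOMMU_DOMAIN_PAGING checks keep map/unmap off domains that have no page tables. None of this changes the caller's view of the API; a minimal usage sketch (error handling trimmed, iova/phys values purely illustrative, assuming the bus's iommu_ops implement domain_alloc):

	struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);

	if (dom) {
		/* map one 4 KiB page read/write, then tear everything down */
		iommu_map(dom, 0x100000, 0x80000000, SZ_4K,
			  IOMMU_READ | IOMMU_WRITE);
		iommu_unmap(dom, 0x100000, SZ_4K);
		iommu_domain_free(dom);
	}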
drivers/iommu/ipmmu-vmsa.c:
@@ -38,7 +38,7 @@ struct ipmmu_vmsa_device {
 
 struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
-	struct iommu_domain *io_domain;
+	struct iommu_domain io_domain;
 
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;
@@ -56,6 +56,11 @@ struct ipmmu_vmsa_archdata {
 static DEFINE_SPINLOCK(ipmmu_devices_lock);
 static LIST_HEAD(ipmmu_devices);
 
+static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
+}
+
 #define TLB_LOOP_TIMEOUT		100	/* 100us */
 
 /* -----------------------------------------------------------------------------
@@ -428,7 +433,7 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
-	if (!report_iommu_fault(domain->io_domain, mmu->dev, iova, 0))
+	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;
 
	dev_err_ratelimited(mmu->dev,
@@ -448,7 +453,7 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
		return IRQ_NONE;
 
	io_domain = mmu->mapping->domain;
-	domain = io_domain->priv;
+	domain = to_vmsa_domain(io_domain);
 
	return ipmmu_domain_irq(domain);
 }
@@ -457,25 +462,25 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
  * IOMMU Operations
  */
 
-static int ipmmu_domain_init(struct iommu_domain *io_domain)
+static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
 {
	struct ipmmu_vmsa_domain *domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
-		return -ENOMEM;
+		return NULL;
 
	spin_lock_init(&domain->lock);
 
-	io_domain->priv = domain;
-	domain->io_domain = io_domain;
-
-	return 0;
+	return &domain->io_domain;
 }
 
-static void ipmmu_domain_destroy(struct iommu_domain *io_domain)
+static void ipmmu_domain_free(struct iommu_domain *io_domain)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
	/*
	 * Free the domain resources. We assume that all devices have already
@@ -491,7 +496,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 {
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_device *mmu = archdata->mmu;
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;
@@ -532,7 +537,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
 {
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
 
	for (i = 0; i < archdata->num_utlbs; ++i)
@@ -546,7 +551,7 @@ static void ipmmu_detach_device(struct iommu_domain *io_domain,
 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
	if (!domain)
		return -ENODEV;
@@ -557,7 +562,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
	return domain->iop->unmap(domain->iop, iova, size);
 }
@@ -565,7 +570,7 @@ static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
 static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
 {
-	struct ipmmu_vmsa_domain *domain = io_domain->priv;
+	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
 
	/* TODO: Is locking needed ? */
 
@@ -737,8 +742,8 @@ static void ipmmu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops ipmmu_ops = {
-	.domain_init = ipmmu_domain_init,
-	.domain_destroy = ipmmu_domain_destroy,
+	.domain_alloc = ipmmu_domain_alloc,
+	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
...
drivers/iommu/msm_iommu.c:
@@ -52,8 +52,14 @@ DEFINE_SPINLOCK(msm_iommu_lock);
 struct msm_priv {
	unsigned long *pgtable;
	struct list_head list_attached;
+	struct iommu_domain domain;
 };
 
+static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
+{
+	return container_of(dom, struct msm_priv, domain);
+}
+
 static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
 {
	int ret;
@@ -79,7 +85,7 @@ static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
 static int __flush_iotlb(struct iommu_domain *domain)
 {
-	struct msm_priv *priv = domain->priv;
+	struct msm_priv *priv = to_msm_priv(domain);
	struct msm_iommu_drvdata *iommu_drvdata;
	struct msm_iommu_ctx_drvdata *ctx_drvdata;
	int ret = 0;
@@ -209,10 +215,14 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
	SET_M(base, ctx, 1);
 }
 
-static int msm_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
 {
-	struct msm_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	struct msm_priv *priv;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto fail_nomem;
@@ -224,20 +234,19 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
		goto fail_nomem;
 
	memset(priv->pgtable, 0, SZ_16K);
-	domain->priv = priv;
 
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end   = (1ULL << 32) - 1;
-	domain->geometry.force_aperture = true;
+	priv->domain.geometry.aperture_start = 0;
+	priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
+	priv->domain.geometry.force_aperture = true;
 
-	return 0;
+	return &priv->domain;
 
 fail_nomem:
	kfree(priv);
-	return -ENOMEM;
+	return NULL;
 }
 
-static void msm_iommu_domain_destroy(struct iommu_domain *domain)
+static void msm_iommu_domain_free(struct iommu_domain *domain)
 {
	struct msm_priv *priv;
	unsigned long flags;
@@ -245,20 +254,17 @@ static void msm_iommu_domain_destroy(struct iommu_domain *domain)
	int i;
 
	spin_lock_irqsave(&msm_iommu_lock, flags);
-	priv = domain->priv;
-	domain->priv = NULL;
+	priv = to_msm_priv(domain);
 
-	if (priv) {
-		fl_table = priv->pgtable;
+	fl_table = priv->pgtable;
 
-		for (i = 0; i < NUM_FL_PTE; i++)
-			if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
-				free_page((unsigned long) __va(((fl_table[i]) &
-								FL_BASE_MASK)));
+	for (i = 0; i < NUM_FL_PTE; i++)
+		if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+			free_page((unsigned long) __va(((fl_table[i]) &
+							FL_BASE_MASK)));
 
-		free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
-		priv->pgtable = NULL;
-	}
+	free_pages((unsigned long)priv->pgtable, get_order(SZ_16K));
+	priv->pgtable = NULL;
 
	kfree(priv);
	spin_unlock_irqrestore(&msm_iommu_lock, flags);
@@ -276,9 +282,9 @@ static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
	spin_lock_irqsave(&msm_iommu_lock, flags);
 
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
 
-	if (!priv || !dev) {
+	if (!dev) {
		ret = -EINVAL;
		goto fail;
	}
@@ -330,9 +336,9 @@ static void msm_iommu_detach_dev(struct iommu_domain *domain,
	int ret;
 
	spin_lock_irqsave(&msm_iommu_lock, flags);
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
 
-	if (!priv || !dev)
+	if (!dev)
		goto fail;
 
	iommu_drvdata = dev_get_drvdata(dev->parent);
@@ -382,11 +388,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
		goto fail;
	}
 
-	priv = domain->priv;
-	if (!priv) {
-		ret = -EINVAL;
-		goto fail;
-	}
+	priv = to_msm_priv(domain);
 
	fl_table = priv->pgtable;
@@ -484,10 +486,7 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
	spin_lock_irqsave(&msm_iommu_lock, flags);
 
-	priv = domain->priv;
-
-	if (!priv)
-		goto fail;
+	priv = to_msm_priv(domain);
 
	fl_table = priv->pgtable;
@@ -566,7 +565,7 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
 
	spin_lock_irqsave(&msm_iommu_lock, flags);
 
-	priv = domain->priv;
+	priv = to_msm_priv(domain);
	if (list_empty(&priv->list_attached))
		goto fail;
@@ -674,8 +673,8 @@ irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
 static const struct iommu_ops msm_iommu_ops = {
	.capable = msm_iommu_capable,
-	.domain_init = msm_iommu_domain_init,
-	.domain_destroy = msm_iommu_domain_destroy,
+	.domain_alloc = msm_iommu_domain_alloc,
+	.domain_free = msm_iommu_domain_free,
	.attach_dev = msm_iommu_attach_dev,
	.detach_dev = msm_iommu_detach_dev,
	.map = msm_iommu_map,
...
drivers/iommu/omap-iommu.c:
@@ -59,6 +59,7 @@ struct omap_iommu_domain {
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
+	struct iommu_domain domain;
 };
 
 #define MMU_LOCK_BASE_SHIFT	10
@@ -79,6 +80,15 @@ struct iotlb_lock {
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
 
+/**
+ * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
+ * @dom: generic iommu domain handle
+ **/
+static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct omap_iommu_domain, domain);
+}
+
 /**
  * omap_iommu_save_ctx - Save registers for pm off-mode support
  * @dev:	client device
@@ -901,7 +911,7 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
	if (!omap_domain->iommu_dev)
		return IRQ_NONE;
@@ -1113,7 +1123,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
@@ -1140,7 +1150,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
@@ -1152,7 +1162,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
 static int
 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;
@@ -1212,17 +1222,20 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
 static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
 }
 
-static int omap_iommu_domain_init(struct iommu_domain *domain)
+static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
 {
	struct omap_iommu_domain *omap_domain;
 
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
@@ -1244,25 +1257,21 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);
 
-	domain->priv = omap_domain;
+	omap_domain->domain.geometry.aperture_start = 0;
+	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
+	omap_domain->domain.geometry.force_aperture = true;
 
-	domain->geometry.aperture_start = 0;
-	domain->geometry.aperture_end   = (1ULL << 32) - 1;
-	domain->geometry.force_aperture = true;
-
-	return 0;
+	return &omap_domain->domain;
 
 fail_nomem:
	kfree(omap_domain);
 out:
-	return -ENOMEM;
+	return NULL;
 }
 
-static void omap_iommu_domain_destroy(struct iommu_domain *domain)
+static void omap_iommu_domain_free(struct iommu_domain *domain)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
-
-	domain->priv = NULL;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
 
	/*
	 * An iommu device is still attached
@@ -1278,7 +1287,7 @@ static void omap_iommu_domain_destroy(struct iommu_domain *domain)
 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
 {
-	struct omap_iommu_domain *omap_domain = domain->priv;
+	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
@@ -1358,8 +1367,8 @@ static void omap_iommu_remove_device(struct device *dev)
 }
 
 static const struct iommu_ops omap_iommu_ops = {
-	.domain_init	= omap_iommu_domain_init,
-	.domain_destroy	= omap_iommu_domain_destroy,
+	.domain_alloc	= omap_iommu_domain_alloc,
+	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
...
...@@ -80,6 +80,8 @@ struct rk_iommu_domain { ...@@ -80,6 +80,8 @@ struct rk_iommu_domain {
u32 *dt; /* page directory table */ u32 *dt; /* page directory table */
spinlock_t iommus_lock; /* lock for iommus list */ spinlock_t iommus_lock; /* lock for iommus list */
spinlock_t dt_lock; /* lock for modifying page directory table */ spinlock_t dt_lock; /* lock for modifying page directory table */
struct iommu_domain domain;
}; };
struct rk_iommu { struct rk_iommu {
...@@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count) ...@@ -100,6 +102,11 @@ static inline void rk_table_flush(u32 *va, unsigned int count)
outer_flush_range(pa_start, pa_end); outer_flush_range(pa_start, pa_end);
} }
static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
return container_of(dom, struct rk_iommu_domain, domain);
}
/** /**
* Inspired by _wait_for in intel_drv.h * Inspired by _wait_for in intel_drv.h
* This is NOT safe for use in interrupt context. * This is NOT safe for use in interrupt context.
...@@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id) ...@@ -503,7 +510,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova) dma_addr_t iova)
{ {
struct rk_iommu_domain *rk_domain = domain->priv; struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
phys_addr_t pt_phys, phys = 0; phys_addr_t pt_phys, phys = 0;
u32 dte, pte; u32 dte, pte;
...@@ -639,7 +646,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, ...@@ -639,7 +646,7 @@ static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot)
{ {
struct rk_iommu_domain *rk_domain = domain->priv; struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova; dma_addr_t iova = (dma_addr_t)_iova;
u32 *page_table, *pte_addr; u32 *page_table, *pte_addr;
...@@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, ...@@ -670,7 +677,7 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
size_t size) size_t size)
{ {
struct rk_iommu_domain *rk_domain = domain->priv; struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
dma_addr_t iova = (dma_addr_t)_iova; dma_addr_t iova = (dma_addr_t)_iova;
phys_addr_t pt_phys; phys_addr_t pt_phys;
...@@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain, ...@@ -726,7 +733,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct rk_iommu *iommu; struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = domain->priv; struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
int ret; int ret;
phys_addr_t dte_addr; phys_addr_t dte_addr;
...@@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, ...@@ -778,7 +785,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct rk_iommu *iommu; struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = domain->priv; struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
unsigned long flags; unsigned long flags;
/* Allow 'virtual devices' (eg drm) to detach from domain */ /* Allow 'virtual devices' (eg drm) to detach from domain */
...@@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain, ...@@ -804,13 +811,16 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
dev_info(dev, "Detached from iommu domain\n"); dev_info(dev, "Detached from iommu domain\n");
} }
static int rk_iommu_domain_init(struct iommu_domain *domain) static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{ {
struct rk_iommu_domain *rk_domain; struct rk_iommu_domain *rk_domain;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
if (!rk_domain) if (!rk_domain)
return -ENOMEM; return NULL;
/* /*
* rk32xx iommus use a 2 level pagetable. * rk32xx iommus use a 2 level pagetable.
...@@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain) ...@@ -827,17 +837,16 @@ static int rk_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&rk_domain->dt_lock); spin_lock_init(&rk_domain->dt_lock);
INIT_LIST_HEAD(&rk_domain->iommus); INIT_LIST_HEAD(&rk_domain->iommus);
domain->priv = rk_domain; return &rk_domain->domain;
return 0;
err_dt: err_dt:
kfree(rk_domain); kfree(rk_domain);
return -ENOMEM; return NULL;
} }
static void rk_iommu_domain_destroy(struct iommu_domain *domain) static void rk_iommu_domain_free(struct iommu_domain *domain)
{ {
struct rk_iommu_domain *rk_domain = domain->priv; struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
int i; int i;
WARN_ON(!list_empty(&rk_domain->iommus)); WARN_ON(!list_empty(&rk_domain->iommus));
...@@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain) ...@@ -852,8 +861,7 @@ static void rk_iommu_domain_destroy(struct iommu_domain *domain)
} }
free_page((unsigned long)rk_domain->dt); free_page((unsigned long)rk_domain->dt);
kfree(domain->priv); kfree(rk_domain);
domain->priv = NULL;
} }
static bool rk_iommu_is_dev_iommu_master(struct device *dev) static bool rk_iommu_is_dev_iommu_master(struct device *dev)
...@@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev) ...@@ -952,8 +960,8 @@ static void rk_iommu_remove_device(struct device *dev)
} }
static const struct iommu_ops rk_iommu_ops = { static const struct iommu_ops rk_iommu_ops = {
.domain_init = rk_iommu_domain_init, .domain_alloc = rk_iommu_domain_alloc,
.domain_destroy = rk_iommu_domain_destroy, .domain_free = rk_iommu_domain_free,
.attach_dev = rk_iommu_attach_device, .attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device, .detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map, .map = rk_iommu_map,
......
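The "2 level pagetable" noted in the rk32xx comment splits a 32-bit iova into a 10-bit directory index, a 10-bit page-table index and a 12-bit page offset, which is the walk rk_iommu_iova_to_phys() performs above. A sketch of that address split; the field widths follow the driver's layout, but treat the helper names and values here as illustrative:

#include <stdint.h>
#include <stdio.h>

static uint32_t rk_iova_dte_index(uint32_t iova)   { return iova >> 22; }           /* bits 31:22 */
static uint32_t rk_iova_pte_index(uint32_t iova)   { return (iova >> 12) & 0x3ff; } /* bits 21:12 */
static uint32_t rk_iova_page_offset(uint32_t iova) { return iova & 0xfff; }         /* bits 11:0  */

int main(void)
{
        uint32_t iova = 0x12345678;

        /* prints: dte 72, pte 837, offset 0x678 */
        printf("dte %u, pte %u, offset %#x\n",
               rk_iova_dte_index(iova),
               rk_iova_pte_index(iova),
               rk_iova_page_offset(iova));
        return 0;
}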
...@@ -42,11 +42,17 @@ struct shmobile_iommu_domain { ...@@ -42,11 +42,17 @@ struct shmobile_iommu_domain {
spinlock_t map_lock; spinlock_t map_lock;
spinlock_t attached_list_lock; spinlock_t attached_list_lock;
struct list_head attached_list; struct list_head attached_list;
struct iommu_domain domain;
}; };
static struct shmobile_iommu_archdata *ipmmu_archdata; static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache; static struct kmem_cache *l1cache, *l2cache;
static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom)
{
return container_of(dom, struct shmobile_iommu_domain, domain);
}
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable, static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
struct kmem_cache *cache, size_t size) struct kmem_cache *cache, size_t size)
{ {
...@@ -82,31 +88,33 @@ static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable, ...@@ -82,31 +88,33 @@ static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
sizeof(val) * count, DMA_TO_DEVICE); sizeof(val) * count, DMA_TO_DEVICE);
} }
static int shmobile_iommu_domain_init(struct iommu_domain *domain) static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
{ {
struct shmobile_iommu_domain *sh_domain; struct shmobile_iommu_domain *sh_domain;
int i, ret; int i, ret;
sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL); if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
if (!sh_domain) if (!sh_domain)
return -ENOMEM; return NULL;
ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE); ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
if (ret < 0) { if (ret < 0) {
kfree(sh_domain); kfree(sh_domain);
return ret; return NULL;
} }
for (i = 0; i < L1_LEN; i++) for (i = 0; i < L1_LEN; i++)
sh_domain->l2[i].pgtable = NULL; sh_domain->l2[i].pgtable = NULL;
spin_lock_init(&sh_domain->map_lock); spin_lock_init(&sh_domain->map_lock);
spin_lock_init(&sh_domain->attached_list_lock); spin_lock_init(&sh_domain->attached_list_lock);
INIT_LIST_HEAD(&sh_domain->attached_list); INIT_LIST_HEAD(&sh_domain->attached_list);
domain->priv = sh_domain; return &sh_domain->domain;
return 0;
} }
static void shmobile_iommu_domain_destroy(struct iommu_domain *domain) static void shmobile_iommu_domain_free(struct iommu_domain *domain)
{ {
struct shmobile_iommu_domain *sh_domain = domain->priv; struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int i; int i;
for (i = 0; i < L1_LEN; i++) { for (i = 0; i < L1_LEN; i++) {
...@@ -115,14 +123,13 @@ static void shmobile_iommu_domain_destroy(struct iommu_domain *domain) ...@@ -115,14 +123,13 @@ static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
} }
pgtable_free(&sh_domain->l1, l1cache, L1_SIZE); pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
kfree(sh_domain); kfree(sh_domain);
domain->priv = NULL;
} }
static int shmobile_iommu_attach_device(struct iommu_domain *domain, static int shmobile_iommu_attach_device(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
struct shmobile_iommu_domain *sh_domain = domain->priv; struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
int ret = -EBUSY; int ret = -EBUSY;
if (!archdata) if (!archdata)
...@@ -151,7 +158,7 @@ static void shmobile_iommu_detach_device(struct iommu_domain *domain, ...@@ -151,7 +158,7 @@ static void shmobile_iommu_detach_device(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
struct shmobile_iommu_domain *sh_domain = domain->priv; struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
if (!archdata) if (!archdata)
return; return;
...@@ -214,7 +221,7 @@ static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -214,7 +221,7 @@ static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot)
{ {
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
struct shmobile_iommu_domain *sh_domain = domain->priv; struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index; unsigned int l1index, l2index;
int ret; int ret;
...@@ -258,7 +265,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain, ...@@ -258,7 +265,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size) unsigned long iova, size_t size)
{ {
struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
struct shmobile_iommu_domain *sh_domain = domain->priv; struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
unsigned int l1index, l2index; unsigned int l1index, l2index;
uint32_t l2entry = 0; uint32_t l2entry = 0;
size_t ret = 0; size_t ret = 0;
...@@ -298,7 +305,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain, ...@@ -298,7 +305,7 @@ static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova) dma_addr_t iova)
{ {
struct shmobile_iommu_domain *sh_domain = domain->priv; struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
uint32_t l1entry = 0, l2entry = 0; uint32_t l1entry = 0, l2entry = 0;
unsigned int l1index, l2index; unsigned int l1index, l2index;
...@@ -355,8 +362,8 @@ static int shmobile_iommu_add_device(struct device *dev) ...@@ -355,8 +362,8 @@ static int shmobile_iommu_add_device(struct device *dev)
} }
static const struct iommu_ops shmobile_iommu_ops = { static const struct iommu_ops shmobile_iommu_ops = {
.domain_init = shmobile_iommu_domain_init, .domain_alloc = shmobile_iommu_domain_alloc,
.domain_destroy = shmobile_iommu_domain_destroy, .domain_free = shmobile_iommu_domain_free,
.attach_dev = shmobile_iommu_attach_device, .attach_dev = shmobile_iommu_attach_device,
.detach_dev = shmobile_iommu_detach_device, .detach_dev = shmobile_iommu_detach_device,
.map = shmobile_iommu_map, .map = shmobile_iommu_map,
......
...@@ -63,11 +63,21 @@ struct gart_device { ...@@ -63,11 +63,21 @@ struct gart_device {
struct device *dev; struct device *dev;
}; };
struct gart_domain {
struct iommu_domain domain; /* generic domain handle */
struct gart_device *gart; /* link to gart device */
};
static struct gart_device *gart_handle; /* unique for a system */ static struct gart_device *gart_handle; /* unique for a system */
#define GART_PTE(_pfn) \ #define GART_PTE(_pfn) \
(GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT)) (GART_ENTRY_PHYS_ADDR_VALID | ((_pfn) << PAGE_SHIFT))
static struct gart_domain *to_gart_domain(struct iommu_domain *dom)
{
return container_of(dom, struct gart_domain, domain);
}
/* /*
* Any interaction between any block on PPSB and a block on APB or AHB * Any interaction between any block on PPSB and a block on APB or AHB
* must have these read-back to ensure the APB/AHB bus transaction is * must have these read-back to ensure the APB/AHB bus transaction is
...@@ -156,20 +166,11 @@ static inline bool gart_iova_range_valid(struct gart_device *gart, ...@@ -156,20 +166,11 @@ static inline bool gart_iova_range_valid(struct gart_device *gart,
static int gart_iommu_attach_dev(struct iommu_domain *domain, static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct gart_device *gart; struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
struct gart_client *client, *c; struct gart_client *client, *c;
int err = 0; int err = 0;
gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
domain->geometry.aperture_start = gart->iovmm_base;
domain->geometry.aperture_end = gart->iovmm_base +
gart->page_count * GART_PAGE_SIZE - 1;
domain->geometry.force_aperture = true;
client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL); client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
if (!client) if (!client)
return -ENOMEM; return -ENOMEM;
...@@ -198,7 +199,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain, ...@@ -198,7 +199,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
static void gart_iommu_detach_dev(struct iommu_domain *domain, static void gart_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct gart_device *gart = domain->priv; struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
struct gart_client *c; struct gart_client *c;
spin_lock(&gart->client_lock); spin_lock(&gart->client_lock);
...@@ -216,33 +218,55 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain, ...@@ -216,33 +218,55 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain,
spin_unlock(&gart->client_lock); spin_unlock(&gart->client_lock);
} }
static int gart_iommu_domain_init(struct iommu_domain *domain) static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
{ {
return 0; struct gart_domain *gart_domain;
} struct gart_device *gart;
static void gart_iommu_domain_destroy(struct iommu_domain *domain) if (type != IOMMU_DOMAIN_UNMANAGED)
{ return NULL;
struct gart_device *gart = domain->priv;
gart = gart_handle;
if (!gart) if (!gart)
return; return NULL;
spin_lock(&gart->client_lock); gart_domain = kzalloc(sizeof(*gart_domain), GFP_KERNEL);
if (!list_empty(&gart->client)) { if (!gart_domain)
struct gart_client *c; return NULL;
gart_domain->gart = gart;
gart_domain->domain.geometry.aperture_start = gart->iovmm_base;
gart_domain->domain.geometry.aperture_end = gart->iovmm_base +
gart->page_count * GART_PAGE_SIZE - 1;
gart_domain->domain.geometry.force_aperture = true;
return &gart_domain->domain;
}
static void gart_iommu_domain_free(struct iommu_domain *domain)
{
struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
if (gart) {
spin_lock(&gart->client_lock);
if (!list_empty(&gart->client)) {
struct gart_client *c;
list_for_each_entry(c, &gart->client, list) list_for_each_entry(c, &gart->client, list)
gart_iommu_detach_dev(domain, c->dev); gart_iommu_detach_dev(domain, c->dev);
}
spin_unlock(&gart->client_lock);
} }
spin_unlock(&gart->client_lock);
domain->priv = NULL; kfree(gart_domain);
} }
static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t pa, size_t bytes, int prot) phys_addr_t pa, size_t bytes, int prot)
{ {
struct gart_device *gart = domain->priv; struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
unsigned long flags; unsigned long flags;
unsigned long pfn; unsigned long pfn;
...@@ -265,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -265,7 +289,8 @@ static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t bytes) size_t bytes)
{ {
struct gart_device *gart = domain->priv; struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
unsigned long flags; unsigned long flags;
if (!gart_iova_range_valid(gart, iova, bytes)) if (!gart_iova_range_valid(gart, iova, bytes))
...@@ -281,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, ...@@ -281,7 +306,8 @@ static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova) dma_addr_t iova)
{ {
struct gart_device *gart = domain->priv; struct gart_domain *gart_domain = to_gart_domain(domain);
struct gart_device *gart = gart_domain->gart;
unsigned long pte; unsigned long pte;
phys_addr_t pa; phys_addr_t pa;
unsigned long flags; unsigned long flags;
...@@ -310,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap) ...@@ -310,8 +336,8 @@ static bool gart_iommu_capable(enum iommu_cap cap)
static const struct iommu_ops gart_iommu_ops = { static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable, .capable = gart_iommu_capable,
.domain_init = gart_iommu_domain_init, .domain_alloc = gart_iommu_domain_alloc,
.domain_destroy = gart_iommu_domain_destroy, .domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev, .attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev, .detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map, .map = gart_iommu_map,
......
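Moving the geometry setup from gart_iommu_attach_dev() into gart_iommu_domain_alloc() fixes the aperture once per domain, computed from the gart device's iovmm_base and page_count. A quick check of the aperture_end arithmetic; the base and page count below are illustrative, not taken from a particular board:

#include <stdio.h>

#define GART_PAGE_SHIFT 12                     /* the GART maps 4 KiB pages */
#define GART_PAGE_SIZE  (1UL << GART_PAGE_SHIFT)

int main(void)
{
        unsigned long iovmm_base = 0x58000000; /* illustrative */
        unsigned long page_count = 8192;       /* 32 MiB aperture */
        unsigned long aperture_end = iovmm_base + page_count * GART_PAGE_SIZE - 1;

        /* prints: aperture 0x58000000-0x59ffffff */
        printf("aperture %#lx-%#lx\n", iovmm_base, aperture_end);
        return 0;
}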
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/bitops.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/iommu.h> #include <linux/iommu.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -24,6 +25,8 @@ struct tegra_smmu { ...@@ -24,6 +25,8 @@ struct tegra_smmu {
struct tegra_mc *mc; struct tegra_mc *mc;
const struct tegra_smmu_soc *soc; const struct tegra_smmu_soc *soc;
unsigned long pfn_mask;
unsigned long *asids; unsigned long *asids;
struct mutex lock; struct mutex lock;
...@@ -31,7 +34,7 @@ struct tegra_smmu { ...@@ -31,7 +34,7 @@ struct tegra_smmu {
}; };
struct tegra_smmu_as { struct tegra_smmu_as {
struct iommu_domain *domain; struct iommu_domain domain;
struct tegra_smmu *smmu; struct tegra_smmu *smmu;
unsigned int use_count; unsigned int use_count;
struct page *count; struct page *count;
...@@ -40,6 +43,11 @@ struct tegra_smmu_as { ...@@ -40,6 +43,11 @@ struct tegra_smmu_as {
u32 attr; u32 attr;
}; };
static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
return container_of(dom, struct tegra_smmu_as, domain);
}
static inline void smmu_writel(struct tegra_smmu *smmu, u32 value, static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
unsigned long offset) unsigned long offset)
{ {
...@@ -105,8 +113,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) ...@@ -105,8 +113,6 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
#define SMMU_PDE_SHIFT 22 #define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12 #define SMMU_PTE_SHIFT 12
#define SMMU_PFN_MASK 0x000fffff
#define SMMU_PD_READABLE (1 << 31) #define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30) #define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29) #define SMMU_PD_NONSECURE (1 << 29)
...@@ -224,30 +230,32 @@ static bool tegra_smmu_capable(enum iommu_cap cap) ...@@ -224,30 +230,32 @@ static bool tegra_smmu_capable(enum iommu_cap cap)
return false; return false;
} }
static int tegra_smmu_domain_init(struct iommu_domain *domain) static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{ {
struct tegra_smmu_as *as; struct tegra_smmu_as *as;
unsigned int i; unsigned int i;
uint32_t *pd; uint32_t *pd;
if (type != IOMMU_DOMAIN_UNMANAGED)
return NULL;
as = kzalloc(sizeof(*as), GFP_KERNEL); as = kzalloc(sizeof(*as), GFP_KERNEL);
if (!as) if (!as)
return -ENOMEM; return NULL;
as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;
as->domain = domain;
as->pd = alloc_page(GFP_KERNEL | __GFP_DMA); as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
if (!as->pd) { if (!as->pd) {
kfree(as); kfree(as);
return -ENOMEM; return NULL;
} }
as->count = alloc_page(GFP_KERNEL); as->count = alloc_page(GFP_KERNEL);
if (!as->count) { if (!as->count) {
__free_page(as->pd); __free_page(as->pd);
kfree(as); kfree(as);
return -ENOMEM; return NULL;
} }
/* clear PDEs */ /* clear PDEs */
...@@ -264,14 +272,17 @@ static int tegra_smmu_domain_init(struct iommu_domain *domain) ...@@ -264,14 +272,17 @@ static int tegra_smmu_domain_init(struct iommu_domain *domain)
for (i = 0; i < SMMU_NUM_PDE; i++) for (i = 0; i < SMMU_NUM_PDE; i++)
pd[i] = 0; pd[i] = 0;
domain->priv = as; /* setup aperture */
as->domain.geometry.aperture_start = 0;
as->domain.geometry.aperture_end = 0xffffffff;
as->domain.geometry.force_aperture = true;
return 0; return &as->domain;
} }
static void tegra_smmu_domain_destroy(struct iommu_domain *domain) static void tegra_smmu_domain_free(struct iommu_domain *domain)
{ {
struct tegra_smmu_as *as = domain->priv; struct tegra_smmu_as *as = to_smmu_as(domain);
/* TODO: free page directory and page tables */ /* TODO: free page directory and page tables */
ClearPageReserved(as->pd); ClearPageReserved(as->pd);
...@@ -395,7 +406,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain, ...@@ -395,7 +406,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
struct device *dev) struct device *dev)
{ {
struct tegra_smmu *smmu = dev->archdata.iommu; struct tegra_smmu *smmu = dev->archdata.iommu;
struct tegra_smmu_as *as = domain->priv; struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node; struct device_node *np = dev->of_node;
struct of_phandle_args args; struct of_phandle_args args;
unsigned int index = 0; unsigned int index = 0;
...@@ -428,7 +439,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain, ...@@ -428,7 +439,7 @@ static int tegra_smmu_attach_dev(struct iommu_domain *domain,
static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{ {
struct tegra_smmu_as *as = domain->priv; struct tegra_smmu_as *as = to_smmu_as(domain);
struct device_node *np = dev->of_node; struct device_node *np = dev->of_node;
struct tegra_smmu *smmu = as->smmu; struct tegra_smmu *smmu = as->smmu;
struct of_phandle_args args; struct of_phandle_args args;
...@@ -481,7 +492,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, ...@@ -481,7 +492,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
smmu_flush_tlb_section(smmu, as->id, iova); smmu_flush_tlb_section(smmu, as->id, iova);
smmu_flush(smmu); smmu_flush(smmu);
} else { } else {
page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); page = pfn_to_page(pd[pde] & smmu->pfn_mask);
pt = page_address(page); pt = page_address(page);
} }
...@@ -503,7 +514,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) ...@@ -503,7 +514,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
u32 *pd = page_address(as->pd), *pt; u32 *pd = page_address(as->pd), *pt;
struct page *page; struct page *page;
page = pfn_to_page(pd[pde] & SMMU_PFN_MASK); page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
pt = page_address(page); pt = page_address(page);
/* /*
...@@ -524,7 +535,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) ...@@ -524,7 +535,7 @@ static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot) phys_addr_t paddr, size_t size, int prot)
{ {
struct tegra_smmu_as *as = domain->priv; struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu; struct tegra_smmu *smmu = as->smmu;
unsigned long offset; unsigned long offset;
struct page *page; struct page *page;
...@@ -548,7 +559,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -548,7 +559,7 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size) size_t size)
{ {
struct tegra_smmu_as *as = domain->priv; struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu; struct tegra_smmu *smmu = as->smmu;
unsigned long offset; unsigned long offset;
struct page *page; struct page *page;
...@@ -572,13 +583,13 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, ...@@ -572,13 +583,13 @@ static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova) dma_addr_t iova)
{ {
struct tegra_smmu_as *as = domain->priv; struct tegra_smmu_as *as = to_smmu_as(domain);
struct page *page; struct page *page;
unsigned long pfn; unsigned long pfn;
u32 *pte; u32 *pte;
pte = as_get_pte(as, iova, &page); pte = as_get_pte(as, iova, &page);
pfn = *pte & SMMU_PFN_MASK; pfn = *pte & as->smmu->pfn_mask;
return PFN_PHYS(pfn); return PFN_PHYS(pfn);
} }
...@@ -633,8 +644,8 @@ static void tegra_smmu_remove_device(struct device *dev) ...@@ -633,8 +644,8 @@ static void tegra_smmu_remove_device(struct device *dev)
static const struct iommu_ops tegra_smmu_ops = { static const struct iommu_ops tegra_smmu_ops = {
.capable = tegra_smmu_capable, .capable = tegra_smmu_capable,
.domain_init = tegra_smmu_domain_init, .domain_alloc = tegra_smmu_domain_alloc,
.domain_destroy = tegra_smmu_domain_destroy, .domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev, .attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev, .detach_dev = tegra_smmu_detach_dev,
.add_device = tegra_smmu_add_device, .add_device = tegra_smmu_add_device,
...@@ -702,6 +713,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, ...@@ -702,6 +713,10 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev,
smmu->dev = dev; smmu->dev = dev;
smmu->mc = mc; smmu->mc = mc;
smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
mc->soc->num_address_bits, smmu->pfn_mask);
value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);
if (soc->supports_request_limit) if (soc->supports_request_limit)
......
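The runtime pfn_mask computation replaces the hard-coded 20-bit SMMU_PFN_MASK: with 4 KiB pages, an SoC with num_address_bits physical address bits needs num_address_bits - PAGE_SHIFT PFN bits. A check of the arithmetic; BIT_MASK is simplified here (the kernel's also wraps the shift modulo BITS_PER_LONG):

#include <stdio.h>

#define PAGE_SHIFT   12
#define BIT_MASK(nr) (1UL << (nr))    /* simplified */

int main(void)
{
        unsigned int num_address_bits = 34;   /* illustrative SoC value */
        unsigned long pfn_mask = BIT_MASK(num_address_bits - PAGE_SHIFT) - 1;

        /* prints 0x3fffff: 22 PFN bits instead of the old fixed 20 */
        printf("%#lx\n", pfn_mask);
        return 0;
}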
...@@ -51,9 +51,33 @@ struct iommu_domain_geometry { ...@@ -51,9 +51,33 @@ struct iommu_domain_geometry {
bool force_aperture; /* DMA only allowed in mappable range? */ bool force_aperture; /* DMA only allowed in mappable range? */
}; };
/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API
implementation */
#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */
/*
* These are the possible domain types
*
* IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate
* devices
* IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses
* IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used
* for VMs
* IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations.
* This flag allows IOMMU drivers to implement
* certain optimizations for these domains
*/
#define IOMMU_DOMAIN_BLOCKED (0U)
#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \
__IOMMU_DOMAIN_DMA_API)
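With this encoding the domain types can be tested with plain bitwise AND: IOMMU_DOMAIN_UNMANAGED and IOMMU_DOMAIN_DMA both carry __IOMMU_DOMAIN_PAGING, and only the latter adds __IOMMU_DOMAIN_DMA_API. A self-contained check of the resulting values:

#include <stdio.h>

#define __IOMMU_DOMAIN_PAGING   (1U << 0)
#define __IOMMU_DOMAIN_DMA_API  (1U << 1)
#define __IOMMU_DOMAIN_PT       (1U << 2)

#define IOMMU_DOMAIN_BLOCKED    (0U)
#define IOMMU_DOMAIN_IDENTITY   (__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED  (__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA        (__IOMMU_DOMAIN_PAGING | __IOMMU_DOMAIN_DMA_API)

int main(void)
{
        unsigned types[] = { IOMMU_DOMAIN_BLOCKED, IOMMU_DOMAIN_IDENTITY,
                             IOMMU_DOMAIN_UNMANAGED, IOMMU_DOMAIN_DMA };

        /* prints 0x0, 0x4, 0x1 and 0x3 with their decoded feature bits */
        for (unsigned i = 0; i < 4; i++)
                printf("type %#x: paging=%d dma_api=%d identity=%d\n",
                       types[i],
                       !!(types[i] & __IOMMU_DOMAIN_PAGING),
                       !!(types[i] & __IOMMU_DOMAIN_DMA_API),
                       !!(types[i] & __IOMMU_DOMAIN_PT));
        return 0;
}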
struct iommu_domain { struct iommu_domain {
unsigned type;
const struct iommu_ops *ops; const struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler; iommu_fault_handler_t handler;
void *handler_token; void *handler_token;
struct iommu_domain_geometry geometry; struct iommu_domain_geometry geometry;
...@@ -113,8 +137,11 @@ enum iommu_attr { ...@@ -113,8 +137,11 @@ enum iommu_attr {
*/ */
struct iommu_ops { struct iommu_ops {
bool (*capable)(enum iommu_cap); bool (*capable)(enum iommu_cap);
int (*domain_init)(struct iommu_domain *domain);
void (*domain_destroy)(struct iommu_domain *domain); /* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
void (*domain_free)(struct iommu_domain *);
int (*attach_dev)(struct iommu_domain *domain, struct device *dev); int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev); void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova, int (*map)(struct iommu_domain *domain, unsigned long iova,
......
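With domain_init/domain_destroy gone, the driver owns the allocation and the core only fills in the generic fields afterwards. A simplified sketch of that core-side pattern; the real iommu_domain_alloc()/iommu_domain_free() in drivers/iommu/iommu.c are authoritative, and the names below are illustrative:

struct iommu_ops;

struct iommu_domain {
        unsigned type;
        const struct iommu_ops *ops;
};

struct iommu_ops {
        struct iommu_domain *(*domain_alloc)(unsigned type);
        void (*domain_free)(struct iommu_domain *domain);
};

static struct iommu_domain *core_domain_alloc(const struct iommu_ops *ops,
                                              unsigned type)
{
        struct iommu_domain *domain;

        if (!ops || !ops->domain_alloc)
                return 0;

        domain = ops->domain_alloc(type);   /* driver allocates its own struct */
        if (!domain)
                return 0;

        /* the core fills in the generic fields; no more domain->priv */
        domain->ops = ops;
        domain->type = type;
        return domain;
}

static void core_domain_free(struct iommu_domain *domain)
{
        if (domain && domain->ops->domain_free)
                domain->ops->domain_free(domain);
}

int main(void)
{
        /* with no ops wired up this is a no-op round trip */
        core_domain_free(core_domain_alloc(0, 0));
        return 0;
}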