Commit cd6438c5 authored by ZhengShunQian, committed by Joerg Roedel

iommu/rockchip: Reconstruct to support multi slaves

There are some IPs, such as the video encoder/decoder, that contain two
slave iommus, one for reading and the other for writing. They share the
same irq and clock as the master.

This patch reconstructs the driver to support this case by making the two
slaves share the same Page Directory, Page Tables and even the register
operations. That means every register access performed on the read MMU is
duplicated to the write MMU, and vice versa.
Signed-off-by: ZhengShunQian <zhengsq@rock-chips.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 92e963f5
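The scheme the patch describes above — one logical IOMMU fanning every register access out to several identical sub-MMUs — can be illustrated outside the kernel. The following standalone C sketch uses invented names throughout (fake_mmu, mmu_write_all, all_stalled; none of this is the driver's API): writes are broadcast to every register bank, and a status condition only counts as true once every bank reports it, so the per-instance status bit has to be folded to 0/1 before being ANDed into the accumulator.

/* Standalone sketch, not the kernel driver: models broadcasting a
 * register write to every sub-MMU and ANDing a status bit across them.
 * All names and values here are made up for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_MMU 2                      /* e.g. one read MMU + one write MMU */
#define REG_COUNT 8
#define REG_STATUS 0
#define STATUS_STALL_ACTIVE (1u << 2)  /* a bit above bit 0, on purpose */

static uint32_t fake_mmu[NUM_MMU][REG_COUNT]; /* stand-in for ioremapped banks */

/* Broadcast: every register write is duplicated to each sub-MMU. */
static void mmu_write_all(unsigned int reg, uint32_t val)
{
	for (int i = 0; i < NUM_MMU; i++)
		fake_mmu[i][reg] = val;
}

/* A condition holds only if it holds on every sub-MMU. The !! folds the
 * masked bit (here 0 or 4) down to 0 or 1 so the bool AND is sound. */
static bool all_stalled(void)
{
	bool active = true;

	for (int i = 0; i < NUM_MMU; i++)
		active &= !!(fake_mmu[i][REG_STATUS] & STATUS_STALL_ACTIVE);
	return active;
}

int main(void)
{
	fake_mmu[0][REG_STATUS] = STATUS_STALL_ACTIVE;  /* only one bank stalled */
	printf("all stalled? %d\n", all_stalled());     /* prints 0 */

	mmu_write_all(REG_STATUS, STATUS_STALL_ACTIVE); /* broadcast to both */
	printf("all stalled? %d\n", all_stalled());     /* prints 1 */
	return 0;
}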
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -86,7 +86,8 @@ struct rk_iommu_domain {
 struct rk_iommu {
 	struct device *dev;
-	void __iomem *base;
+	void __iomem **bases;
+	int num_mmu;
 	int irq;
 	struct list_head node; /* entry in rk_iommu_domain.iommus */
 	struct iommu_domain *domain; /* domain to which iommu is attached */
@@ -271,47 +272,70 @@ static u32 rk_iova_page_offset(dma_addr_t iova)
 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
 }
 
-static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+static u32 rk_iommu_read(void __iomem *base, u32 offset)
 {
-	return readl(iommu->base + offset);
+	return readl(base + offset);
 }
 
-static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
 {
-	writel(value, iommu->base + offset);
+	writel(value, base + offset);
 }
 
 static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
 {
-	writel(command, iommu->base + RK_MMU_COMMAND);
+	int i;
+
+	for (i = 0; i < iommu->num_mmu; i++)
+		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
+}
+
+static void rk_iommu_base_command(void __iomem *base, u32 command)
+{
+	writel(command, base + RK_MMU_COMMAND);
 }
 
 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
 			       size_t size)
 {
+	int i;
 	dma_addr_t iova_end = iova + size;
 	/*
 	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
 	 * entire iotlb rather than iterate over individual iovas.
 	 */
-	for (; iova < iova_end; iova += SPAGE_SIZE)
-		rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		/* Walk a per-MMU copy so every MMU sees the full range. */
+		dma_addr_t iova_tmp = iova;
+
+		for (; iova_tmp < iova_end; iova_tmp += SPAGE_SIZE)
+			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE,
+				       iova_tmp);
+	}
 }
 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
 {
-	return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+	bool active = true;
+	int i;
+
+	/* !! folds the status bit down to 0/1 so the bool AND is sound. */
+	for (i = 0; i < iommu->num_mmu; i++)
+		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+			     RK_MMU_STATUS_STALL_ACTIVE);
+
+	return active;
 }
 
 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
 {
-	return rk_iommu_read(iommu, RK_MMU_STATUS) &
-	       RK_MMU_STATUS_PAGING_ENABLED;
+	bool enable = true;
+	int i;
+
+	for (i = 0; i < iommu->num_mmu; i++)
+		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+			     RK_MMU_STATUS_PAGING_ENABLED);
+
+	return enable;
 }
 
 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (rk_iommu_is_stall_active(iommu))
 		return 0;
@@ -324,15 +348,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (!rk_iommu_is_stall_active(iommu))
 		return 0;
@@ -341,15 +366,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (rk_iommu_is_paging_enabled(iommu))
 		return 0;
@@ -358,15 +384,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 
 	if (!rk_iommu_is_paging_enabled(iommu))
 		return 0;
@@ -375,41 +402,49 @@ static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 
 	ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
 	if (ret)
-		dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
-			rk_iommu_read(iommu, RK_MMU_STATUS));
+		for (i = 0; i < iommu->num_mmu; i++)
+			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
 	return ret;
 }
 
 static int rk_iommu_force_reset(struct rk_iommu *iommu)
 {
-	int ret;
+	int ret, i;
 	u32 dte_addr;
 
 	/*
 	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
 	 * and verifying that upper 5 nybbles are read back.
 	 */
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
-
-	dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
-	if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
-		dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
-		return -EFAULT;
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
+		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+			return -EFAULT;
+		}
 	}
 
 	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
 
-	ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
-			  FORCE_RESET_TIMEOUT);
-	if (ret)
-		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+	for (i = 0; i < iommu->num_mmu; i++) {
+		ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
+				  FORCE_RESET_TIMEOUT);
+		if (ret) {
+			dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+			return ret;
+		}
+	}
 
-	return ret;
+	return 0;
 }
 
-static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
 {
+	void __iomem *base = iommu->bases[index];
 	u32 dte_index, pte_index, page_offset;
 	u32 mmu_dte_addr;
 	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
@@ -425,7 +460,7 @@ static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
 	pte_index = rk_iova_pte_index(iova);
 	page_offset = rk_iova_page_offset(iova);
 
-	mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
 	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
 	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
@@ -460,17 +495,21 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 	u32 status;
 	u32 int_status;
 	dma_addr_t iova;
+	irqreturn_t ret = IRQ_NONE;
+	int i;
 
-	int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
-	if (int_status == 0)
-		return IRQ_NONE;
+	for (i = 0; i < iommu->num_mmu; i++) {
+		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
+		if (int_status == 0)
+			continue;
 
-	iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+		ret = IRQ_HANDLED;
+		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
 
 	if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
 		int flags;
 
-		status = rk_iommu_read(iommu, RK_MMU_STATUS);
+		status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
 		flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
 				IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
@@ -478,7 +517,7 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 			&iova,
 			(flags == IOMMU_FAULT_WRITE) ? "write" : "read");
 
-		log_iova(iommu, iova);
+		log_iova(iommu, i, iova);
 
 		/*
 		 * Report page fault to any installed handlers.
@@ -491,8 +530,8 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 		else
 			dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
 
-		rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-		rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
 	}
 
 	if (int_status & RK_MMU_IRQ_BUS_ERROR)
@@ -502,9 +541,10 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
 		dev_err(iommu->dev, "unexpected int_status: %#08x\n",
 			int_status);
 
-	rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+	}
 
-	return IRQ_HANDLED;
+	return ret;
 }
 
 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -746,7 +786,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
-	int ret;
+	int ret, i;
 	phys_addr_t dte_addr;
 
 	/*
@@ -773,9 +813,11 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 		return ret;
 
 	dte_addr = virt_to_phys(rk_domain->dt);
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
-	rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-	rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+	}
 
 	ret = rk_iommu_enable_paging(iommu);
 	if (ret)
@@ -798,6 +840,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	struct rk_iommu *iommu;
 	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
 	unsigned long flags;
+	int i;
 
 	/* Allow 'virtual devices' (eg drm) to detach from domain */
 	iommu = rk_iommu_from_dev(dev);
@@ -811,8 +854,10 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
 	/* Ignore error while disabling, just keep going */
 	rk_iommu_enable_stall(iommu);
 	rk_iommu_disable_paging(iommu);
-	rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
-	rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+	for (i = 0; i < iommu->num_mmu; i++) {
+		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+	}
 	rk_iommu_disable_stall(iommu);
 
 	devm_free_irq(dev, iommu->irq, iommu);
@@ -988,6 +1033,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct rk_iommu *iommu;
 	struct resource *res;
+	int i;
 
 	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -995,11 +1041,21 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, iommu);
 	iommu->dev = dev;
+	iommu->num_mmu = 0;
+	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * pdev->num_resources,
+				    GFP_KERNEL);
+	if (!iommu->bases)
+		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	iommu->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(iommu->base))
-		return PTR_ERR(iommu->base);
+	for (i = 0; i < pdev->num_resources; i++) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(iommu->bases[i]))
+			continue;
+		iommu->num_mmu++;
+	}
+	if (iommu->num_mmu == 0)
+		return PTR_ERR(iommu->bases[0]);
 
 	iommu->irq = platform_get_irq(pdev, 0);
 	if (iommu->irq < 0) {
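The probe hunk above discovers however many register banks the platform device advertises, keeping only those that map successfully. The same counting logic can be sketched in plain user-space C. Everything below (struct fake_res, fake_ioremap, the sample addresses) is a made-up stand-in, not the kernel's platform-device API; it only mirrors the shape of the loop: try every bank, skip failures, bail out if none mapped.

/* Standalone sketch of the probe-time counting loop; all names and
 * addresses are hypothetical, not kernel API. */
#include <stddef.h>
#include <stdio.h>

struct fake_res { unsigned long start; int valid; };

/* NULL models a failed ioremap of one register bank. */
static void *fake_ioremap(const struct fake_res *r)
{
	return r->valid ? (void *)r->start : NULL;
}

int main(void)
{
	struct fake_res res[] = {
		{ 0xff930000, 1 },  /* hypothetical read-MMU bank */
		{ 0xff930040, 1 },  /* hypothetical write-MMU bank */
	};
	void *bases[2];
	int num_mmu = 0;

	/* Try every advertised bank; count only the ones that map. */
	for (size_t i = 0; i < sizeof(res) / sizeof(res[0]); i++) {
		void *base = fake_ioremap(&res[i]);
		if (!base)
			continue;
		bases[num_mmu++] = base;
	}

	if (num_mmu == 0) {
		fprintf(stderr, "no usable MMU banks\n");
		return 1;
	}
	printf("found %d MMU bank(s), first at %p\n", num_mmu, bases[0]);
	return 0;
}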