Commit 9d4e7a24 authored by Cho KyongHo, committed by Joerg Roedel

iommu/exynos: Change rwlock to spinlock

Since acquiring the read lock is no more frequent than acquiring the write lock, an rwlock provides no benefit here. This commit therefore changes the rwlock to a spinlock.
Reviewed-by: Grant Grundler <grundler@chromium.org>
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
Signed-off-by: Shaik Ameer Basha <shaik.ameer@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 1fab7fa7
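
For reference, the conversion pattern applied throughout the driver boils down to the following sketch. The struct and function names (my_data, my_set_active, my_is_active) are hypothetical stand-ins for illustration only, not code from exynos-iommu.c:

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical device state guarded by a single lock (illustration only). */
struct my_data {
	spinlock_t lock;		/* was: rwlock_t lock; */
	bool active;
};

static void my_data_init(struct my_data *d)
{
	spin_lock_init(&d->lock);	/* was: rwlock_init(&d->lock); */
	d->active = false;
}

/* Process context: the lock is also taken in IRQ context, so disable IRQs. */
static void my_set_active(struct my_data *d, bool active)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);	/* was: write_lock_irqsave() */
	d->active = active;
	spin_unlock_irqrestore(&d->lock, flags);
}

/* IRQ handler path: interrupts are already off, plain spin_lock() suffices. */
static bool my_is_active(struct my_data *d)
{
	bool ret;

	spin_lock(&d->lock);		/* was: read_lock() */
	ret = d->active;
	spin_unlock(&d->lock);

	return ret;
}

As in the patch below, process-context paths take the lock with spin_lock_irqsave() because the same lock is taken in the interrupt handler, where plain spin_lock() is enough since interrupts are already disabled.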
@@ -164,7 +164,7 @@ struct sysmmu_drvdata {
 	struct clk *clk;
 	struct clk *clk_master;
 	int activations;
-	rwlock_t lock;
+	spinlock_t lock;
 	struct iommu_domain *domain;
 	phys_addr_t pgtable;
 };
@@ -263,12 +263,13 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 	unsigned long addr = -1;
 	int ret = -ENOSYS;
 
-	read_lock(&data->lock);
-
 	WARN_ON(!is_sysmmu_active(data));
 
+	spin_lock(&data->lock);
+
 	if (!IS_ERR(data->clk_master))
 		clk_enable(data->clk_master);
+
 	itype = (enum exynos_sysmmu_inttype)
 		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
 	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
@@ -302,7 +303,7 @@ static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 	if (!IS_ERR(data->clk_master))
 		clk_disable(data->clk_master);
 
-	read_unlock(&data->lock);
+	spin_unlock(&data->lock);
 
 	return IRQ_HANDLED;
 }
@@ -312,7 +313,7 @@ static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
 	unsigned long flags;
 	bool disabled = false;
 
-	write_lock_irqsave(&data->lock, flags);
+	spin_lock_irqsave(&data->lock, flags);
 
 	if (!set_sysmmu_inactive(data))
 		goto finish;
@@ -330,7 +331,7 @@ static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
 	data->pgtable = 0;
 	data->domain = NULL;
 finish:
-	write_unlock_irqrestore(&data->lock, flags);
+	spin_unlock_irqrestore(&data->lock, flags);
 
 	if (disabled)
 		dev_dbg(data->sysmmu, "Disabled\n");
@@ -353,7 +354,7 @@ static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
 	int ret = 0;
 	unsigned long flags;
 
-	write_lock_irqsave(&data->lock, flags);
+	spin_lock_irqsave(&data->lock, flags);
 
 	if (!set_sysmmu_active(data)) {
 		if (WARN_ON(pgtable != data->pgtable)) {
@@ -384,7 +385,7 @@ static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
 
 	dev_dbg(data->sysmmu, "Enabled\n");
 finish:
-	write_unlock_irqrestore(&data->lock, flags);
+	spin_unlock_irqrestore(&data->lock, flags);
 
 	return ret;
 }
@@ -431,7 +432,7 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
 	unsigned long flags;
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
 
-	read_lock_irqsave(&data->lock, flags);
+	spin_lock_irqsave(&data->lock, flags);
 
 	if (is_sysmmu_active(data)) {
 		unsigned int maj;
@@ -465,7 +466,7 @@ static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
 		dev_dbg(data->sysmmu, "Disabled. Skipping invalidating TLB.\n");
 	}
 
-	read_unlock_irqrestore(&data->lock, flags);
+	spin_unlock_irqrestore(&data->lock, flags);
 }
 
 void exynos_sysmmu_tlb_invalidate(struct device *dev)
@@ -473,7 +474,7 @@ void exynos_sysmmu_tlb_invalidate(struct device *dev)
 	unsigned long flags;
 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
 
-	read_lock_irqsave(&data->lock, flags);
+	spin_lock_irqsave(&data->lock, flags);
 
 	if (is_sysmmu_active(data)) {
 		if (!IS_ERR(data->clk_master))
@@ -488,7 +489,7 @@ void exynos_sysmmu_tlb_invalidate(struct device *dev)
 		dev_dbg(data->sysmmu, "Disabled. Skipping invalidating TLB.\n");
 	}
 
-	read_unlock_irqrestore(&data->lock, flags);
+	spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static int exynos_sysmmu_probe(struct platform_device *pdev)
@@ -543,7 +544,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
 	}
 
 	data->sysmmu = dev;
-	rwlock_init(&data->lock);
+	spin_lock_init(&data->lock);
 	INIT_LIST_HEAD(&data->node);
 
 	platform_set_drvdata(pdev, data);