Commit 555e74ea authored by Rajendra Nayak, committed by Paul Walmsley

OMAP2+: clockdomain: Add per clkdm lock to prevent concurrent state programming

Since clkdm state programming is now done from within the hwmod
framework (which uses a per-hwmod lock) instead of from the clock
framework (which used a global lock), per-clkdm locking is needed to
prevent races between different hwmods/modules belonging to the same
clockdomain that program the clkdm state concurrently.
Signed-off-by: Rajendra Nayak <rnayak@ti.com>
Signed-off-by: Benoit Cousson <b-cousson@ti.com>
Cc: Paul Walmsley <paul@pwsan.com>
Signed-off-by: Paul Walmsley <paul@pwsan.com>
parent b86cfb52
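For readers skimming the patch, here is a standalone C sketch of the locking pattern it introduces (pthreads rather than the kernel spinlock API; the clkdm_sketch type and the *_sketch functions are illustrative only, not kernel code): the hardware-supervision flag update and the clockdomain state programming are serialized on a lock embedded in the clockdomain itself, so two modules sharing a clkdm cannot race.

/*
 * Illustrative sketch only -- not part of the patch.  Each clockdomain
 * carries its own lock; every state-programming path takes that lock
 * around both the flag update and the (stubbed) register write.
 */
#include <pthread.h>
#include <stdio.h>

#define HWSUP_ENABLED 0x1

struct clkdm_sketch {
	const char *name;
	unsigned int flags;		/* HW-supervised vs. SW-supervised state */
	pthread_mutex_t lock;		/* per-clkdm lock, as added by this patch */
};

static void clkdm_allow_idle_sketch(struct clkdm_sketch *clkdm)
{
	pthread_mutex_lock(&clkdm->lock);
	clkdm->flags |= HWSUP_ENABLED;
	/* arch-specific register programming would happen here */
	pthread_mutex_unlock(&clkdm->lock);
}

static void clkdm_wakeup_sketch(struct clkdm_sketch *clkdm)
{
	pthread_mutex_lock(&clkdm->lock);
	clkdm->flags &= ~HWSUP_ENABLED;
	/* arch-specific register programming would happen here */
	pthread_mutex_unlock(&clkdm->lock);
}

int main(void)
{
	struct clkdm_sketch l4 = {
		.name = "l4_clkdm",
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	/* Two different hwmods in the same clkdm would call these paths
	 * concurrently; the shared lock keeps each transition atomic. */
	clkdm_allow_idle_sketch(&l4);
	clkdm_wakeup_sketch(&l4);
	printf("%s flags: %#x\n", l4.name, l4.flags);
	return 0;
}

Build with cc -pthread.  The kernel patch uses spin_lock_irqsave()/spin_unlock_irqrestore() rather than a mutex, since these paths may run in contexts where sleeping is not allowed.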
@@ -92,6 +92,8 @@ static int _clkdm_register(struct clockdomain *clkdm)
 	pwrdm_add_clkdm(pwrdm, clkdm);
 
+	spin_lock_init(&clkdm->lock);
+
 	pr_debug("clockdomain: registered %s\n", clkdm->name);
 
 	return 0;
@@ -690,6 +692,9 @@ int clkdm_clear_all_sleepdeps(struct clockdomain *clkdm)
  */
 int clkdm_sleep(struct clockdomain *clkdm)
 {
+	int ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return -EINVAL;
@@ -704,9 +709,11 @@ int clkdm_sleep(struct clockdomain *clkdm)
 	pr_debug("clockdomain: forcing sleep on %s\n", clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
 	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
-	return arch_clkdm->clkdm_sleep(clkdm);
+	ret = arch_clkdm->clkdm_sleep(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+	return ret;
 }
 /**
@@ -720,6 +727,9 @@ int clkdm_sleep(struct clockdomain *clkdm)
  */
 int clkdm_wakeup(struct clockdomain *clkdm)
 {
+	int ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return -EINVAL;
@@ -734,9 +744,11 @@ int clkdm_wakeup(struct clockdomain *clkdm)
 	pr_debug("clockdomain: forcing wakeup on %s\n", clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
 	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
-	return arch_clkdm->clkdm_wakeup(clkdm);
+	ret = arch_clkdm->clkdm_wakeup(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+	return ret;
 }
 /**
@@ -751,6 +763,8 @@ int clkdm_wakeup(struct clockdomain *clkdm)
  */
 void clkdm_allow_idle(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm)
 		return;
@@ -766,10 +780,11 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: enabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
 	clkdm->_flags |= _CLKDM_FLAG_HWSUP_ENABLED;
 	arch_clkdm->clkdm_allow_idle(clkdm);
 	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 }
 /**
@@ -783,6 +798,8 @@ void clkdm_allow_idle(struct clockdomain *clkdm)
  */
 void clkdm_deny_idle(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm)
 		return;
@@ -798,9 +815,10 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
+	spin_lock_irqsave(&clkdm->lock, flags);
 	clkdm->_flags &= ~_CLKDM_FLAG_HWSUP_ENABLED;
 	arch_clkdm->clkdm_deny_idle(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 }
 /**
@@ -816,16 +834,25 @@ void clkdm_deny_idle(struct clockdomain *clkdm)
  */
 bool clkdm_in_hwsup(struct clockdomain *clkdm)
 {
+	bool ret;
+	unsigned long flags;
+
 	if (!clkdm)
 		return false;
 
-	return (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+	spin_lock_irqsave(&clkdm->lock, flags);
+	ret = (clkdm->_flags & _CLKDM_FLAG_HWSUP_ENABLED) ? true : false;
+	spin_unlock_irqrestore(&clkdm->lock, flags);
+
+	return ret;
 }
 
 /* Clockdomain-to-clock/hwmod framework interface code */
 
 static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_enable)
 		return -EINVAL;
@@ -837,9 +864,11 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 	if ((atomic_inc_return(&clkdm->usecount) > 1) && autodeps)
 		return 0;
 
+	spin_lock_irqsave(&clkdm->lock, flags);
 	arch_clkdm->clkdm_clk_enable(clkdm);
 	pwrdm_wait_transition(clkdm->pwrdm.ptr);
 	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 
 	pr_debug("clockdomain: clkdm %s: enabled\n", clkdm->name);
@@ -848,6 +877,8 @@ static int _clkdm_clk_hwmod_enable(struct clockdomain *clkdm)
 
 static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
 {
+	unsigned long flags;
+
 	if (!clkdm || !arch_clkdm || !arch_clkdm->clkdm_clk_disable)
 		return -EINVAL;
@@ -859,8 +890,10 @@ static int _clkdm_clk_hwmod_disable(struct clockdomain *clkdm)
 	if (atomic_dec_return(&clkdm->usecount) > 0)
 		return 0;
 
+	spin_lock_irqsave(&clkdm->lock, flags);
 	arch_clkdm->clkdm_clk_disable(clkdm);
 	pwrdm_clkdm_state_switch(clkdm);
+	spin_unlock_irqrestore(&clkdm->lock, flags);
 
 	pr_debug("clockdomain: clkdm %s: disabled\n", clkdm->name);
...
@@ -17,6 +17,7 @@
 #define __ARCH_ARM_MACH_OMAP2_CLOCKDOMAIN_H
 
 #include <linux/init.h>
+#include <linux/spinlock.h>
 
 #include "powerdomain.h"
 #include <plat/clock.h>
@@ -128,6 +129,7 @@ struct clockdomain {
 	const struct omap_chip_id omap_chip;
 	atomic_t usecount;
 	struct list_head node;
+	spinlock_t lock;
 };
 
 /**
...
@@ -183,7 +183,8 @@ static int omap2_clkdm_clk_enable(struct clockdomain *clkdm)
 		_clkdm_add_autodeps(clkdm);
 		_enable_hwsup(clkdm);
 	} else {
-		clkdm_wakeup(clkdm);
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap2_clkdm_wakeup(clkdm);
 	}
 
 	return 0;
@@ -205,7 +206,8 @@ static int omap2_clkdm_clk_disable(struct clockdomain *clkdm)
 		_clkdm_del_autodeps(clkdm);
 		_enable_hwsup(clkdm);
 	} else {
-		clkdm_sleep(clkdm);
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap2_clkdm_sleep(clkdm);
 	}
 
 	return 0;
...
@@ -95,13 +95,8 @@ static void omap4_clkdm_deny_idle(struct clockdomain *clkdm)
 
 static int omap4_clkdm_clk_enable(struct clockdomain *clkdm)
 {
-	bool hwsup = false;
-
-	hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
-					clkdm->cm_inst, clkdm->clkdm_offs);
-	if (!hwsup)
-		clkdm_wakeup(clkdm);
+	if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+		return omap4_clkdm_wakeup(clkdm);
 
 	return 0;
 }
@@ -113,8 +108,8 @@ static int omap4_clkdm_clk_disable(struct clockdomain *clkdm)
 	hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
 					clkdm->cm_inst, clkdm->clkdm_offs);
-	if (!hwsup)
-		clkdm_sleep(clkdm);
+	if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
+		omap4_clkdm_sleep(clkdm);
 
 	return 0;
 }
...