Commit 5ef7748b authored by Stephen Boyd

Merge branches 'clk-qcom-set-rate-gate', 'clk-core-set-rate-gate', 'clk-core-duty-cycle', 'clk-si-prepare' and 'clk-imx-gpio-gates' into clk-next

* clk-qcom-set-rate-gate:
  clk: qcom: drop CLK_SET_RATE_GATE from sdc clocks

* clk-core-set-rate-gate:
  clk: fix CLK_SET_RATE_GATE with clock rate protection

* clk-core-duty-cycle:
  clk: add duty cycle support

* clk-si-prepare:
  :  - SI544/SI514 clk on/off support
  clk-si514, clk-si544: Implement prepare/unprepare/is_prepared operations

* clk-imx-gpio-gates:
  :  - i.MX6UL GPIO clock gates in CCM CCGR
  clk: imx6ul: remove clks_init_on array
  clk: imx6ul: add GPIO clock gates
  dt-bindings: clock: imx6ul: Do not change the clock definition order
@@ -74,6 +74,33 @@ static int si514_enable_output(struct clk_si514 *data, bool enable)
SI514_CONTROL_OE, enable ? SI514_CONTROL_OE : 0);
}
static int si514_prepare(struct clk_hw *hw)
{
struct clk_si514 *data = to_clk_si514(hw);
return si514_enable_output(data, true);
}
static void si514_unprepare(struct clk_hw *hw)
{
struct clk_si514 *data = to_clk_si514(hw);
si514_enable_output(data, false);
}
static int si514_is_prepared(struct clk_hw *hw)
{
struct clk_si514 *data = to_clk_si514(hw);
unsigned int val;
int err;
err = regmap_read(data->regmap, SI514_REG_CONTROL, &val);
if (err < 0)
return err;
return !!(val & SI514_CONTROL_OE);
}
/* Retrieve clock multiplier and dividers from hardware */
static int si514_get_muldiv(struct clk_si514 *data,
struct clk_si514_muldiv *settings)
@@ -235,12 +262,17 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si514 *data = to_clk_si514(hw);
struct clk_si514_muldiv settings;
unsigned int old_oe_state;
int err;
err = si514_calc_muldiv(&settings, rate);
if (err)
return err;
err = regmap_read(data->regmap, SI514_REG_CONTROL, &old_oe_state);
if (err)
return err;
si514_enable_output(data, false);
err = si514_set_muldiv(data, &settings);
@@ -255,12 +287,16 @@ static int si514_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
if (old_oe_state & SI514_CONTROL_OE)
si514_enable_output(data, true);
return err;
}
static const struct clk_ops si514_clk_ops = {
.prepare = si514_prepare,
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
.round_rate = si514_round_rate,
.set_rate = si514_set_rate,
......
@@ -86,6 +86,33 @@ static int si544_enable_output(struct clk_si544 *data, bool enable)
SI544_OE_STATE_ODC_OE, enable ? SI544_OE_STATE_ODC_OE : 0);
}
static int si544_prepare(struct clk_hw *hw)
{
struct clk_si544 *data = to_clk_si544(hw);
return si544_enable_output(data, true);
}
static void si544_unprepare(struct clk_hw *hw)
{
struct clk_si544 *data = to_clk_si544(hw);
si544_enable_output(data, false);
}
static int si544_is_prepared(struct clk_hw *hw)
{
struct clk_si544 *data = to_clk_si544(hw);
unsigned int val;
int err;
err = regmap_read(data->regmap, SI544_REG_OE_STATE, &val);
if (err < 0)
return err;
return !!(val & SI544_OE_STATE_ODC_OE);
}
/* Retrieve clock multiplier and dividers from hardware */
static int si544_get_muldiv(struct clk_si544 *data,
struct clk_si544_muldiv *settings)
@@ -273,6 +300,7 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_si544 *data = to_clk_si544(hw);
struct clk_si544_muldiv settings;
unsigned int old_oe_state;
int err;
if (!is_valid_frequency(data, rate))
@@ -282,6 +310,10 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
if (err)
return err;
err = regmap_read(data->regmap, SI544_REG_OE_STATE, &old_oe_state);
if (err)
return err;
si544_enable_output(data, false);
/* Allow FCAL for this frequency update */
@@ -303,12 +335,16 @@ static int si544_set_rate(struct clk_hw *hw, unsigned long rate,
/* Applying a new frequency can take up to 10ms */
usleep_range(10000, 12000);
if (old_oe_state & SI544_OE_STATE_ODC_OE)
si544_enable_output(data, true);
return err;
}
static const struct clk_ops si544_clk_ops = {
.prepare = si544_prepare,
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
.round_rate = si544_round_rate,
.set_rate = si544_set_rate,
......
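With .prepare/.unprepare/.is_prepared wired up in both drivers above, an ordinary clk consumer can now switch the Si514/Si544 output on and off through the generic prepare path. A minimal consumer-side sketch, assuming a hypothetical platform driver and clock lookup (none of the names below are part of this commit):

#include <linux/clk.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *refclk;
	int ret;

	/* hypothetical consumer device fed by the Si514/Si544 output */
	refclk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(refclk))
		return PTR_ERR(refclk);

	/* reaches si514_prepare()/si544_prepare(): output driver enabled */
	ret = clk_prepare_enable(refclk);
	if (ret)
		return ret;

	/* ... use the device ... */

	/* reaches si514_unprepare()/si544_unprepare(): output disabled */
	clk_disable_unprepare(refclk);

	return 0;
}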
@@ -68,6 +68,7 @@ struct clk_core {
unsigned long max_rate;
unsigned long accuracy;
int phase;
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
struct hlist_head clks;
@@ -691,6 +692,9 @@ static void clk_core_unprepare(struct clk_core *core)
"Unpreparing critical %s\n", core->name))
return;
if (core->flags & CLK_SET_RATE_GATE)
clk_core_rate_unprotect(core);
if (--core->prepare_count > 0)
return;
@@ -765,6 +769,16 @@ static int clk_core_prepare(struct clk_core *core)
core->prepare_count++;
/*
 * CLK_SET_RATE_GATE is a special case of clock protection.
 * Instead of a consumer claiming exclusive rate control, it is
 * actually the provider which prevents any consumer from performing
 * any operation which could result in a rate change or rate glitch
 * while the clock is prepared.
 */
if (core->flags & CLK_SET_RATE_GATE)
clk_core_rate_protect(core);
return 0;
unprepare:
clk_core_unprepare(core->parent);
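The comment above describes the new mechanism: preparing a CLK_SET_RATE_GATE clock now takes a rate-protection reference on behalf of the provider, which is why the explicit prepare_count check in clk_core_set_rate_nolock() can be dropped in the next hunk. A consumer-side sketch of the resulting behaviour, assuming a hypothetical clock registered with CLK_SET_RATE_GATE:

#include <linux/clk.h>

static int example_rate_gate(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * The prepare path took rate protection, so changing to a different
	 * rate is now rejected with -EBUSY.
	 */
	ret = clk_set_rate(clk, 100000000);

	/* clk_core_unprepare() drops the protection again */
	clk_disable_unprepare(clk);

	/* with the clock unprepared, the rate may be changed */
	return clk_set_rate(clk, 100000000);
}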
@@ -1888,9 +1902,6 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
if (clk_core_rate_is_protected(core))
return -EBUSY;
if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
return -EBUSY;
/* calculate new rates and get the topmost changed clock */
top = clk_calc_new_rates(core, req_rate);
if (!top)
@@ -2402,6 +2413,172 @@ int clk_get_phase(struct clk *clk)
}
EXPORT_SYMBOL_GPL(clk_get_phase);
static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
{
/* Assume a default value of 50% */
core->duty.num = 1;
core->duty.den = 2;
}
static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
{
struct clk_duty *duty = &core->duty;
int ret = 0;
if (!core->ops->get_duty_cycle)
return clk_core_update_duty_cycle_parent_nolock(core);
ret = core->ops->get_duty_cycle(core->hw, duty);
if (ret)
goto reset;
/* Don't trust the clock provider too much */
if (duty->den == 0 || duty->num > duty->den) {
ret = -EINVAL;
goto reset;
}
return 0;
reset:
clk_core_reset_duty_cycle_nolock(core);
return ret;
}
static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
{
int ret = 0;
if (core->parent &&
core->flags & CLK_DUTY_CYCLE_PARENT) {
ret = clk_core_update_duty_cycle_nolock(core->parent);
memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
} else {
clk_core_reset_duty_cycle_nolock(core);
}
return ret;
}
static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
struct clk_duty *duty);
static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
struct clk_duty *duty)
{
int ret;
lockdep_assert_held(&prepare_lock);
if (clk_core_rate_is_protected(core))
return -EBUSY;
trace_clk_set_duty_cycle(core, duty);
if (!core->ops->set_duty_cycle)
return clk_core_set_duty_cycle_parent_nolock(core, duty);
ret = core->ops->set_duty_cycle(core->hw, duty);
if (!ret)
memcpy(&core->duty, duty, sizeof(*duty));
trace_clk_set_duty_cycle_complete(core, duty);
return ret;
}
static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
struct clk_duty *duty)
{
int ret = 0;
if (core->parent &&
core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
}
return ret;
}
/**
* clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
* @clk: clock signal source
* @num: numerator of the duty cycle ratio to be applied
* @den: denominator of the duty cycle ratio to be applied
*
* Apply the duty cycle ratio if the ratio is valid and the clock can
* perform this operation.
*
* Returns (0) on success, a negative errno otherwise.
*/
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
{
int ret;
struct clk_duty duty;
if (!clk)
return 0;
/* sanity check the ratio */
if (den == 0 || num > den)
return -EINVAL;
duty.num = num;
duty.den = den;
clk_prepare_lock();
if (clk->exclusive_count)
clk_core_rate_unprotect(clk->core);
ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
if (clk->exclusive_count)
clk_core_rate_protect(clk->core);
clk_prepare_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
unsigned int scale)
{
struct clk_duty *duty = &core->duty;
int ret;
clk_prepare_lock();
ret = clk_core_update_duty_cycle_nolock(core);
if (!ret)
ret = mult_frac(scale, duty->num, duty->den);
clk_prepare_unlock();
return ret;
}
/**
* clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
* @clk: clock signal source
* @scale: scaling factor to be applied to represent the ratio as an integer
*
* Returns the duty cycle ratio of a clock node multiplied by the provided
* scaling factor, or negative errno on error.
*/
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
{
if (!clk)
return 0;
return clk_core_get_scaled_duty_cycle(clk->core, scale);
}
EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
@@ -2455,12 +2632,13 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
if (!c)
return;
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %-3d\n",
seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n",
level * 3 + 1, "",
30 - level * 3, c->name,
c->enable_count, c->prepare_count, c->protect_count,
clk_core_get_rate(c), clk_core_get_accuracy(c),
clk_core_get_phase(c));
clk_core_get_phase(c),
clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2482,9 +2660,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
seq_puts(s, " enable prepare protect \n");
seq_puts(s, " clock count count count rate accuracy phase\n");
seq_puts(s, "----------------------------------------------------------------------------------------\n");
seq_puts(s, " enable prepare protect duty\n");
seq_puts(s, " clock count count count rate accuracy phase cycle\n");
seq_puts(s, "---------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -2511,6 +2689,8 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
@@ -2572,6 +2752,7 @@ static const struct {
ENTRY(CLK_SET_RATE_UNGATE),
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
ENTRY(CLK_DUTY_CYCLE_PARENT),
#undef ENTRY
};
@@ -2610,6 +2791,17 @@ static int possible_parents_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(possible_parents);
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
struct clk_duty *duty = &core->duty;
seq_printf(s, "%u/%u\n", duty->num, duty->den);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
{
struct dentry *root;
@@ -2628,6 +2820,8 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
debugfs_create_file("clk_duty_cycle", 0444, root, core,
&clk_duty_cycle_fops);
if (core->num_parents > 1)
debugfs_create_file("clk_possible_parents", 0444, root, core,
@@ -2845,6 +3039,11 @@ static int __clk_core_init(struct clk_core *core)
else
core->phase = 0;
/*
* Set clk's duty cycle.
*/
clk_core_update_duty_cycle_nolock(core);
/*
* Set clk's rate. The preferred method is to use .recalc_rate. For
* simple clocks and lazy developers the default fallback is to use the
......
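The consumer entry points added above can be exercised as in the following sketch; the clock handle, the 1/4 ratio and the scale of 100 (i.e. percent) are illustrative only:

#include <linux/clk.h>

static int example_duty(struct clk *clk)
{
	int ret;

	/* request a 25% duty cycle as a numerator/denominator pair */
	ret = clk_set_duty_cycle(clk, 1, 4);
	if (ret)
		return ret;

	/* read it back scaled by 100, i.e. as a percentage */
	ret = clk_get_scaled_duty_cycle(clk, 100);
	if (ret < 0)
		return ret;

	pr_info("duty cycle: %d%%\n", ret);

	return 0;
}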
@@ -79,12 +79,6 @@ static const char *cko_sels[] = { "cko1", "cko2", };
static struct clk *clks[IMX6UL_CLK_END];
static struct clk_onecell_data clk_data;
static int const clks_init_on[] __initconst = {
IMX6UL_CLK_AIPSTZ1, IMX6UL_CLK_AIPSTZ2,
IMX6UL_CLK_AXI, IMX6UL_CLK_ARM, IMX6UL_CLK_ROM,
IMX6UL_CLK_MMDC_P0_FAST, IMX6UL_CLK_MMDC_P0_IPG,
};
static const struct clk_div_table clk_enet_ref_table[] = {
{ .val = 0, .div = 20, },
{ .val = 1, .div = 10, },
@@ -129,7 +123,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
void __iomem *base;
int i;
clks[IMX6UL_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
@@ -336,8 +329,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_AHB] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* CCGR0 */
clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2("aips_tz1", "ahb", base + 0x68, 0);
clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2("aips_tz2", "ahb", base + 0x68, 2);
clks[IMX6UL_CLK_AIPSTZ1] = imx_clk_gate2_flags("aips_tz1", "ahb", base + 0x68, 0, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_AIPSTZ2] = imx_clk_gate2_flags("aips_tz2", "ahb", base + 0x68, 2, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_APBHDMA] = imx_clk_gate2("apbh_dma", "bch_podf", base + 0x68, 4);
clks[IMX6UL_CLK_ASRC_IPG] = imx_clk_gate2_shared("asrc_ipg", "ahb", base + 0x68, 6, &share_count_asrc);
clks[IMX6UL_CLK_ASRC_MEM] = imx_clk_gate2_shared("asrc_mem", "ahb", base + 0x68, 6, &share_count_asrc);
@@ -360,6 +353,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART2_SERIAL] = imx_clk_gate2("uart2_serial", "uart_podf", base + 0x68, 28);
if (clk_on_imx6ull())
clks[IMX6UL_CLK_AIPSTZ3] = imx_clk_gate2("aips_tz3", "ahb", base + 0x80, 18);
clks[IMX6UL_CLK_GPIO2] = imx_clk_gate2("gpio2", "ipg", base + 0x68, 30);
/* CCGR1 */
clks[IMX6UL_CLK_ECSPI1] = imx_clk_gate2("ecspi1", "ecspi_podf", base + 0x6c, 0);
@@ -376,6 +370,8 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPT1_SERIAL] = imx_clk_gate2("gpt1_serial", "perclk", base + 0x6c, 22);
clks[IMX6UL_CLK_UART4_IPG] = imx_clk_gate2("uart4_ipg", "ipg", base + 0x6c, 24);
clks[IMX6UL_CLK_UART4_SERIAL] = imx_clk_gate2("uart4_serial", "uart_podf", base + 0x6c, 24);
clks[IMX6UL_CLK_GPIO1] = imx_clk_gate2("gpio1", "ipg", base + 0x6c, 26);
clks[IMX6UL_CLK_GPIO5] = imx_clk_gate2("gpio5", "ipg", base + 0x6c, 30);
/* CCGR2 */
if (clk_on_imx6ull()) {
@@ -389,6 +385,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_I2C3] = imx_clk_gate2("i2c3", "perclk", base + 0x70, 10);
clks[IMX6UL_CLK_OCOTP] = imx_clk_gate2("ocotp", "ipg", base + 0x70, 12);
clks[IMX6UL_CLK_IOMUXC] = imx_clk_gate2("iomuxc", "lcdif_podf", base + 0x70, 14);
clks[IMX6UL_CLK_GPIO3] = imx_clk_gate2("gpio3", "ipg", base + 0x70, 26);
clks[IMX6UL_CLK_LCDIF_APB] = imx_clk_gate2("lcdif_apb", "axi", base + 0x70, 28);
clks[IMX6UL_CLK_PXP] = imx_clk_gate2("pxp", "axi", base + 0x70, 30);
@@ -405,11 +402,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_UART6_IPG] = imx_clk_gate2("uart6_ipg", "ipg", base + 0x74, 6);
clks[IMX6UL_CLK_UART6_SERIAL] = imx_clk_gate2("uart6_serial", "uart_podf", base + 0x74, 6);
clks[IMX6UL_CLK_LCDIF_PIX] = imx_clk_gate2("lcdif_pix", "lcdif_podf", base + 0x74, 10);
clks[IMX6UL_CLK_GPIO4] = imx_clk_gate2("gpio4", "ipg", base + 0x74, 12);
clks[IMX6UL_CLK_QSPI] = imx_clk_gate2("qspi1", "qspi1_podf", base + 0x74, 14);
clks[IMX6UL_CLK_WDOG1] = imx_clk_gate2("wdog1", "ipg", base + 0x74, 16);
clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20);
clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2("mmdc_p0_ipg", "ipg", base + 0x74, 24);
clks[IMX6UL_CLK_AXI] = imx_clk_gate("axi", "axi_podf", base + 0x74, 28);
clks[IMX6UL_CLK_MMDC_P0_FAST] = imx_clk_gate_flags("mmdc_p0_fast", "mmdc_podf", base + 0x74, 20, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_MMDC_P0_IPG] = imx_clk_gate2_flags("mmdc_p0_ipg", "ipg", base + 0x74, 24, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_AXI] = imx_clk_gate_flags("axi", "axi_podf", base + 0x74, 28, CLK_IS_CRITICAL);
/* CCGR4 */
clks[IMX6UL_CLK_PER_BCH] = imx_clk_gate2("per_bch", "bch_podf", base + 0x78, 12);
@@ -423,7 +421,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clks[IMX6UL_CLK_GPMI_APB] = imx_clk_gate2("gpmi_apb", "bch_podf", base + 0x78, 30);
/* CCGR5 */
clks[IMX6UL_CLK_ROM] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
clks[IMX6UL_CLK_ROM] = imx_clk_gate2_flags("rom", "ahb", base + 0x7c, 0, CLK_IS_CRITICAL);
clks[IMX6UL_CLK_SDMA] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clks[IMX6UL_CLK_KPP] = imx_clk_gate2("kpp", "ipg", base + 0x7c, 8);
clks[IMX6UL_CLK_WDOG2] = imx_clk_gate2("wdog2", "ipg", base + 0x7c, 10);
@@ -497,10 +495,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
clk_set_rate(clks[IMX6UL_CLK_ENET2_REF], 50000000);
clk_set_rate(clks[IMX6UL_CLK_CSI], 24000000);
/* keep all the clks on just for bringup */
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
if (clk_on_imx6ull())
clk_prepare_enable(clks[IMX6UL_CLK_AIPSTZ3]);
......
@@ -235,27 +235,31 @@
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
#define IMX6UL_CLK_CKO1_SEL 225
#define IMX6UL_CLK_CKO1_PODF 226
#define IMX6UL_CLK_CKO1 227
#define IMX6UL_CLK_CKO2_SEL 228
#define IMX6UL_CLK_CKO2_PODF 229
#define IMX6UL_CLK_CKO2 230
#define IMX6UL_CLK_CKO 231
#define IMX6ULL_CLK_ESAI_PRED 225
#define IMX6ULL_CLK_ESAI_PODF 226
#define IMX6ULL_CLK_ESAI_EXTAL 227
#define IMX6ULL_CLK_ESAI_MEM 228
#define IMX6ULL_CLK_ESAI_IPG 229
#define IMX6ULL_CLK_DCP_CLK 230
#define IMX6ULL_CLK_EPDC_PRE_SEL 231
#define IMX6ULL_CLK_EPDC_SEL 232
#define IMX6ULL_CLK_EPDC_PODF 233
#define IMX6ULL_CLK_EPDC_ACLK 234
#define IMX6ULL_CLK_EPDC_PIX 235
#define IMX6ULL_CLK_ESAI_SEL 236
#define IMX6UL_CLK_CKO1_SEL 237
#define IMX6UL_CLK_CKO1_PODF 238
#define IMX6UL_CLK_CKO1 239
#define IMX6UL_CLK_CKO2_SEL 240
#define IMX6UL_CLK_CKO2_PODF 241
#define IMX6UL_CLK_CKO2 242
#define IMX6UL_CLK_CKO 243
#define IMX6UL_CLK_GPIO1 244
#define IMX6UL_CLK_GPIO2 245
#define IMX6UL_CLK_GPIO3 246
#define IMX6UL_CLK_GPIO4 247
#define IMX6UL_CLK_GPIO5 248
/* For i.MX6ULL */
#define IMX6ULL_CLK_ESAI_PRED 232
#define IMX6ULL_CLK_ESAI_PODF 233
#define IMX6ULL_CLK_ESAI_EXTAL 234
#define IMX6ULL_CLK_ESAI_MEM 235
#define IMX6ULL_CLK_ESAI_IPG 236
#define IMX6ULL_CLK_DCP_CLK 237
#define IMX6ULL_CLK_EPDC_PRE_SEL 238
#define IMX6ULL_CLK_EPDC_SEL 239
#define IMX6ULL_CLK_EPDC_PODF 240
#define IMX6ULL_CLK_EPDC_ACLK 241
#define IMX6ULL_CLK_EPDC_PIX 242
#define IMX6ULL_CLK_ESAI_SEL 243
#define IMX6UL_CLK_END 244
#define IMX6UL_CLK_END 249
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
@@ -38,6 +38,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT BIT(13)
struct clk;
struct clk_hw;
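The new flag can be combined with the generic clock types; for instance, a plain gate sitting below a duty-cycle-capable parent could be registered as in the sketch below, after which the core forwards duty cycle calls to that parent. The clock names, register and bit index are invented for illustration:

#include <linux/clk-provider.h>

static struct clk_hw *foo_register_gate(struct device *dev, void __iomem *reg)
{
	/*
	 * Hypothetical gate with no duty cycle hardware of its own:
	 * CLK_DUTY_CYCLE_PARENT lets clk_set_duty_cycle() and
	 * clk_get_scaled_duty_cycle() act on the "foo_pwm" parent instead.
	 */
	return clk_hw_register_gate(dev, "foo_gate", "foo_pwm",
				    CLK_DUTY_CYCLE_PARENT,
				    reg, 0, 0, NULL);
}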
@@ -66,6 +68,17 @@ struct clk_rate_request {
struct clk_hw *best_parent_hw;
};
/**
* struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
* @num: Numerator of the duty cycle ratio
* @den: Denominator of the duty cycle ratio
*/
struct clk_duty {
unsigned int num;
unsigned int den;
};
/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
@@ -169,6 +182,15 @@ struct clk_rate_request {
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
* @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
* of a clock. The returned denominator must not be 0 and must be
* greater than or equal to the numerator.
*
* @set_duty_cycle: Apply the duty cycle ratio to this clock signal, specified
*		by the numerator and denominator members of the struct clk_duty
*		argument. The ratio must be valid (denominator > 0 and
*		denominator >= numerator). Return 0 on success, otherwise
*		-EERROR.
*
* @init: Perform platform-specific initialization magic.
* This is not used by any of the basic clock types.
* Please consider other ways of solving initialization problems
@@ -218,6 +240,10 @@ struct clk_ops {
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
int (*get_duty_cycle)(struct clk_hw *hw,
struct clk_duty *duty);
int (*set_duty_cycle)(struct clk_hw *hw,
struct clk_duty *duty);
void (*init)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
......
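A provider that does control the duty cycle in hardware would implement the two new callbacks documented above. A hedged sketch follows; the "foo" driver, its register layout and helpers are invented, and only the struct clk_duty / clk_ops usage reflects the API added by this series:

#include <linux/clk-provider.h>
#include <linux/io.h>

struct foo_clk {
	struct clk_hw hw;
	void __iomem *base;	/* hypothetical PWM-style output block */
};

#define to_foo_clk(_hw) container_of(_hw, struct foo_clk, hw)

static int foo_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct foo_clk *foo = to_foo_clk(hw);

	/* hypothetical registers holding high-time and period counts */
	duty->num = readl(foo->base + 0x4);
	duty->den = readl(foo->base + 0x8);

	return 0;
}

static int foo_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct foo_clk *foo = to_foo_clk(hw);

	writel(duty->num, foo->base + 0x4);
	writel(duty->den, foo->base + 0x8);

	return 0;
}

static const struct clk_ops foo_clk_ops = {
	.get_duty_cycle	= foo_get_duty_cycle,
	.set_duty_cycle	= foo_set_duty_cycle,
	/* a real provider would also provide rate ops, etc. */
};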
@@ -141,6 +141,27 @@ int clk_set_phase(struct clk *clk, int degrees);
*/
int clk_get_phase(struct clk *clk);
/**
* clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
* @clk: clock signal source
* @num: numerator of the duty cycle ratio to be applied
* @den: denominator of the duty cycle ratio to be applied
*
* Adjust the duty cycle of a clock signal to the specified ratio. Returns 0 on
* success, -EERROR otherwise.
*/
int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
/**
* clk_get_duty_cycle - return the duty cycle ratio of a clock signal
* @clk: clock signal source
* @scale: scaling factor to be applied to represent the ratio as an integer
*
* Returns the duty cycle ratio multiplied by the scale provided, otherwise
* returns -EERROR.
*/
int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
@@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
unsigned int den)
{
return -ENOTSUPP;
}
static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
unsigned int scale)
{
return 0;
}
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
return p == q;
......
@@ -192,6 +192,42 @@ DEFINE_EVENT(clk_phase, clk_set_phase_complete,
TP_ARGS(core, phase)
);
DECLARE_EVENT_CLASS(clk_duty_cycle,
TP_PROTO(struct clk_core *core, struct clk_duty *duty),
TP_ARGS(core, duty),
TP_STRUCT__entry(
__string( name, core->name )
__field( unsigned int, num )
__field( unsigned int, den )
),
TP_fast_assign(
__assign_str(name, core->name);
__entry->num = duty->num;
__entry->den = duty->den;
),
TP_printk("%s %u/%u", __get_str(name), (unsigned int)__entry->num,
(unsigned int)__entry->den)
);
DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle,
TP_PROTO(struct clk_core *core, struct clk_duty *duty),
TP_ARGS(core, duty)
);
DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
TP_PROTO(struct clk_core *core, struct clk_duty *duty),
TP_ARGS(core, duty)
);
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */
......