Commit 1a8bfa1e authored by Tony Lindgren, committed by Russell King

[ARM] 3142/1: OMAP 2/5: Update files common to omap1 and omap2

Patch from Tony Lindgren

This patch syncs the mainline kernel with linux-omap tree.
The highlights of the patch are:

- Serial port and framebuffer init improvements by Imre Deak

- Common omap pin mux framework by Tony Lindgren

- Common omap clock framework by Tony Lindgren
Signed-off-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 3179a019
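The shared clock framework mentioned in the highlights splits clock.c into a generic dispatcher and chip-specific backends: the generic clk_enable(), clk_set_rate() and friends below take clockfw_lock and forward either to a per-clock hook or to a struct clk_functions pointer (arch_clock) stored by clk_init(). A minimal sketch of how an omap1/omap2 backend would plug in follows; the callback names are taken from the arch_clock-> calls in this diff, while the omap1_* implementations are purely illustrative and not part of the patch:

/* Sketch only: omap1_* names are hypothetical, not from this patch. */
static struct clk_functions omap1_clk_functions = {
	.clk_enable	= omap1_clk_enable,
	.clk_disable	= omap1_clk_disable,
	.clk_use	= omap1_clk_use,
	.clk_unuse	= omap1_clk_unuse,
	.clk_round_rate	= omap1_clk_round_rate,
	.clk_set_rate	= omap1_clk_set_rate,
};

int __init omap1_clk_init_example(void)
{
	/* Generic clk_*() calls in plat-omap/clock.c dispatch here from now on. */
	clk_init(&omap1_clk_functions);

	/* ... then hand each omap1 struct clk to clk_register() ... */
	return 0;
}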
@@ -3,7 +3,7 @@
#
# Common support
obj-y := common.o sram.o sram-fn.o clock.o dma.o mux.o gpio.o mcbsp.o usb.o
obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o mcbsp.o usb.o
obj-m :=
obj-n :=
obj- :=
/*
* linux/arch/arm/plat-omap/clock.c
*
* Copyright (C) 2004 Nokia corporation
* Copyright (C) 2004 - 2005 Nokia corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
* Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -18,562 +23,20 @@
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/hardware/clock.h>
#include <asm/arch/board.h>
#include <asm/arch/usb.h>
#include "clock.h"
#include "sram.h"
#include <asm/arch/clock.h>
static LIST_HEAD(clocks);
LIST_HEAD(clocks);
static DECLARE_MUTEX(clocks_sem);
static DEFINE_SPINLOCK(clockfw_lock);
static void propagate_rate(struct clk * clk);
/* UART clock function */
static int set_uart_rate(struct clk * clk, unsigned long rate);
/* External clock (MCLK & BCLK) functions */
static int set_ext_clk_rate(struct clk * clk, unsigned long rate);
static long round_ext_clk_rate(struct clk * clk, unsigned long rate);
static void init_ext_clk(struct clk * clk);
/* MPU virtual clock functions */
static int select_table_rate(struct clk * clk, unsigned long rate);
static long round_to_table_rate(struct clk * clk, unsigned long rate);
void clk_setdpll(__u16, __u16);
static struct mpu_rate rate_table[] = {
/* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
* armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
*/
#if defined(CONFIG_OMAP_ARM_216MHZ)
{ 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
#endif
#if defined(CONFIG_OMAP_ARM_195MHZ)
{ 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
#endif
#if defined(CONFIG_OMAP_ARM_192MHZ)
{ 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
{ 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
{ 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
{ 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/8/4/4/8/8 */
{ 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
#endif
#if defined(CONFIG_OMAP_ARM_182MHZ)
{ 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
#endif
#if defined(CONFIG_OMAP_ARM_168MHZ)
{ 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
#endif
#if defined(CONFIG_OMAP_ARM_150MHZ)
{ 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
#endif
#if defined(CONFIG_OMAP_ARM_120MHZ)
{ 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
#endif
#if defined(CONFIG_OMAP_ARM_96MHZ)
{ 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
#endif
#if defined(CONFIG_OMAP_ARM_60MHZ)
{ 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
#endif
#if defined(CONFIG_OMAP_ARM_30MHZ)
{ 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
#endif
{ 0, 0, 0, 0, 0 },
};
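As a quick sanity check on the table encoding (not part of the patch): each CKCTL field is a 2-bit divider exponent at the offsets defined in clock.h further down, and DPLL_CTL carries the PLL multiplier in bits 11:7 and divider in bits 6:5, as decoded later in clk_init(). Decoding the 216 MHz row gives exactly the 1/1/2/2/2/8 chain noted in its comment:

/* Illustrative decode of the 216 MHz row: CKCTL 0x050d, DPLL_CTL 0x2910. */
static void decode_rate_row_example(void)
{
	__u16 ckctl = 0x050d;
	__u16 dpll  = 0x2910;

	/* Divisor = 1 << (2-bit exponent) for each clock domain. */
	printk(KERN_DEBUG "arm/dsp/dspmmu/tc/per/lcd = %d/%d/%d/%d/%d/%d\n",
	       1 << ((ckctl >> CKCTL_ARMDIV_OFFSET) & 3),	/* 1 */
	       1 << ((ckctl >> CKCTL_DSPDIV_OFFSET) & 3),	/* 1 */
	       1 << ((ckctl >> CKCTL_DSPMMUDIV_OFFSET) & 3),	/* 2 */
	       1 << ((ckctl >> CKCTL_TCDIV_OFFSET) & 3),	/* 2 */
	       1 << ((ckctl >> CKCTL_PERDIV_OFFSET) & 3),	/* 2 */
	       1 << ((ckctl >> CKCTL_LCDDIV_OFFSET) & 3));	/* 8 */

	/* DPLL: 12 MHz xtal * 18 (bits 11:7) / 1 (bits 6:5, plus 1) = 216 MHz. */
	printk(KERN_DEBUG "dpll1 = %d Hz\n",
	       12000000 * ((dpll & 0xf80) >> 7) / (((dpll & 0x60) >> 5) + 1));
}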
static void ckctl_recalc(struct clk * clk);
int __clk_enable(struct clk *clk);
void __clk_disable(struct clk *clk);
void __clk_unuse(struct clk *clk);
int __clk_use(struct clk *clk);
static void followparent_recalc(struct clk * clk)
{
clk->rate = clk->parent->rate;
}
static void watchdog_recalc(struct clk * clk)
{
clk->rate = clk->parent->rate / 14;
}
static void uart_recalc(struct clk * clk)
{
unsigned int val = omap_readl(clk->enable_reg);
if (val & clk->enable_bit)
clk->rate = 48000000;
else
clk->rate = 12000000;
}
static struct clk ck_ref = {
.name = "ck_ref",
.rate = 12000000,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
ALWAYS_ENABLED,
};
static struct clk ck_dpll1 = {
.name = "ck_dpll1",
.parent = &ck_ref,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_PROPAGATES | ALWAYS_ENABLED,
};
static struct clk ck_dpll1out = {
.name = "ck_dpll1out",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_CKOUT_ARM,
.recalc = &followparent_recalc,
};
static struct clk arm_ck = {
.name = "arm_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
.rate_offset = CKCTL_ARMDIV_OFFSET,
.recalc = &ckctl_recalc,
};
static struct clk armper_ck = {
.name = "armper_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_CKCTL,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
.recalc = &ckctl_recalc,
};
static struct clk arm_gpio_ck = {
.name = "arm_gpio_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_GPIOCK,
.recalc = &followparent_recalc,
};
static struct clk armxor_ck = {
.name = "armxor_ck",
.parent = &ck_ref,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_XORPCK,
.recalc = &followparent_recalc,
};
static struct clk armtim_ck = {
.name = "armtim_ck",
.parent = &ck_ref,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_TIMCK,
.recalc = &followparent_recalc,
};
static struct clk armwdt_ck = {
.name = "armwdt_ck",
.parent = &ck_ref,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_WDTCK,
.recalc = &watchdog_recalc,
};
static struct clk arminth_ck16xx = {
.name = "arminth_ck",
.parent = &arm_ck,
.flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
.recalc = &followparent_recalc,
/* Note: On 16xx the frequency can be divided by 2 by programming
* ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
*
* 1510 version is in TC clocks.
*/
};
static struct clk dsp_ck = {
.name = "dsp_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_CKCTL,
.enable_reg = ARM_CKCTL,
.enable_bit = EN_DSPCK,
.rate_offset = CKCTL_DSPDIV_OFFSET,
.recalc = &ckctl_recalc,
};
static struct clk dspmmu_ck = {
.name = "dspmmu_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_CKCTL | ALWAYS_ENABLED,
.rate_offset = CKCTL_DSPMMUDIV_OFFSET,
.recalc = &ckctl_recalc,
};
static struct clk dspper_ck = {
.name = "dspper_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_CKCTL | DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_PERCK,
.rate_offset = CKCTL_PERDIV_OFFSET,
.recalc = &followparent_recalc,
//.recalc = &ckctl_recalc,
};
static struct clk dspxor_ck = {
.name = "dspxor_ck",
.parent = &ck_ref,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_XORPCK,
.recalc = &followparent_recalc,
};
static struct clk dsptim_ck = {
.name = "dsptim_ck",
.parent = &ck_ref,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
.enable_reg = DSP_IDLECT2,
.enable_bit = EN_DSPTIMCK,
.recalc = &followparent_recalc,
};
static struct clk tc_ck = {
.name = "tc_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 |
RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
.rate_offset = CKCTL_TCDIV_OFFSET,
.recalc = &ckctl_recalc,
};
static struct clk arminth_ck1510 = {
.name = "arminth_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
.recalc = &followparent_recalc,
/* Note: On 1510 the frequency follows TC_CK
*
* 16xx version is in MPU clocks.
*/
};
static struct clk tipb_ck = {
.name = "tibp_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
.recalc = &followparent_recalc,
};
static struct clk l3_ocpi_ck = {
.name = "l3_ocpi_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT3,
.enable_bit = EN_OCPI_CK,
.recalc = &followparent_recalc,
};
DEFINE_SPINLOCK(clockfw_lock);
static struct clk tc1_ck = {
.name = "tc1_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT3,
.enable_bit = EN_TC1_CK,
.recalc = &followparent_recalc,
};
static struct clk_functions *arch_clock;
static struct clk tc2_ck = {
.name = "tc2_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT3,
.enable_bit = EN_TC2_CK,
.recalc = &followparent_recalc,
};
/*-------------------------------------------------------------------------
* Standard clock functions defined in asm/hardware/clock.h
*-------------------------------------------------------------------------*/
static struct clk dma_ck = {
.name = "dma_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
ALWAYS_ENABLED,
.recalc = &followparent_recalc,
};
static struct clk dma_lcdfree_ck = {
.name = "dma_lcdfree_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
.recalc = &followparent_recalc,
};
static struct clk api_ck = {
.name = "api_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_APICK,
.recalc = &followparent_recalc,
};
static struct clk lb_ck = {
.name = "lb_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP1510,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_LBCK,
.recalc = &followparent_recalc,
};
static struct clk rhea1_ck = {
.name = "rhea1_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
.recalc = &followparent_recalc,
};
static struct clk rhea2_ck = {
.name = "rhea2_ck",
.parent = &tc_ck,
.flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
.recalc = &followparent_recalc,
};
static struct clk lcd_ck = {
.name = "lcd_ck",
.parent = &ck_dpll1,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 |
RATE_CKCTL,
.enable_reg = ARM_IDLECT2,
.enable_bit = EN_LCDCK,
.rate_offset = CKCTL_LCDDIV_OFFSET,
.recalc = &ckctl_recalc,
};
static struct clk uart1_1510 = {
.name = "uart1_ck",
/* Direct from ULPD, no parent */
.rate = 12000000,
.flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | ALWAYS_ENABLED,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 29, /* Chooses between 12MHz and 48MHz */
.set_rate = &set_uart_rate,
.recalc = &uart_recalc,
};
static struct clk uart1_16xx = {
.name = "uart1_ck",
/* Direct from ULPD, no parent */
.rate = 48000000,
.flags = CLOCK_IN_OMAP16XX | RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 29,
};
static struct clk uart2_ck = {
.name = "uart2_ck",
/* Direct from ULPD, no parent */
.rate = 12000000,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ENABLE_REG_32BIT |
ALWAYS_ENABLED,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 30, /* Chooses between 12MHz and 48MHz */
.set_rate = &set_uart_rate,
.recalc = &uart_recalc,
};
static struct clk uart3_1510 = {
.name = "uart3_ck",
/* Direct from ULPD, no parent */
.rate = 12000000,
.flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | ALWAYS_ENABLED,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 31, /* Chooses between 12MHz and 48MHz */
.set_rate = &set_uart_rate,
.recalc = &uart_recalc,
};
static struct clk uart3_16xx = {
.name = "uart3_ck",
/* Direct from ULPD, no parent */
.rate = 48000000,
.flags = CLOCK_IN_OMAP16XX | RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 31,
};
static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
.name = "usb_clko",
/* Direct from ULPD, no parent */
.rate = 6000000,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = ULPD_CLOCK_CTRL,
.enable_bit = USB_MCLK_EN_BIT,
};
static struct clk usb_hhc_ck1510 = {
.name = "usb_hhc_ck",
/* Direct from ULPD, no parent */
.rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
.flags = CLOCK_IN_OMAP1510 |
RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = USB_HOST_HHC_UHOST_EN,
};
static struct clk usb_hhc_ck16xx = {
.name = "usb_hhc_ck",
/* Direct from ULPD, no parent */
.rate = 48000000,
/* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
.flags = CLOCK_IN_OMAP16XX |
RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
.enable_bit = 8 /* UHOST_EN */,
};
static struct clk usb_dc_ck = {
.name = "usb_dc_ck",
/* Direct from ULPD, no parent */
.rate = 48000000,
.flags = CLOCK_IN_OMAP16XX | RATE_FIXED,
.enable_reg = SOFT_REQ_REG,
.enable_bit = 4,
};
static struct clk mclk_1510 = {
.name = "mclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.rate = 12000000,
.flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
};
static struct clk mclk_16xx = {
.name = "mclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.flags = CLOCK_IN_OMAP16XX,
.enable_reg = COM_CLK_DIV_CTRL_SEL,
.enable_bit = COM_ULPD_PLL_CLK_REQ,
.set_rate = &set_ext_clk_rate,
.round_rate = &round_ext_clk_rate,
.init = &init_ext_clk,
};
static struct clk bclk_1510 = {
.name = "bclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.rate = 12000000,
.flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
};
static struct clk bclk_16xx = {
.name = "bclk",
/* Direct from ULPD, no parent. May be enabled by ext hardware. */
.flags = CLOCK_IN_OMAP16XX,
.enable_reg = SWD_CLK_DIV_CTRL_SEL,
.enable_bit = SWD_ULPD_PLL_CLK_REQ,
.set_rate = &set_ext_clk_rate,
.round_rate = &round_ext_clk_rate,
.init = &init_ext_clk,
};
static struct clk mmc1_ck = {
.name = "mmc1_ck",
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.parent = &armper_ck,
.rate = 48000000,
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 23,
};
static struct clk mmc2_ck = {
.name = "mmc2_ck",
/* Functional clock is direct from ULPD, interface clock is ARMPER */
.parent = &armper_ck,
.rate = 48000000,
.flags = CLOCK_IN_OMAP16XX |
RATE_FIXED | ENABLE_REG_32BIT,
.enable_reg = MOD_CONF_CTRL_0,
.enable_bit = 20,
};
static struct clk virtual_ck_mpu = {
.name = "mpu",
.flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
VIRTUAL_CLOCK | ALWAYS_ENABLED,
.parent = &arm_ck, /* Is smarter alias for */
.recalc = &followparent_recalc,
.set_rate = &select_table_rate,
.round_rate = &round_to_table_rate,
};
static struct clk * onchip_clks[] = {
/* non-ULPD clocks */
&ck_ref,
&ck_dpll1,
/* CK_GEN1 clocks */
&ck_dpll1out,
&arm_ck,
&armper_ck,
&arm_gpio_ck,
&armxor_ck,
&armtim_ck,
&armwdt_ck,
&arminth_ck1510, &arminth_ck16xx,
/* CK_GEN2 clocks */
&dsp_ck,
&dspmmu_ck,
&dspper_ck,
&dspxor_ck,
&dsptim_ck,
/* CK_GEN3 clocks */
&tc_ck,
&tipb_ck,
&l3_ocpi_ck,
&tc1_ck,
&tc2_ck,
&dma_ck,
&dma_lcdfree_ck,
&api_ck,
&lb_ck,
&rhea1_ck,
&rhea2_ck,
&lcd_ck,
/* ULPD clocks */
&uart1_1510,
&uart1_16xx,
&uart2_ck,
&uart3_1510,
&uart3_16xx,
&usb_clko,
&usb_hhc_ck1510, &usb_hhc_ck16xx,
&usb_dc_ck,
&mclk_1510, &mclk_16xx,
&bclk_1510, &bclk_16xx,
&mmc1_ck,
&mmc2_ck,
/* Virtual clocks */
&virtual_ck_mpu,
};
struct clk *clk_get(struct device *dev, const char *id)
struct clk * clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
@@ -590,534 +53,200 @@ struct clk *clk_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL(clk_get);
void clk_put(struct clk *clk)
{
if (clk && !IS_ERR(clk))
module_put(clk->owner);
}
EXPORT_SYMBOL(clk_put);
int __clk_enable(struct clk *clk)
{
__u16 regval16;
__u32 regval32;
if (clk->flags & ALWAYS_ENABLED)
return 0;
if (unlikely(clk->enable_reg == 0)) {
printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
clk->name);
return 0;
}
if (clk->flags & DSP_DOMAIN_CLOCK) {
__clk_use(&api_ck);
}
if (clk->flags & ENABLE_REG_32BIT) {
if (clk->flags & VIRTUAL_IO_ADDRESS) {
regval32 = __raw_readl(clk->enable_reg);
regval32 |= (1 << clk->enable_bit);
__raw_writel(regval32, clk->enable_reg);
} else {
regval32 = omap_readl(clk->enable_reg);
regval32 |= (1 << clk->enable_bit);
omap_writel(regval32, clk->enable_reg);
}
} else {
if (clk->flags & VIRTUAL_IO_ADDRESS) {
regval16 = __raw_readw(clk->enable_reg);
regval16 |= (1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
} else {
regval16 = omap_readw(clk->enable_reg);
regval16 |= (1 << clk->enable_bit);
omap_writew(regval16, clk->enable_reg);
}
}
if (clk->flags & DSP_DOMAIN_CLOCK) {
__clk_unuse(&api_ck);
}
return 0;
}
void __clk_disable(struct clk *clk)
{
__u16 regval16;
__u32 regval32;
if (clk->enable_reg == 0)
return;
if (clk->flags & DSP_DOMAIN_CLOCK) {
__clk_use(&api_ck);
}
if (clk->flags & ENABLE_REG_32BIT) {
if (clk->flags & VIRTUAL_IO_ADDRESS) {
regval32 = __raw_readl(clk->enable_reg);
regval32 &= ~(1 << clk->enable_bit);
__raw_writel(regval32, clk->enable_reg);
} else {
regval32 = omap_readl(clk->enable_reg);
regval32 &= ~(1 << clk->enable_bit);
omap_writel(regval32, clk->enable_reg);
}
} else {
if (clk->flags & VIRTUAL_IO_ADDRESS) {
regval16 = __raw_readw(clk->enable_reg);
regval16 &= ~(1 << clk->enable_bit);
__raw_writew(regval16, clk->enable_reg);
} else {
regval16 = omap_readw(clk->enable_reg);
regval16 &= ~(1 << clk->enable_bit);
omap_writew(regval16, clk->enable_reg);
}
}
if (clk->flags & DSP_DOMAIN_CLOCK) {
__clk_unuse(&api_ck);
}
}
void __clk_unuse(struct clk *clk)
{
if (clk->usecount > 0 && !(--clk->usecount)) {
__clk_disable(clk);
if (likely(clk->parent))
__clk_unuse(clk->parent);
}
}
int __clk_use(struct clk *clk)
{
int ret = 0;
if (clk->usecount++ == 0) {
if (likely(clk->parent))
ret = __clk_use(clk->parent);
if (unlikely(ret != 0)) {
clk->usecount--;
return ret;
}
ret = __clk_enable(clk);
if (unlikely(ret != 0) && clk->parent) {
__clk_unuse(clk->parent);
clk->usecount--;
}
}
return ret;
}
int clk_enable(struct clk *clk)
{
unsigned long flags;
int ret;
int ret = 0;
spin_lock_irqsave(&clockfw_lock, flags);
ret = __clk_enable(clk);
if (clk->enable)
ret = clk->enable(clk);
else if (arch_clock->clk_enable)
ret = arch_clock->clk_enable(clk);
else
printk(KERN_ERR "Could not enable clock %s\n", clk->name);
spin_unlock_irqrestore(&clockfw_lock, flags);
return ret;
}
EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clockfw_lock, flags);
__clk_disable(clk);
if (clk->disable)
clk->disable(clk);
else if (arch_clock->clk_disable)
arch_clock->clk_disable(clk);
else
printk(KERN_ERR "Could not disable clock %s\n", clk->name);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
int clk_use(struct clk *clk)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&clockfw_lock, flags);
ret = __clk_use(clk);
if (arch_clock->clk_use)
ret = arch_clock->clk_use(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
return ret;
}
EXPORT_SYMBOL(clk_use);
void clk_unuse(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clockfw_lock, flags);
__clk_unuse(clk);
if (arch_clock->clk_unuse)
arch_clock->clk_unuse(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_unuse);
int clk_get_usecount(struct clk *clk)
{
return clk->usecount;
}
EXPORT_SYMBOL(clk_get_usecount);
unsigned long clk_get_rate(struct clk *clk)
{
return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);
static __u16 verify_ckctl_value(__u16 newval)
{
/* This function checks for following limitations set
* by the hardware (all conditions must be true):
* DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
* ARM_CK >= TC_CK
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*
* In addition following rules are enforced:
* LCD_CK <= TC_CK
* ARMPER_CK <= TC_CK
*
* However, maximum frequencies are not checked for!
*/
__u8 per_exp;
__u8 lcd_exp;
__u8 arm_exp;
__u8 dsp_exp;
__u8 tc_exp;
__u8 dspmmu_exp;
per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
if (dspmmu_exp < dsp_exp)
dspmmu_exp = dsp_exp;
if (dspmmu_exp > dsp_exp+1)
dspmmu_exp = dsp_exp+1;
if (tc_exp < arm_exp)
tc_exp = arm_exp;
if (tc_exp < dspmmu_exp)
tc_exp = dspmmu_exp;
if (tc_exp > lcd_exp)
lcd_exp = tc_exp;
if (tc_exp > per_exp)
per_exp = tc_exp;
unsigned long flags;
int ret = 0;
newval &= 0xf000;
newval |= per_exp << CKCTL_PERDIV_OFFSET;
newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
newval |= tc_exp << CKCTL_TCDIV_OFFSET;
newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
spin_lock_irqsave(&clockfw_lock, flags);
ret = clk->usecount;
spin_unlock_irqrestore(&clockfw_lock, flags);
return newval;
return ret;
}
EXPORT_SYMBOL(clk_get_usecount);
static int calc_dsor_exp(struct clk *clk, unsigned long rate)
unsigned long clk_get_rate(struct clk *clk)
{
/* Note: If target frequency is too low, this function will return 4,
* which is invalid value. Caller must check for this value and act
* accordingly.
*
* Note: This function does not check for following limitations set
* by the hardware (all conditions must be true):
* DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
* ARM_CK >= TC_CK
* DSP_CK >= TC_CK
* DSPMMU_CK >= TC_CK
*/
unsigned long realrate;
struct clk * parent;
unsigned dsor_exp;
if (unlikely(!(clk->flags & RATE_CKCTL)))
return -EINVAL;
parent = clk->parent;
if (unlikely(parent == 0))
return -EIO;
realrate = parent->rate;
for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
if (realrate <= rate)
break;
unsigned long flags;
unsigned long ret = 0;
realrate /= 2;
}
spin_lock_irqsave(&clockfw_lock, flags);
ret = clk->rate;
spin_unlock_irqrestore(&clockfw_lock, flags);
return dsor_exp;
return ret;
}
EXPORT_SYMBOL(clk_get_rate);
static void ckctl_recalc(struct clk * clk)
void clk_put(struct clk *clk)
{
int dsor;
/* Calculate divisor encoded as 2-bit exponent */
if (clk->flags & DSP_DOMAIN_CLOCK) {
/* The clock control bits are in DSP domain,
* so api_ck is needed for access.
* Note that DSP_CKCTL virt addr = phys addr, so
* we must use __raw_readw() instead of omap_readw().
*/
__clk_use(&api_ck);
dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
__clk_unuse(&api_ck);
} else {
dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
}
if (unlikely(clk->rate == clk->parent->rate / dsor))
return; /* No change, quick exit */
clk->rate = clk->parent->rate / dsor;
if (unlikely(clk->flags & RATE_PROPAGATES))
propagate_rate(clk);
if (clk && !IS_ERR(clk))
module_put(clk->owner);
}
EXPORT_SYMBOL(clk_put);
/*-------------------------------------------------------------------------
* Optional clock functions defined in asm/hardware/clock.h
*-------------------------------------------------------------------------*/
long clk_round_rate(struct clk *clk, unsigned long rate)
{
int dsor_exp;
if (clk->flags & RATE_FIXED)
return clk->rate;
if (clk->flags & RATE_CKCTL) {
dsor_exp = calc_dsor_exp(clk, rate);
if (dsor_exp < 0)
return dsor_exp;
if (dsor_exp > 3)
dsor_exp = 3;
return clk->parent->rate / (1 << dsor_exp);
}
unsigned long flags;
long ret = 0;
if(clk->round_rate != 0)
return clk->round_rate(clk, rate);
spin_lock_irqsave(&clockfw_lock, flags);
if (arch_clock->clk_round_rate)
ret = arch_clock->clk_round_rate(clk, rate);
spin_unlock_irqrestore(&clockfw_lock, flags);
return clk->rate;
return ret;
}
EXPORT_SYMBOL(clk_round_rate);
static void propagate_rate(struct clk * clk)
{
struct clk ** clkp;
for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
if (likely((*clkp)->parent != clk)) continue;
if (likely((*clkp)->recalc))
(*clkp)->recalc(*clkp);
}
}
static int select_table_rate(struct clk * clk, unsigned long rate)
int clk_set_rate(struct clk *clk, unsigned long rate)
{
/* Find the highest supported frequency <= rate and switch to it */
struct mpu_rate * ptr;
if (clk != &virtual_ck_mpu)
return -EINVAL;
for (ptr = rate_table; ptr->rate; ptr++) {
if (ptr->xtal != ck_ref.rate)
continue;
/* DPLL1 cannot be reprogrammed without risking system crash */
if (likely(ck_dpll1.rate!=0) && ptr->pll_rate != ck_dpll1.rate)
continue;
/* Can check only after xtal frequency check */
if (ptr->rate <= rate)
break;
}
if (!ptr->rate)
return -EINVAL;
unsigned long flags;
int ret = 0;
/*
* In most cases we should not need to reprogram DPLL.
* Reprogramming the DPLL is tricky, it must be done from SRAM.
*/
omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
spin_lock_irqsave(&clockfw_lock, flags);
if (arch_clock->clk_set_rate)
ret = arch_clock->clk_set_rate(clk, rate);
spin_unlock_irqrestore(&clockfw_lock, flags);
ck_dpll1.rate = ptr->pll_rate;
propagate_rate(&ck_dpll1);
return 0;
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
static long round_to_table_rate(struct clk * clk, unsigned long rate)
int clk_set_parent(struct clk *clk, struct clk *parent)
{
/* Find the highest supported frequency <= rate */
struct mpu_rate * ptr;
long highest_rate;
if (clk != &virtual_ck_mpu)
return -EINVAL;
highest_rate = -EINVAL;
for (ptr = rate_table; ptr->rate; ptr++) {
if (ptr->xtal != ck_ref.rate)
continue;
highest_rate = ptr->rate;
unsigned long flags;
int ret = 0;
/* Can check only after xtal frequency check */
if (ptr->rate <= rate)
break;
}
spin_lock_irqsave(&clockfw_lock, flags);
if (arch_clock->clk_set_parent)
ret = arch_clock->clk_set_parent(clk, parent);
spin_unlock_irqrestore(&clockfw_lock, flags);
return highest_rate;
return ret;
}
EXPORT_SYMBOL(clk_set_parent);
int clk_set_rate(struct clk *clk, unsigned long rate)
struct clk *clk_get_parent(struct clk *clk)
{
int ret = -EINVAL;
int dsor_exp;
__u16 regval;
unsigned long flags;
if (clk->flags & RATE_CKCTL) {
dsor_exp = calc_dsor_exp(clk, rate);
if (dsor_exp > 3)
dsor_exp = -EINVAL;
if (dsor_exp < 0)
return dsor_exp;
spin_lock_irqsave(&clockfw_lock, flags);
regval = omap_readw(ARM_CKCTL);
regval &= ~(3 << clk->rate_offset);
regval |= dsor_exp << clk->rate_offset;
regval = verify_ckctl_value(regval);
omap_writew(regval, ARM_CKCTL);
clk->rate = clk->parent->rate / (1 << dsor_exp);
spin_unlock_irqrestore(&clockfw_lock, flags);
ret = 0;
} else if(clk->set_rate != 0) {
spin_lock_irqsave(&clockfw_lock, flags);
ret = clk->set_rate(clk, rate);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
unsigned long flags;
struct clk * ret = NULL;
if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
propagate_rate(clk);
spin_lock_irqsave(&clockfw_lock, flags);
if (arch_clock->clk_get_parent)
ret = arch_clock->clk_get_parent(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
EXPORT_SYMBOL(clk_get_parent);
/*-------------------------------------------------------------------------
* OMAP specific clock functions shared between omap1 and omap2
*-------------------------------------------------------------------------*/
static unsigned calc_ext_dsor(unsigned long rate)
{
unsigned dsor;
unsigned int __initdata mpurate;
/* MCLK and BCLK divisor selection is not linear:
* freq = 96MHz / dsor
*
* RATIO_SEL range: dsor <-> RATIO_SEL
* 0..6: (RATIO_SEL+2) <-> (dsor-2)
* 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
* Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
* can not be used.
*/
for (dsor = 2; dsor < 96; ++dsor) {
if ((dsor & 1) && dsor > 8)
continue;
if (rate >= 96000000 / dsor)
break;
}
return dsor;
}
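A few concrete points on that non-linear mapping may help (worked from the formulas above and from set_ext_clk_rate()/init_ext_clk() below; illustrative only):

/*
 * Worked examples of rate -> dsor -> RATIO_SEL:
 *
 *   48 MHz: 96 MHz / 2   -> dsor  2 -> RATIO_SEL 0   (dsor - 2)
 *   12 MHz: 96 MHz / 8   -> dsor  8 -> RATIO_SEL 6   (dsor - 2)
 *    8 MHz: 96 MHz / 12  -> dsor 12 -> RATIO_SEL 8   ((dsor - 8) / 2 + 6)
 *
 * A request between two achievable rates rounds down: calc_ext_dsor()
 * keeps increasing dsor until 96 MHz / dsor drops to or below the
 * requested rate, so the result is the highest achievable rate that
 * does not exceed the request.
 */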
/* Only needed on 1510 */
static int set_uart_rate(struct clk * clk, unsigned long rate)
{
unsigned int val;
val = omap_readl(clk->enable_reg);
if (rate == 12000000)
val &= ~(1 << clk->enable_bit);
else if (rate == 48000000)
val |= (1 << clk->enable_bit);
else
return -EINVAL;
omap_writel(val, clk->enable_reg);
clk->rate = rate;
return 0;
}
static int set_ext_clk_rate(struct clk * clk, unsigned long rate)
/*
* By default we use the rate set by the bootloader.
* You can override this with mpurate= cmdline option.
*/
static int __init omap_clk_setup(char *str)
{
unsigned dsor;
__u16 ratio_bits;
get_option(&str, &mpurate);
dsor = calc_ext_dsor(rate);
clk->rate = 96000000 / dsor;
if (dsor > 8)
ratio_bits = ((dsor - 8) / 2 + 6) << 2;
else
ratio_bits = (dsor - 2) << 2;
if (!mpurate)
return 1;
ratio_bits |= omap_readw(clk->enable_reg) & ~0xfd;
omap_writew(ratio_bits, clk->enable_reg);
if (mpurate < 1000)
mpurate *= 1000000;
return 0;
return 1;
}
__setup("mpurate=", omap_clk_setup);
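For example (values are illustrative), either form below asks for a 192 MHz MPU rate, since anything below 1000 is interpreted as MHz and scaled; the stored mpurate is presumably consumed by the chip-specific clock init (it is deliberately left non-static), which is outside this hunk:

/*
 * Kernel command line examples:
 *
 *   mpurate=192         -> 192 < 1000, scaled to 192000000 Hz
 *   mpurate=192000000   -> used as-is
 *
 * Without mpurate= the rate programmed by the bootloader is kept.
 */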
static long round_ext_clk_rate(struct clk * clk, unsigned long rate)
/* Used for clocks that always have same value as the parent clock */
void followparent_recalc(struct clk *clk)
{
return 96000000 / calc_ext_dsor(rate);
clk->rate = clk->parent->rate;
}
static void init_ext_clk(struct clk * clk)
/* Propagate rate to children */
void propagate_rate(struct clk * tclk)
{
unsigned dsor;
__u16 ratio_bits;
struct clk *clkp;
/* Determine current rate and ensure clock is based on 96MHz APLL */
ratio_bits = omap_readw(clk->enable_reg) & ~1;
omap_writew(ratio_bits, clk->enable_reg);
ratio_bits = (ratio_bits & 0xfc) >> 2;
if (ratio_bits > 6)
dsor = (ratio_bits - 6) * 2 + 8;
else
dsor = ratio_bits + 2;
clk-> rate = 96000000 / dsor;
list_for_each_entry(clkp, &clocks, node) {
if (likely(clkp->parent != tclk))
continue;
if (likely((u32)clkp->recalc))
clkp->recalc(clkp);
}
}
int clk_register(struct clk *clk)
{
down(&clocks_sem);
@@ -1125,6 +254,7 @@ int clk_register(struct clk *clk)
if (clk->init)
clk->init(clk);
up(&clocks_sem);
return 0;
}
EXPORT_SYMBOL(clk_register);
@@ -1137,203 +267,38 @@ void clk_unregister(struct clk *clk)
}
EXPORT_SYMBOL(clk_unregister);
#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
* Resets some clocks that may be left on from bootloader,
* but leaves serial clocks on. See also omap_late_clk_reset().
*/
static inline void omap_early_clk_reset(void)
void clk_deny_idle(struct clk *clk)
{
//omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
unsigned long flags;
spin_lock_irqsave(&clockfw_lock, flags);
if (arch_clock->clk_deny_idle)
arch_clock->clk_deny_idle(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
#else
#define omap_early_clk_reset() {}
#endif
EXPORT_SYMBOL(clk_deny_idle);
int __init clk_init(void)
void clk_allow_idle(struct clk *clk)
{
struct clk ** clkp;
const struct omap_clock_config *info;
int crystal_type = 0; /* Default 12 MHz */
omap_early_clk_reset();
for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
if (((*clkp)->flags &CLOCK_IN_OMAP1510) && cpu_is_omap1510()) {
clk_register(*clkp);
continue;
}
if (((*clkp)->flags &CLOCK_IN_OMAP16XX) && cpu_is_omap16xx()) {
clk_register(*clkp);
continue;
}
if (((*clkp)->flags &CLOCK_IN_OMAP730) && cpu_is_omap730()) {
clk_register(*clkp);
continue;
}
}
info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
if (info != NULL) {
if (!cpu_is_omap1510())
crystal_type = info->system_clock_type;
}
#if defined(CONFIG_ARCH_OMAP730)
ck_ref.rate = 13000000;
#elif defined(CONFIG_ARCH_OMAP16XX)
if (crystal_type == 2)
ck_ref.rate = 19200000;
#endif
printk("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
omap_readw(ARM_CKCTL));
/* We want to be in synchronous scalable mode */
omap_writew(0x1000, ARM_SYSST);
#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
/* Use values set by bootloader. Determine PLL rate and recalculate
* dependent clocks as if kernel had changed PLL or divisors.
*/
{
unsigned pll_ctl_val = omap_readw(DPLL_CTL);
ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
if (pll_ctl_val & 0x10) {
/* PLL enabled, apply multiplier and divisor */
if (pll_ctl_val & 0xf80)
ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
} else {
/* PLL disabled, apply bypass divisor */
switch (pll_ctl_val & 0xc) {
case 0:
break;
case 0x4:
ck_dpll1.rate /= 2;
break;
default:
ck_dpll1.rate /= 4;
break;
}
}
}
propagate_rate(&ck_dpll1);
#else
/* Find the highest supported frequency and enable it */
if (select_table_rate(&virtual_ck_mpu, ~0)) {
printk(KERN_ERR "System frequencies not set. Check your config.\n");
/* Guess sane values (60MHz) */
omap_writew(0x2290, DPLL_CTL);
omap_writew(0x1005, ARM_CKCTL);
ck_dpll1.rate = 60000000;
propagate_rate(&ck_dpll1);
}
#endif
/* Cache rates for clocks connected to ck_ref (not dpll1) */
propagate_rate(&ck_ref);
printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
"%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
#ifdef CONFIG_MACH_OMAP_PERSEUS2
/* Select slicer output as OMAP input clock */
omap_writew(omap_readw(OMAP730_PCC_UPLD_CTRL) & ~0x1, OMAP730_PCC_UPLD_CTRL);
#endif
/* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
/* Put DSP/MPUI into reset until needed */
omap_writew(0, ARM_RSTCT1);
omap_writew(1, ARM_RSTCT2);
omap_writew(0x400, ARM_IDLECT1);
/*
* According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
* of the ARM_IDLECT2 register must be set to zero. The power-on
* default value of this bit is one.
*/
omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
/*
* Only enable those clocks we will need, let the drivers
* enable other clocks as necessary
*/
clk_use(&armper_ck);
clk_use(&armxor_ck);
clk_use(&armtim_ck);
if (cpu_is_omap1510())
clk_enable(&arm_gpio_ck);
unsigned long flags;
return 0;
spin_lock_irqsave(&clockfw_lock, flags);
if (arch_clock->clk_allow_idle)
arch_clock->clk_allow_idle(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_allow_idle);
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_OMAP_RESET_CLOCKS
static int __init omap_late_clk_reset(void)
int __init clk_init(struct clk_functions * custom_clocks)
{
/* Turn off all unused clocks */
struct clk *p;
__u32 regval32;
/* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
regval32 = omap_readw(SOFT_REQ_REG) & (1 << 4);
omap_writew(regval32, SOFT_REQ_REG);
omap_writew(0, SOFT_REQ_REG2);
list_for_each_entry(p, &clocks, node) {
if (p->usecount > 0 || (p->flags & ALWAYS_ENABLED) ||
p->enable_reg == 0)
continue;
/* Assume no DSP clocks have been activated by bootloader */
if (p->flags & DSP_DOMAIN_CLOCK)
continue;
/* Is the clock already disabled? */
if (p->flags & ENABLE_REG_32BIT) {
if (p->flags & VIRTUAL_IO_ADDRESS)
regval32 = __raw_readl(p->enable_reg);
else
regval32 = omap_readl(p->enable_reg);
} else {
if (p->flags & VIRTUAL_IO_ADDRESS)
regval32 = __raw_readw(p->enable_reg);
else
regval32 = omap_readw(p->enable_reg);
}
if ((regval32 & (1 << p->enable_bit)) == 0)
continue;
/* FIXME: This clock seems to be necessary but no-one
* has asked for its activation. */
if (p == &tc2_ck // FIX: pm.c (SRAM), CCP, Camera
|| p == &ck_dpll1out // FIX: SoSSI, SSR
|| p == &arm_gpio_ck // FIX: GPIO code for 1510
) {
printk(KERN_INFO "FIXME: Clock \"%s\" seems unused\n",
p->name);
continue;
}
printk(KERN_INFO "Disabling unused clock \"%s\"... ", p->name);
__clk_disable(p);
printk(" done\n");
if (!custom_clocks) {
printk(KERN_ERR "No custom clock functions registered\n");
BUG();
}
arch_clock = custom_clocks;
return 0;
}
late_initcall(omap_late_clk_reset);
#endif
/*
* linux/arch/arm/plat-omap/clock.h
*
* Copyright (C) 2004 Nokia corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
* Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARCH_ARM_OMAP_CLOCK_H
#define __ARCH_ARM_OMAP_CLOCK_H
struct module;
struct clk {
struct list_head node;
struct module *owner;
const char *name;
struct clk *parent;
unsigned long rate;
__s8 usecount;
__u16 flags;
__u32 enable_reg;
__u8 enable_bit;
__u8 rate_offset;
void (*recalc)(struct clk *);
int (*set_rate)(struct clk *, unsigned long);
long (*round_rate)(struct clk *, unsigned long);
void (*init)(struct clk *);
};
struct mpu_rate {
unsigned long rate;
unsigned long xtal;
unsigned long pll_rate;
__u16 ckctl_val;
__u16 dpllctl_val;
};
/* Clock flags */
#define RATE_CKCTL 1
#define RATE_FIXED 2
#define RATE_PROPAGATES 4
#define VIRTUAL_CLOCK 8
#define ALWAYS_ENABLED 16
#define ENABLE_REG_32BIT 32
#define CLOCK_IN_OMAP16XX 64
#define CLOCK_IN_OMAP1510 128
#define CLOCK_IN_OMAP730 256
#define DSP_DOMAIN_CLOCK 512
#define VIRTUAL_IO_ADDRESS 1024
/* ARM_CKCTL bit shifts */
#define CKCTL_PERDIV_OFFSET 0
#define CKCTL_LCDDIV_OFFSET 2
#define CKCTL_ARMDIV_OFFSET 4
#define CKCTL_DSPDIV_OFFSET 6
#define CKCTL_TCDIV_OFFSET 8
#define CKCTL_DSPMMUDIV_OFFSET 10
/*#define ARM_TIMXO 12*/
#define EN_DSPCK 13
/*#define ARM_INTHCK_SEL 14*/ /* Divide-by-2 for mpu inth_ck */
/* DSP_CKCTL bit shifts */
#define CKCTL_DSPPERDIV_OFFSET 0
/* ARM_IDLECT1 bit shifts */
/*#define IDLWDT_ARM 0*/
/*#define IDLXORP_ARM 1*/
/*#define IDLPER_ARM 2*/
/*#define IDLLCD_ARM 3*/
/*#define IDLLB_ARM 4*/
/*#define IDLHSAB_ARM 5*/
/*#define IDLIF_ARM 6*/
/*#define IDLDPLL_ARM 7*/
/*#define IDLAPI_ARM 8*/
/*#define IDLTIM_ARM 9*/
/*#define SETARM_IDLE 11*/
/* ARM_IDLECT2 bit shifts */
#define EN_WDTCK 0
#define EN_XORPCK 1
#define EN_PERCK 2
#define EN_LCDCK 3
#define EN_LBCK 4 /* Not on 1610/1710 */
/*#define EN_HSABCK 5*/
#define EN_APICK 6
#define EN_TIMCK 7
#define DMACK_REQ 8
#define EN_GPIOCK 9 /* Not on 1610/1710 */
/*#define EN_LBFREECK 10*/
#define EN_CKOUT_ARM 11
/* ARM_IDLECT3 bit shifts */
#define EN_OCPI_CK 0
#define EN_TC1_CK 2
#define EN_TC2_CK 4
/* DSP_IDLECT2 bit shifts (0,1,2 are same as for ARM_IDLECT2) */
#define EN_DSPTIMCK 5
/* Various register defines for clock controls scattered around OMAP chip */
#define USB_MCLK_EN_BIT 4 /* In ULPD_CLKC_CTRL */
#define USB_HOST_HHC_UHOST_EN 9 /* In MOD_CONF_CTRL_0 */
#define SWD_ULPD_PLL_CLK_REQ 1 /* In SWD_CLK_DIV_CTRL_SEL */
#define COM_ULPD_PLL_CLK_REQ 1 /* In COM_CLK_DIV_CTRL_SEL */
#define SWD_CLK_DIV_CTRL_SEL 0xfffe0874
#define COM_CLK_DIV_CTRL_SEL 0xfffe0878
#define SOFT_REQ_REG 0xfffe0834
#define SOFT_REQ_REG2 0xfffe0880
int clk_register(struct clk *clk);
void clk_unregister(struct clk *clk);
int clk_init(void);
#endif
@@ -31,7 +31,7 @@
#include <asm/arch/mux.h>
#include <asm/arch/fpga.h>
#include "clock.h"
#include <asm/arch/clock.h>
#define NO_LENGTH_CHECK 0xffffffff
@@ -117,19 +117,43 @@ EXPORT_SYMBOL(omap_get_var_config);
static int __init omap_add_serial_console(void)
{
const struct omap_serial_console_config *info;
info = omap_get_config(OMAP_TAG_SERIAL_CONSOLE,
struct omap_serial_console_config);
if (info != NULL && info->console_uart) {
static char speed[11], *opt = NULL;
const struct omap_serial_console_config *con_info;
const struct omap_uart_config *uart_info;
static char speed[11], *opt = NULL;
int line, i, uart_idx;
uart_info = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
con_info = omap_get_config(OMAP_TAG_SERIAL_CONSOLE,
struct omap_serial_console_config);
if (uart_info == NULL || con_info == NULL)
return 0;
if (con_info->console_uart == 0)
return 0;
if (con_info->console_speed) {
snprintf(speed, sizeof(speed), "%u", con_info->console_speed);
opt = speed;
}
if (info->console_speed) {
snprintf(speed, sizeof(speed), "%u", info->console_speed);
opt = speed;
}
return add_preferred_console("ttyS", info->console_uart - 1, opt);
uart_idx = con_info->console_uart - 1;
if (uart_idx >= OMAP_MAX_NR_PORTS) {
printk(KERN_INFO "Console: external UART#%d. "
"Not adding it as console this time.\n",
uart_idx + 1);
return 0;
}
if (!(uart_info->enabled_uarts & (1 << uart_idx))) {
printk(KERN_ERR "Console: Selected UART#%d is "
"not enabled for this platform\n",
uart_idx + 1);
return -1;
}
line = 0;
for (i = 0; i < uart_idx; i++) {
if (uart_info->enabled_uarts & (1 << i))
line++;
}
return 0;
return add_preferred_console("ttyS", line, opt);
}
console_initcall(omap_add_serial_console);
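To make the line-number remapping above concrete, here is a hypothetical board configuration (values invented for illustration): only UART1 and UART3 are wired up, and the console sits on UART3.

/* Hypothetical board tags, for illustration only. */
static struct omap_uart_config example_uart_config = {
	.enabled_uarts = (1 << 0) | (1 << 2),	/* UART1 and UART3 */
};
static struct omap_serial_console_config example_console_config = {
	.console_uart  = 3,			/* physical UART3 */
	.console_speed = 115200,
};
/*
 * uart_idx = 3 - 1 = 2; only UART1 (bit 0) is enabled below it, so
 * line = 1 and the call becomes add_preferred_console("ttyS", 1, "115200"):
 * the console lands on ttyS1 even though it is physically UART3.
 */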
/*
* linux/arch/arm/plat-omap/devices.c
*
* Common platform device setup/initialization for OMAP1 and OMAP2
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/arch/tc.h>
#include <asm/arch/board.h>
#include <asm/arch/mux.h>
#include <asm/arch/gpio.h>
void omap_nop_release(struct device *dev)
{
/* Nothing */
}
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
#define OMAP1_I2C_BASE 0xfffb3800
#define OMAP2_I2C_BASE1 0x48070000
#define OMAP_I2C_SIZE 0x3f
#define OMAP1_I2C_INT INT_I2C
#define OMAP2_I2C_INT1 56
static struct resource i2c_resources1[] = {
{
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM,
},
{
.start = 0,
.flags = IORESOURCE_IRQ,
},
};
/* DMA not used; works around erratum writing to non-empty i2c fifo */
static struct platform_device omap_i2c_device1 = {
.name = "i2c_omap",
.id = 1,
.dev = {
.release = omap_nop_release,
},
.num_resources = ARRAY_SIZE(i2c_resources1),
.resource = i2c_resources1,
};
/* See also arch/arm/mach-omap2/devices.c for second I2C on 24xx */
static void omap_init_i2c(void)
{
if (cpu_is_omap24xx()) {
i2c_resources1[0].start = OMAP2_I2C_BASE1;
i2c_resources1[0].end = OMAP2_I2C_BASE1 + OMAP_I2C_SIZE;
i2c_resources1[1].start = OMAP2_I2C_INT1;
} else {
i2c_resources1[0].start = OMAP1_I2C_BASE;
i2c_resources1[0].end = OMAP1_I2C_BASE + OMAP_I2C_SIZE;
i2c_resources1[1].start = OMAP1_I2C_INT;
}
/* FIXME define and use a boot tag, in case of boards that
* either don't wire up I2C, or chips that mux it differently...
* it can include clocking and address info, maybe more.
*/
if (cpu_is_omap24xx()) {
omap_cfg_reg(M19_24XX_I2C1_SCL);
omap_cfg_reg(L15_24XX_I2C1_SDA);
} else {
omap_cfg_reg(I2C_SCL);
omap_cfg_reg(I2C_SDA);
}
(void) platform_device_register(&omap_i2c_device1);
}
#else
static inline void omap_init_i2c(void) {}
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
#ifdef CONFIG_ARCH_OMAP24XX
#define OMAP_MMC1_BASE 0x4809c000
#define OMAP_MMC1_INT 83
#else
#define OMAP_MMC1_BASE 0xfffb7800
#define OMAP_MMC1_INT INT_MMC
#endif
#define OMAP_MMC2_BASE 0xfffb7c00 /* omap16xx only */
static struct omap_mmc_conf mmc1_conf;
static u64 mmc1_dmamask = 0xffffffff;
static struct resource mmc1_resources[] = {
{
.start = IO_ADDRESS(OMAP_MMC1_BASE),
.end = IO_ADDRESS(OMAP_MMC1_BASE) + 0x7f,
.flags = IORESOURCE_MEM,
},
{
.start = OMAP_MMC1_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mmc_omap_device1 = {
.name = "mmci-omap",
.id = 1,
.dev = {
.release = omap_nop_release,
.dma_mask = &mmc1_dmamask,
.platform_data = &mmc1_conf,
},
.num_resources = ARRAY_SIZE(mmc1_resources),
.resource = mmc1_resources,
};
#ifdef CONFIG_ARCH_OMAP16XX
static struct omap_mmc_conf mmc2_conf;
static u64 mmc2_dmamask = 0xffffffff;
static struct resource mmc2_resources[] = {
{
.start = IO_ADDRESS(OMAP_MMC2_BASE),
.end = IO_ADDRESS(OMAP_MMC2_BASE) + 0x7f,
.flags = IORESOURCE_MEM,
},
{
.start = INT_1610_MMC2,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device mmc_omap_device2 = {
.name = "mmci-omap",
.id = 2,
.dev = {
.release = omap_nop_release,
.dma_mask = &mmc2_dmamask,
.platform_data = &mmc2_conf,
},
.num_resources = ARRAY_SIZE(mmc2_resources),
.resource = mmc2_resources,
};
#endif
static void __init omap_init_mmc(void)
{
const struct omap_mmc_config *mmc_conf;
const struct omap_mmc_conf *mmc;
/* NOTE: assumes MMC was never (wrongly) enabled */
mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
if (!mmc_conf)
return;
/* block 1 is always available and has just one pinout option */
mmc = &mmc_conf->mmc[0];
if (mmc->enabled) {
if (!cpu_is_omap24xx()) {
omap_cfg_reg(MMC_CMD);
omap_cfg_reg(MMC_CLK);
omap_cfg_reg(MMC_DAT0);
if (cpu_is_omap1710()) {
omap_cfg_reg(M15_1710_MMC_CLKI);
omap_cfg_reg(P19_1710_MMC_CMDDIR);
omap_cfg_reg(P20_1710_MMC_DATDIR0);
}
}
if (mmc->wire4) {
if (!cpu_is_omap24xx()) {
omap_cfg_reg(MMC_DAT1);
/* NOTE: DAT2 can be on W10 (here) or M15 */
if (!mmc->nomux)
omap_cfg_reg(MMC_DAT2);
omap_cfg_reg(MMC_DAT3);
}
}
mmc1_conf = *mmc;
(void) platform_device_register(&mmc_omap_device1);
}
#ifdef CONFIG_ARCH_OMAP16XX
/* block 2 is on newer chips, and has many pinout options */
mmc = &mmc_conf->mmc[1];
if (mmc->enabled) {
if (!mmc->nomux) {
omap_cfg_reg(Y8_1610_MMC2_CMD);
omap_cfg_reg(Y10_1610_MMC2_CLK);
omap_cfg_reg(R18_1610_MMC2_CLKIN);
omap_cfg_reg(W8_1610_MMC2_DAT0);
if (mmc->wire4) {
omap_cfg_reg(V8_1610_MMC2_DAT1);
omap_cfg_reg(W15_1610_MMC2_DAT2);
omap_cfg_reg(R10_1610_MMC2_DAT3);
}
/* These are needed for the level shifter */
omap_cfg_reg(V9_1610_MMC2_CMDDIR);
omap_cfg_reg(V5_1610_MMC2_DATDIR0);
omap_cfg_reg(W19_1610_MMC2_DATDIR1);
}
/* Feedback clock must be set on OMAP-1710 MMC2 */
if (cpu_is_omap1710())
omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
MOD_CONF_CTRL_1);
mmc2_conf = *mmc;
(void) platform_device_register(&mmc_omap_device2);
}
#endif
return;
}
#else
static inline void omap_init_mmc(void) {}
#endif
#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
#ifdef CONFIG_ARCH_OMAP24XX
#define OMAP_WDT_BASE 0x48022000
#else
#define OMAP_WDT_BASE 0xfffeb000
#endif
static struct resource wdt_resources[] = {
{
.start = OMAP_WDT_BASE,
.end = OMAP_WDT_BASE + 0x4f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_wdt_device = {
.name = "omap_wdt",
.id = -1,
.dev = {
.release = omap_nop_release,
},
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
};
static void omap_init_wdt(void)
{
(void) platform_device_register(&omap_wdt_device);
}
#else
static inline void omap_init_wdt(void) {}
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OMAP_RNG) || defined(CONFIG_OMAP_RNG_MODULE)
#ifdef CONFIG_ARCH_OMAP24XX
#define OMAP_RNG_BASE 0x480A0000
#else
#define OMAP_RNG_BASE 0xfffe5000
#endif
static struct resource rng_resources[] = {
{
.start = OMAP_RNG_BASE,
.end = OMAP_RNG_BASE + 0x4f,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device omap_rng_device = {
.name = "omap_rng",
.id = -1,
.dev = {
.release = omap_nop_release,
},
.num_resources = ARRAY_SIZE(rng_resources),
.resource = rng_resources,
};
static void omap_init_rng(void)
{
(void) platform_device_register(&omap_rng_device);
}
#else
static inline void omap_init_rng(void) {}
#endif
#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
static struct omap_lcd_config omap_fb_conf;
static u64 omap_fb_dma_mask = ~(u32)0;
static struct platform_device omap_fb_device = {
.name = "omapfb",
.id = -1,
.dev = {
.release = omap_nop_release,
.dma_mask = &omap_fb_dma_mask,
.coherent_dma_mask = ~(u32)0,
.platform_data = &omap_fb_conf,
},
.num_resources = 0,
};
static inline void omap_init_fb(void)
{
const struct omap_lcd_config *conf;
conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
if (conf != NULL)
omap_fb_conf = *conf;
platform_device_register(&omap_fb_device);
}
#else
static inline void omap_init_fb(void) {}
#endif
/*
* This gets called after board-specific INIT_MACHINE, and initializes most
* on-chip peripherals accessible on this board (except for few like USB):
*
* (a) Does any "standard config" pin muxing needed. Board-specific
* code will have muxed GPIO pins and done "nonstandard" setup;
* that code could live in the boot loader.
* (b) Populating board-specific platform_data with the data drivers
* rely on to handle wiring variations.
* (c) Creating platform devices as meaningful on this board and
* with this kernel configuration.
*
* Claiming GPIOs, and setting their direction and initial values, is the
* responsibility of the device drivers. So is responding to probe().
*
* Board-specific knowledge like creating devices or pin setup is to be
* kept out of drivers as much as possible. In particular, pin setup
* may be handled by the boot loader, and drivers should expect it will
* normally have been done by the time they're probed.
*/
static int __init omap_init_devices(void)
{
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
omap_init_fb();
omap_init_i2c();
omap_init_mmc();
omap_init_wdt();
omap_init_rng();
return 0;
}
arch_initcall(omap_init_devices);
@@ -6,6 +6,8 @@
* DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
* Graphics DMA and LCD DMA graphics transformations
* by Imre Deak <imre.deak@nokia.com>
* OMAP2 support Copyright (C) 2004-2005 Texas Instruments, Inc.
* Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
* Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
*
* Support functions for the OMAP internal DMA channels.
@@ -31,8 +33,15 @@
#include <asm/arch/tc.h>
#define OMAP_DMA_ACTIVE 0x01
#define DEBUG_PRINTS
#undef DEBUG_PRINTS
#ifdef DEBUG_PRINTS
#define debug_printk(x) printk x
#else
#define debug_printk(x)
#endif
#define OMAP_DMA_ACTIVE 0x01
#define OMAP_DMA_CCR_EN (1 << 7)
#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
@@ -55,7 +64,7 @@ static int dma_chan_count;
static spinlock_t dma_chan_lock;
static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
const static u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
@@ -63,6 +72,20 @@ const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};
#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
__FUNCTION__);
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
int omap_dma_in_1510_mode(void)
{
return enable_1510_mode;
}
#else
#define omap_dma_in_1510_mode() 0
#endif
#ifdef CONFIG_ARCH_OMAP1
static inline int get_gdma_dev(int req)
{
u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
@@ -82,6 +105,9 @@ static inline void set_gdma_dev(int req, int dev)
l |= (dev - 1) << shift;
omap_writel(l, reg);
}
#else
#define set_gdma_dev(req, dev) do {} while (0)
#endif
static void clear_lch_regs(int lch)
{
@@ -121,38 +147,62 @@ void omap_set_dma_priority(int dst_port, int priority)
}
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
int frame_count, int sync_mode)
int frame_count, int sync_mode,
int dma_trigger, int src_or_dst_synch)
{
u16 w;
OMAP_DMA_CSDP_REG(lch) &= ~0x03;
OMAP_DMA_CSDP_REG(lch) |= data_type;
w = omap_readw(OMAP_DMA_CSDP(lch));
w &= ~0x03;
w |= data_type;
omap_writew(w, OMAP_DMA_CSDP(lch));
if (cpu_class_is_omap1()) {
OMAP_DMA_CCR_REG(lch) &= ~(1 << 5);
if (sync_mode == OMAP_DMA_SYNC_FRAME)
OMAP_DMA_CCR_REG(lch) |= 1 << 5;
OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2);
if (sync_mode == OMAP_DMA_SYNC_BLOCK)
OMAP1_DMA_CCR2_REG(lch) |= 1 << 2;
}
if (cpu_is_omap24xx() && dma_trigger) {
u32 val = OMAP_DMA_CCR_REG(lch);
if (dma_trigger > 63)
val |= 1 << 20;
if (dma_trigger > 31)
val |= 1 << 19;
w = omap_readw(OMAP_DMA_CCR(lch));
w &= ~(1 << 5);
if (sync_mode == OMAP_DMA_SYNC_FRAME)
w |= 1 << 5;
omap_writew(w, OMAP_DMA_CCR(lch));
val |= (dma_trigger & 0x1f);
w = omap_readw(OMAP_DMA_CCR2(lch));
w &= ~(1 << 2);
if (sync_mode == OMAP_DMA_SYNC_BLOCK)
w |= 1 << 2;
omap_writew(w, OMAP_DMA_CCR2(lch));
if (sync_mode & OMAP_DMA_SYNC_FRAME)
val |= 1 << 5;
omap_writew(elem_count, OMAP_DMA_CEN(lch));
omap_writew(frame_count, OMAP_DMA_CFN(lch));
if (sync_mode & OMAP_DMA_SYNC_BLOCK)
val |= 1 << 18;
if (src_or_dst_synch)
val |= 1 << 24; /* source synch */
else
val &= ~(1 << 24); /* dest synch */
OMAP_DMA_CCR_REG(lch) = val;
}
OMAP_DMA_CEN_REG(lch) = elem_count;
OMAP_DMA_CFN_REG(lch) = frame_count;
}
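As an illustrative walk-through of the 24xx branch above: with dma_trigger = 50, sync_mode = OMAP_DMA_SYNC_FRAME and src_or_dst_synch = 0, CCR is modified as follows.

/*
 * Example CCR update for dma_trigger = 50, frame sync, dest synch:
 *
 *   50 > 31 but not > 63  -> bit 19 set, bit 20 left clear
 *   50 & 0x1f = 0x12      -> low request-line bits
 *   OMAP_DMA_SYNC_FRAME   -> bit 5 set
 *   src_or_dst_synch == 0 -> bit 24 forced clear (destination synch)
 *
 * i.e. 0x00080032 is ORed into the previous CCR value, with bit 24
 * additionally cleared, before CEN and CFN are written.
 */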
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
u16 w;
BUG_ON(omap_dma_in_1510_mode());
w = omap_readw(OMAP_DMA_CCR2(lch)) & ~0x03;
if (cpu_is_omap24xx()) {
REVISIT_24XX();
return;
}
w = OMAP1_DMA_CCR2_REG(lch) & ~0x03;
switch (mode) {
case OMAP_DMA_CONSTANT_FILL:
w |= 0x01;
@@ -165,63 +215,84 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
default:
BUG();
}
omap_writew(w, OMAP_DMA_CCR2(lch));
OMAP1_DMA_CCR2_REG(lch) = w;
w = omap_readw(OMAP_DMA_LCH_CTRL(lch)) & ~0x0f;
w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f;
/* Default is channel type 2D */
if (mode) {
omap_writew((u16)color, OMAP_DMA_COLOR_L(lch));
omap_writew((u16)(color >> 16), OMAP_DMA_COLOR_U(lch));
OMAP1_DMA_COLOR_L_REG(lch) = (u16)color;
OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16);
w |= 1; /* Channel type G */
}
omap_writew(w, OMAP_DMA_LCH_CTRL(lch));
OMAP1_DMA_LCH_CTRL_REG(lch) = w;
}
/* Note that src_port is only for omap1 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
unsigned long src_start)
unsigned long src_start,
int src_ei, int src_fi)
{
u16 w;
if (cpu_class_is_omap1()) {
OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2);
OMAP_DMA_CSDP_REG(lch) |= src_port << 2;
}
OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12);
OMAP_DMA_CCR_REG(lch) |= src_amode << 12;
if (cpu_class_is_omap1()) {
OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16;
OMAP1_DMA_CSSA_L_REG(lch) = src_start;
}
w = omap_readw(OMAP_DMA_CSDP(lch));
w &= ~(0x1f << 2);
w |= src_port << 2;
omap_writew(w, OMAP_DMA_CSDP(lch));
if (cpu_is_omap24xx())
OMAP2_DMA_CSSA_REG(lch) = src_start;
w = omap_readw(OMAP_DMA_CCR(lch));
w &= ~(0x03 << 12);
w |= src_amode << 12;
omap_writew(w, OMAP_DMA_CCR(lch));
OMAP_DMA_CSEI_REG(lch) = src_ei;
OMAP_DMA_CSFI_REG(lch) = src_fi;
}
omap_writew(src_start >> 16, OMAP_DMA_CSSA_U(lch));
omap_writew(src_start, OMAP_DMA_CSSA_L(lch));
void omap_set_dma_params(int lch, struct omap_dma_channel_params * params)
{
omap_set_dma_transfer_params(lch, params->data_type,
params->elem_count, params->frame_count,
params->sync_mode, params->trigger,
params->src_or_dst_synch);
omap_set_dma_src_params(lch, params->src_port,
params->src_amode, params->src_start,
params->src_ei, params->src_fi);
omap_set_dma_dest_params(lch, params->dst_port,
params->dst_amode, params->dst_start,
params->dst_ei, params->dst_fi);
}
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
omap_writew(eidx, OMAP_DMA_CSEI(lch));
omap_writew(fidx, OMAP_DMA_CSFI(lch));
if (cpu_is_omap24xx()) {
REVISIT_24XX();
return;
}
OMAP_DMA_CSEI_REG(lch) = eidx;
OMAP_DMA_CSFI_REG(lch) = fidx;
}
void omap_set_dma_src_data_pack(int lch, int enable)
{
u16 w;
w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 6);
w |= enable ? (1 << 6) : 0;
omap_writew(w, OMAP_DMA_CSDP(lch));
OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6);
if (enable)
OMAP_DMA_CSDP_REG(lch) |= (1 << 6);
}
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
u16 w;
OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 7);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
w |= (0x01 << 7);
OMAP_DMA_CSDP_REG(lch) |= (0x02 << 7);
break;
case OMAP_DMA_DATA_BURST_8:
/* not supported by current hardware
@@ -231,110 +302,283 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
default:
BUG();
}
omap_writew(w, OMAP_DMA_CSDP(lch));
}
/* Note that dest_port is only for OMAP1 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
unsigned long dest_start)
unsigned long dest_start,
int dst_ei, int dst_fi)
{
u16 w;
if (cpu_class_is_omap1()) {
OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9);
OMAP_DMA_CSDP_REG(lch) |= dest_port << 9;
}
w = omap_readw(OMAP_DMA_CSDP(lch));
w &= ~(0x1f << 9);
w |= dest_port << 9;
omap_writew(w, OMAP_DMA_CSDP(lch));
OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14);
OMAP_DMA_CCR_REG(lch) |= dest_amode << 14;
if (cpu_class_is_omap1()) {
OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16;
OMAP1_DMA_CDSA_L_REG(lch) = dest_start;
}
w = omap_readw(OMAP_DMA_CCR(lch));
w &= ~(0x03 << 14);
w |= dest_amode << 14;
omap_writew(w, OMAP_DMA_CCR(lch));
if (cpu_is_omap24xx())
OMAP2_DMA_CDSA_REG(lch) = dest_start;
omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U(lch));
omap_writew(dest_start, OMAP_DMA_CDSA_L(lch));
OMAP_DMA_CDEI_REG(lch) = dst_ei;
OMAP_DMA_CDFI_REG(lch) = dst_fi;
}
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
omap_writew(eidx, OMAP_DMA_CDEI(lch));
omap_writew(fidx, OMAP_DMA_CDFI(lch));
if (cpu_is_omap24xx()) {
REVISIT_24XX();
return;
}
OMAP_DMA_CDEI_REG(lch) = eidx;
OMAP_DMA_CDFI_REG(lch) = fidx;
}
void omap_set_dma_dest_data_pack(int lch, int enable)
{
u16 w;
w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 13);
w |= enable ? (1 << 13) : 0;
omap_writew(w, OMAP_DMA_CSDP(lch));
OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13);
if (enable)
OMAP_DMA_CSDP_REG(lch) |= 1 << 13;
}
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
u16 w;
OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 14);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
w |= (0x01 << 14);
OMAP_DMA_CSDP_REG(lch) |= (0x02 << 14);
break;
case OMAP_DMA_DATA_BURST_8:
w |= (0x03 << 14);
OMAP_DMA_CSDP_REG(lch) |= (0x03 << 14);
break;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
return;
}
omap_writew(w, OMAP_DMA_CSDP(lch));
}
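The burst and packing helpers above only touch CSDP bits, so they can be combined freely before the channel is started. A hedged, hypothetical helper showing a throughput-oriented setup (the 4-word burst choice is illustrative, and per the comment above 8-word bursts are not usable on the source side):
/* Hypothetical helper: enable packing and 4-word bursts on both ends. */
static void my_speed_up_channel(int lch)
{
	omap_set_dma_src_burst_mode(lch, OMAP_DMA_DATA_BURST_4);
	omap_set_dma_src_data_pack(lch, 1);
	omap_set_dma_dest_burst_mode(lch, OMAP_DMA_DATA_BURST_4);
	omap_set_dma_dest_data_pack(lch, 1);
}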
static inline void init_intr(int lch)
static inline void omap_enable_channel_irq(int lch)
{
u16 w;
u32 status;
/* Read CSR to make sure it's cleared. */
w = omap_readw(OMAP_DMA_CSR(lch));
status = OMAP_DMA_CSR_REG(lch);
/* Enable some nice interrupts. */
omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR(lch));
OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
static inline void enable_lnk(int lch)
static void omap_disable_channel_irq(int lch)
{
u16 w;
if (cpu_is_omap24xx())
OMAP_DMA_CICR_REG(lch) = 0;
}
void omap_enable_dma_irq(int lch, u16 bits)
{
dma_chan[lch].enabled_irqs |= bits;
}
/* Clear the STOP_LNK bits */
w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
w &= ~(1 << 14);
omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
void omap_disable_dma_irq(int lch, u16 bits)
{
dma_chan[lch].enabled_irqs &= ~bits;
}
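The enabled_irqs mask changed by these two helpers is only written out to CICR when the channel is (re)started (see omap_enable_channel_irq() above and omap_start_dma() below), so callers adjust it between omap_request_dma() and omap_start_dma(). A minimal, hypothetical fragment from a driver's start path:
/* Hypothetical: keep only the block-complete interrupt on this channel.
 * omap_request_dma() defaults the mask to TOUT | DROP | BLOCK. */
omap_disable_dma_irq(lch, OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ);
/* ...program the transfer... */
omap_start_dma(lch);	/* the mask is loaded into CICR here */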
static inline void enable_lnk(int lch)
{
if (cpu_class_is_omap1())
OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14);
/* And set the ENABLE_LNK bits */
/* Set the ENABLE_LNK bits */
if (dma_chan[lch].next_lch != -1)
omap_writew(dma_chan[lch].next_lch | (1 << 15),
OMAP_DMA_CLNK_CTRL(lch));
OMAP_DMA_CLNK_CTRL_REG(lch) =
dma_chan[lch].next_lch | (1 << 15);
}
static inline void disable_lnk(int lch)
{
u16 w;
/* Disable interrupts */
omap_writew(0, OMAP_DMA_CICR(lch));
if (cpu_class_is_omap1()) {
OMAP_DMA_CICR_REG(lch) = 0;
/* Set the STOP_LNK bit */
OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14;
}
/* Set the STOP_LNK bit */
w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
w |= (1 << 14);
omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));

if (cpu_is_omap24xx()) {
omap_disable_channel_irq(lch);
/* Clear the ENABLE_LNK bit */
OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15);
}
dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
void omap_start_dma(int lch)
static inline void omap2_enable_irq_lch(int lch)
{
u16 w;
u32 val;
if (!cpu_is_omap24xx())
return;
val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
val |= 1 << lch;
omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
}
int omap_request_dma(int dev_id, const char *dev_name,
void (* callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch_out)
{
int ch, free_ch = -1;
unsigned long flags;
struct omap_dma_lch *chan;
spin_lock_irqsave(&dma_chan_lock, flags);
for (ch = 0; ch < dma_chan_count; ch++) {
if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
free_ch = ch;
if (dev_id == 0)
break;
}
}
if (free_ch == -1) {
spin_unlock_irqrestore(&dma_chan_lock, flags);
return -EBUSY;
}
chan = dma_chan + free_ch;
chan->dev_id = dev_id;
if (cpu_class_is_omap1())
clear_lch_regs(free_ch);
if (cpu_is_omap24xx())
omap_clear_dma(free_ch);
spin_unlock_irqrestore(&dma_chan_lock, flags);
chan->dev_name = dev_name;
chan->callback = callback;
chan->data = data;
chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ |
OMAP_DMA_BLOCK_IRQ;
if (cpu_is_omap24xx())
chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ;
if (cpu_is_omap16xx()) {
/* If the sync device is set, configure it dynamically. */
if (dev_id != 0) {
set_gdma_dev(free_ch + 1, dev_id);
dev_id = free_ch + 1;
}
/* Disable the 1510 compatibility mode and set the sync device
* id. */
OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10);
} else if (cpu_is_omap730() || cpu_is_omap15xx()) {
OMAP_DMA_CCR_REG(free_ch) = dev_id;
}
if (cpu_is_omap24xx()) {
omap2_enable_irq_lch(free_ch);
omap_enable_channel_irq(free_ch);
/* Clear the CSR register and IRQ status register */
OMAP_DMA_CSR_REG(free_ch) = 0x0;
omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0);
}
*dma_ch_out = free_ch;
return 0;
}
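Putting the API together, a typical client requests a channel with a completion callback, programs it, starts it, and frees it when done; the McBSP transmit/receive paths later in this patch follow exactly this shape. The fragment below is a hedged sketch with placeholder names (my_dma_cb and my_run_dma are hypothetical, and the EMIFF-to-TIPB direction and S16 element size are illustrative assumptions, not requirements of the API):
/* Hypothetical callback: ch_status carries the CSR bits for the event. */
static void my_dma_cb(int lch, u16 ch_status, void *data)
{
	complete((struct completion *)data);
}
static int my_run_dma(int dev_id, dma_addr_t src, dma_addr_t dst, int len)
{
	struct completion done;
	int lch, ret;
	init_completion(&done);
	ret = omap_request_dma(dev_id, "my-dma", my_dma_cb, &done, &lch);
	if (ret)
		return ret;
	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
				     len >> 1, 1, OMAP_DMA_SYNC_ELEMENT,
				     0, 0);
	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
				OMAP_DMA_AMODE_POST_INC, src, 0, 0);
	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
				 OMAP_DMA_AMODE_CONSTANT, dst, 0, 0);
	omap_start_dma(lch);
	wait_for_completion(&done);
	omap_free_dma(lch);
	return 0;
}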
void omap_free_dma(int lch)
{
unsigned long flags;
spin_lock_irqsave(&dma_chan_lock, flags);
if (dma_chan[lch].dev_id == -1) {
printk("omap_dma: trying to free nonallocated DMA channel %d\n",
lch);
spin_unlock_irqrestore(&dma_chan_lock, flags);
return;
}
dma_chan[lch].dev_id = -1;
dma_chan[lch].next_lch = -1;
dma_chan[lch].callback = NULL;
spin_unlock_irqrestore(&dma_chan_lock, flags);
if (cpu_class_is_omap1()) {
/* Disable all DMA interrupts for the channel. */
OMAP_DMA_CICR_REG(lch) = 0;
/* Make sure the DMA transfer is stopped. */
OMAP_DMA_CCR_REG(lch) = 0;
}
if (cpu_is_omap24xx()) {
u32 val;
/* Disable interrupts */
val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
val &= ~(1 << lch);
omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
/* Clear the CSR register and IRQ status register */
OMAP_DMA_CSR_REG(lch) = 0x0;
val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
val |= 1 << lch;
omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
/* Disable all DMA interrupts for the channel. */
OMAP_DMA_CICR_REG(lch) = 0;
/* Make sure the DMA transfer is stopped. */
OMAP_DMA_CCR_REG(lch) = 0;
omap_clear_dma(lch);
}
}
/*
* Clears any DMA state so the DMA engine is ready to restart with new buffers
* through omap_start_dma(). Any buffers in flight are discarded.
*/
void omap_clear_dma(int lch)
{
unsigned long flags;
local_irq_save(flags);
if (cpu_class_is_omap1()) {
int status;
OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
/* Clear pending interrupts */
status = OMAP_DMA_CSR_REG(lch);
}
if (cpu_is_omap24xx()) {
int i;
u32 lch_base = OMAP24XX_DMA_BASE + lch * 0x60 + 0x80;
for (i = 0; i < 0x44; i += 4)
omap_writel(0, lch_base + i);
}
local_irq_restore(flags);
}
void omap_start_dma(int lch)
{
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch;
char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
......@@ -348,31 +592,37 @@ void omap_start_dma(int lch)
do {
next_lch = dma_chan[cur_lch].next_lch;
/* The loop case: we've been here already */
/* The loop case: we've been here already */
if (dma_chan_link_map[cur_lch])
break;
/* Mark the current channel */
dma_chan_link_map[cur_lch] = 1;
enable_lnk(cur_lch);
init_intr(cur_lch);
omap_enable_channel_irq(cur_lch);
cur_lch = next_lch;
} while (next_lch != -1);
} else if (cpu_is_omap24xx()) {
/* Errata: Need to write lch even if not using chaining */
OMAP_DMA_CLNK_CTRL_REG(lch) = lch;
}
init_intr(lch);
omap_enable_channel_irq(lch);
/* Errata: On ES2.0 BUFFERING disable must be set.
* This will always fail on ES1.0 */
if (cpu_is_omap24xx()) {
OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
}
OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
w = omap_readw(OMAP_DMA_CCR(lch));
w |= OMAP_DMA_CCR_EN;
omap_writew(w, OMAP_DMA_CCR(lch));
dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
void omap_stop_dma(int lch)
{
u16 w;
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch = lch;
char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
......@@ -393,146 +643,83 @@ void omap_stop_dma(int lch)
return;
}
/* Disable all interrupts on the channel */
omap_writew(0, OMAP_DMA_CICR(lch));
if (cpu_class_is_omap1())
OMAP_DMA_CICR_REG(lch) = 0;
w = omap_readw(OMAP_DMA_CCR(lch));
w &= ~OMAP_DMA_CCR_EN;
omap_writew(w, OMAP_DMA_CCR(lch));
OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
void omap_enable_dma_irq(int lch, u16 bits)
/*
* Returns current physical source address for the given DMA channel.
* If the channel is running, the caller must disable interrupts prior to calling
* this function and process the returned value before re-enabling interrupts, to
* prevent races with the interrupt handler. Note that in continuous mode there
* is a chance of a CSSA_L register overflow in between the two reads, resulting
* in an incorrect return value.
*/
dma_addr_t omap_get_dma_src_pos(int lch)
{
dma_chan[lch].enabled_irqs |= bits;
}
dma_addr_t offset;
void omap_disable_dma_irq(int lch, u16 bits)
{
dma_chan[lch].enabled_irqs &= ~bits;
}
if (cpu_class_is_omap1())
offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) |
(OMAP1_DMA_CSSA_U_REG(lch) << 16));
static int dma_handle_ch(int ch)
{
u16 csr;
if (cpu_is_omap24xx())
offset = OMAP_DMA_CSAC_REG(lch);
if (enable_1510_mode && ch >= 6) {
csr = dma_chan[ch].saved_csr;
dma_chan[ch].saved_csr = 0;
} else
csr = omap_readw(OMAP_DMA_CSR(ch));
if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
dma_chan[ch + 6].saved_csr = csr >> 7;
csr &= 0x7f;
}
if ((csr & 0x3f) == 0)
return 0;
if (unlikely(dma_chan[ch].dev_id == -1)) {
printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
ch, csr);
return 0;
}
if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id);
if (unlikely(csr & OMAP_DMA_DROP_IRQ))
printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n",
dma_chan[ch].dev_id);
if (likely(csr & OMAP_DMA_BLOCK_IRQ))
dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
if (likely(dma_chan[ch].callback != NULL))
dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
return 1;
return offset;
}
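As the comment above spells out, the position read is only safe against the channel's interrupt handler if interrupts stay off for both the read and the use of the value. A short, hypothetical caller-side fragment of that contract:
/* Hypothetical progress poll: sample the source position with
 * interrupts disabled for the whole read-and-use sequence. */
unsigned long flags;
dma_addr_t pos;
local_irq_save(flags);
pos = omap_get_dma_src_pos(lch);
/* consume 'pos' here, before interrupts come back on */
local_irq_restore(flags);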
static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
/*
* Returns current physical destination address for the given DMA channel.
* If the channel is running, the caller must disable interrupts prior to calling
* this function and process the returned value before re-enabling interrupts, to
* prevent races with the interrupt handler. Note that in continuous mode there
* is a chance of a CDSA_L register overflow in between the two reads, resulting
* in an incorrect return value.
*/
dma_addr_t omap_get_dma_dst_pos(int lch)
{
int ch = ((int) dev_id) - 1;
int handled = 0;
dma_addr_t offset;
for (;;) {
int handled_now = 0;
if (cpu_class_is_omap1())
offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) |
(OMAP1_DMA_CDSA_U_REG(lch) << 16));
handled_now += dma_handle_ch(ch);
if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
handled_now += dma_handle_ch(ch + 6);
if (!handled_now)
break;
handled += handled_now;
}
if (cpu_is_omap24xx())
offset = OMAP2_DMA_CDSA_REG(lch);
return handled ? IRQ_HANDLED : IRQ_NONE;
return offset;
}
int omap_request_dma(int dev_id, const char *dev_name,
void (* callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch_out)
/*
* Returns the current source transfer count for the given DMA channel.
* Can be used to monitor the progress of a transfer inside a block.
* It must be called with interrupts disabled.
*/
int omap_get_dma_src_addr_counter(int lch)
{
int ch, free_ch = -1;
unsigned long flags;
struct omap_dma_lch *chan;
spin_lock_irqsave(&dma_chan_lock, flags);
for (ch = 0; ch < dma_chan_count; ch++) {
if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
free_ch = ch;
if (dev_id == 0)
break;
}
}
if (free_ch == -1) {
spin_unlock_irqrestore(&dma_chan_lock, flags);
return -EBUSY;
}
chan = dma_chan + free_ch;
chan->dev_id = dev_id;
clear_lch_regs(free_ch);
spin_unlock_irqrestore(&dma_chan_lock, flags);
chan->dev_id = dev_id;
chan->dev_name = dev_name;
chan->callback = callback;
chan->data = data;
chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
if (cpu_is_omap16xx()) {
/* If the sync device is set, configure it dynamically. */
if (dev_id != 0) {
set_gdma_dev(free_ch + 1, dev_id);
dev_id = free_ch + 1;
}
/* Disable the 1510 compatibility mode and set the sync device
* id. */
omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR(free_ch));
} else {
omap_writew(dev_id, OMAP_DMA_CCR(free_ch));
}
*dma_ch_out = free_ch;
return 0;
return (dma_addr_t) OMAP_DMA_CSAC_REG(lch);
}
void omap_free_dma(int ch)
int omap_dma_running(void)
{
unsigned long flags;
int lch;
spin_lock_irqsave(&dma_chan_lock, flags);
if (dma_chan[ch].dev_id == -1) {
printk("omap_dma: trying to free nonallocated DMA channel %d\n", ch);
spin_unlock_irqrestore(&dma_chan_lock, flags);
return;
}
dma_chan[ch].dev_id = -1;
spin_unlock_irqrestore(&dma_chan_lock, flags);
/* Check if LCD DMA is running */
if (cpu_is_omap16xx())
if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
return 1;
/* Disable all DMA interrupts for the channel. */
omap_writew(0, OMAP_DMA_CICR(ch));
/* Make sure the DMA transfer is stopped. */
omap_writew(0, OMAP_DMA_CCR(ch));
}
for (lch = 0; lch < dma_chan_count; lch++)
if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN)
return 1;
int omap_dma_in_1510_mode(void)
{
return enable_1510_mode;
return 0;
}
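omap_dma_running() gives system code a cheap way to tell whether any channel (or LCD DMA on 16xx) is still enabled, which matters before cutting clocks or entering a low-power state. A one-line, hypothetical use in a PM path:
/* Hypothetical PM check: refuse to power down while DMA is active. */
if (omap_dma_running())
	return -EBUSY;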
/*
......@@ -550,7 +737,8 @@ void omap_dma_link_lch (int lch_head, int lch_queue)
if ((dma_chan[lch_head].dev_id == -1) ||
(dma_chan[lch_queue].dev_id == -1)) {
printk(KERN_ERR "omap_dma: trying to link non requested channels\n");
printk(KERN_ERR "omap_dma: trying to link "
"non requested channels\n");
dump_stack();
}
......@@ -570,20 +758,149 @@ void omap_dma_unlink_lch (int lch_head, int lch_queue)
if (dma_chan[lch_head].next_lch != lch_queue ||
dma_chan[lch_head].next_lch == -1) {
printk(KERN_ERR "omap_dma: trying to unlink non linked channels\n");
printk(KERN_ERR "omap_dma: trying to unlink "
"non linked channels\n");
dump_stack();
}
if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
(dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n");
printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
"before unlinking\n");
dump_stack();
}
dma_chan[lch_head].next_lch = -1;
}
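Chaining is built on enable_lnk()/disable_lnk() above: linking records next_lch, and starting the head channel walks the chain and sets the hardware link bits so the controller proceeds to the queued channel by itself. A hedged, hypothetical two-channel fragment (lch_a and lch_b are placeholders; chaining is not available in 1510 mode, and channels must be stopped before unlinking, as the error message above says):
/* Hypothetical: queue lch_b behind lch_a. */
omap_dma_link_lch(lch_a, lch_b);
/* ...program both channels... */
omap_start_dma(lch_a);		/* walks the chain and starts the head */
/* ... */
omap_stop_dma(lch_a);		/* stop before unlinking */
omap_dma_unlink_lch(lch_a, lch_b);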
/*----------------------------------------------------------------------------*/
#ifdef CONFIG_ARCH_OMAP1
static int omap1_dma_handle_ch(int ch)
{
u16 csr;
if (enable_1510_mode && ch >= 6) {
csr = dma_chan[ch].saved_csr;
dma_chan[ch].saved_csr = 0;
} else
csr = OMAP_DMA_CSR_REG(ch);
if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
dma_chan[ch + 6].saved_csr = csr >> 7;
csr &= 0x7f;
}
if ((csr & 0x3f) == 0)
return 0;
if (unlikely(dma_chan[ch].dev_id == -1)) {
printk(KERN_WARNING "Spurious interrupt from DMA channel "
"%d (CSR %04x)\n", ch, csr);
return 0;
}
if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
printk(KERN_WARNING "DMA timeout with device %d\n",
dma_chan[ch].dev_id);
if (unlikely(csr & OMAP_DMA_DROP_IRQ))
printk(KERN_WARNING "DMA synchronization event drop occurred "
"with device %d\n", dma_chan[ch].dev_id);
if (likely(csr & OMAP_DMA_BLOCK_IRQ))
dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
if (likely(dma_chan[ch].callback != NULL))
dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
return 1;
}
static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id,
struct pt_regs *regs)
{
int ch = ((int) dev_id) - 1;
int handled = 0;
for (;;) {
int handled_now = 0;
handled_now += omap1_dma_handle_ch(ch);
if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
handled_now += omap1_dma_handle_ch(ch + 6);
if (!handled_now)
break;
handled += handled_now;
}
return handled ? IRQ_HANDLED : IRQ_NONE;
}
#else
#define omap1_dma_irq_handler NULL
#endif
#ifdef CONFIG_ARCH_OMAP2
static int omap2_dma_handle_ch(int ch)
{
u32 status = OMAP_DMA_CSR_REG(ch);
u32 val;
if (!status)
return 0;
if (unlikely(dma_chan[ch].dev_id == -1))
return 0;
/* REVISIT: According to 24xx TRM, there's no TOUT_IE */
if (unlikely(status & OMAP_DMA_TOUT_IRQ))
printk(KERN_INFO "DMA timeout with device %d\n",
dma_chan[ch].dev_id);
if (unlikely(status & OMAP_DMA_DROP_IRQ))
printk(KERN_INFO
"DMA synchronization event drop occurred with device "
"%d\n", dma_chan[ch].dev_id);
if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
printk(KERN_INFO "DMA transaction error with device %d\n",
dma_chan[ch].dev_id);
OMAP_DMA_CSR_REG(ch) = 0x20;
val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
/* ch in this function is from 0-31 while in register it is 1-32 */
val = 1 << (ch);
omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
if (likely(dma_chan[ch].callback != NULL))
dma_chan[ch].callback(ch, status, dma_chan[ch].data);
return 0;
}
/* STATUS register count is from 1-32 while ours is 0-31 */
static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id,
struct pt_regs *regs)
{
u32 val;
int i;
val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
for (i = 1; i <= OMAP_LOGICAL_DMA_CH_COUNT; i++) {
int active = val & (1 << (i - 1));
if (active)
omap2_dma_handle_ch(i - 1);
}
return IRQ_HANDLED;
}
static struct irqaction omap24xx_dma_irq = {
.name = "DMA",
.handler = omap2_dma_irq_handler,
.flags = SA_INTERRUPT
};
#else
static struct irqaction omap24xx_dma_irq;
#endif
/*----------------------------------------------------------------------------*/
static struct lcd_dma_info {
spinlock_t lock;
......@@ -795,7 +1112,7 @@ static void set_b1_regs(void)
/* Always set the source port as SDRAM for now */
w &= ~(0x03 << 6);
if (lcd_dma.callback != NULL)
w |= 1 << 1; /* Block interrupt enable */
w |= 1 << 1; /* Block interrupt enable */
else
w &= ~(1 << 1);
omap_writew(w, OMAP1610_DMA_LCD_CTRL);
......@@ -814,7 +1131,8 @@ static void set_b1_regs(void)
omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
}
static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id,
struct pt_regs *regs)
{
u16 w;
......@@ -870,7 +1188,8 @@ void omap_free_lcd_dma(void)
return;
}
if (!enable_1510_mode)
omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
OMAP1610_DMA_LCD_CCR);
lcd_dma.reserved = 0;
spin_unlock(&lcd_dma.lock);
}
......@@ -939,93 +1258,24 @@ void omap_stop_lcd_dma(void)
omap_writew(w, OMAP1610_DMA_LCD_CTRL);
}
/*
* Clears any DMA state so the DMA engine is ready to restart with new buffers
* through omap_start_dma(). Any buffers in flight are discarded.
*/
void omap_clear_dma(int lch)
{
unsigned long flags;
int status;
local_irq_save(flags);
omap_writew(omap_readw(OMAP_DMA_CCR(lch)) & ~OMAP_DMA_CCR_EN,
OMAP_DMA_CCR(lch));
status = OMAP_DMA_CSR(lch); /* clear pending interrupts */
local_irq_restore(flags);
}
/*
* Returns current physical source address for the given DMA channel.
* If the channel is running, the caller must disable interrupts prior to calling
* this function and process the returned value before re-enabling interrupts, to
* prevent races with the interrupt handler. Note that in continuous mode there
* is a chance of a CSSA_L register overflow in between the two reads, resulting
* in an incorrect return value.
*/
dma_addr_t omap_get_dma_src_pos(int lch)
{
return (dma_addr_t) (omap_readw(OMAP_DMA_CSSA_L(lch)) |
(omap_readw(OMAP_DMA_CSSA_U(lch)) << 16));
}
/*
* Returns current physical destination address for the given DMA channel.
* If the channel is running, the caller must disable interrupts prior to calling
* this function and process the returned value before re-enabling interrupts, to
* prevent races with the interrupt handler. Note that in continuous mode there
* is a chance of a CDSA_L register overflow in between the two reads, resulting
* in an incorrect return value.
*/
dma_addr_t omap_get_dma_dst_pos(int lch)
{
return (dma_addr_t) (omap_readw(OMAP_DMA_CDSA_L(lch)) |
(omap_readw(OMAP_DMA_CDSA_U(lch)) << 16));
}
/*
* Returns the current source transfer count for the given DMA channel.
* Can be used to monitor the progress of a transfer inside a block.
* It must be called with interrupts disabled.
*/
int omap_get_dma_src_addr_counter(int lch)
{
return (dma_addr_t) omap_readw(OMAP_DMA_CSAC(lch));
}
int omap_dma_running(void)
{
int lch;
/* Check if LCD DMA is running */
if (cpu_is_omap16xx())
if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
return 1;
for (lch = 0; lch < dma_chan_count; lch++) {
u16 w;
w = omap_readw(OMAP_DMA_CCR(lch));
if (w & OMAP_DMA_CCR_EN)
return 1;
}
return 0;
}
/*----------------------------------------------------------------------------*/
static int __init omap_init_dma(void)
{
int ch, r;
if (cpu_is_omap1510()) {
printk(KERN_INFO "DMA support for OMAP1510 initialized\n");
if (cpu_is_omap15xx()) {
printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
dma_chan_count = 9;
enable_1510_mode = 1;
} else if (cpu_is_omap16xx() || cpu_is_omap730()) {
printk(KERN_INFO "OMAP DMA hardware version %d\n",
omap_readw(OMAP_DMA_HW_ID));
printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
(omap_readw(OMAP_DMA_CAPS_0_U) << 16) | omap_readw(OMAP_DMA_CAPS_0_L),
(omap_readw(OMAP_DMA_CAPS_1_U) << 16) | omap_readw(OMAP_DMA_CAPS_1_L),
(omap_readw(OMAP_DMA_CAPS_0_U) << 16) |
omap_readw(OMAP_DMA_CAPS_0_L),
(omap_readw(OMAP_DMA_CAPS_1_U) << 16) |
omap_readw(OMAP_DMA_CAPS_1_L),
omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3),
omap_readw(OMAP_DMA_CAPS_4));
if (!enable_1510_mode) {
......@@ -1038,6 +1288,11 @@ static int __init omap_init_dma(void)
dma_chan_count = 16;
} else
dma_chan_count = 9;
} else if (cpu_is_omap24xx()) {
u8 revision = omap_readb(OMAP_DMA4_REVISION);
printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
revision >> 4, revision & 0xf);
dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
} else {
dma_chan_count = 0;
return 0;
......@@ -1049,41 +1304,56 @@ static int __init omap_init_dma(void)
memset(&dma_chan, 0, sizeof(dma_chan));
for (ch = 0; ch < dma_chan_count; ch++) {
omap_clear_dma(ch);
dma_chan[ch].dev_id = -1;
dma_chan[ch].next_lch = -1;
if (ch >= 6 && enable_1510_mode)
continue;
/* request_irq() doesn't like dev_id (ie. ch) being zero,
* so we have to kludge around this. */
r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA",
(void *) (ch + 1));
if (cpu_class_is_omap1()) {
/* request_irq() doesn't like dev_id (i.e. ch) being
* zero, so we have to kludge around this. */
r = request_irq(omap1_dma_irq[ch],
omap1_dma_irq_handler, 0, "DMA",
(void *) (ch + 1));
if (r != 0) {
int i;
printk(KERN_ERR "unable to request IRQ %d "
"for DMA (error %d)\n",
omap1_dma_irq[ch], r);
for (i = 0; i < ch; i++)
free_irq(omap1_dma_irq[i],
(void *) (i + 1));
return r;
}
}
}
if (cpu_is_omap24xx())
setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
/* FIXME: Update LCD DMA to work on 24xx */
if (cpu_class_is_omap1()) {
r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
"LCD DMA", NULL);
if (r != 0) {
int i;
printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n",
dma_irq[ch], r);
for (i = 0; i < ch; i++)
free_irq(dma_irq[i], (void *) (i + 1));
printk(KERN_ERR "unable to request IRQ for LCD DMA "
"(error %d)\n", r);
for (i = 0; i < dma_chan_count; i++)
free_irq(omap1_dma_irq[i], (void *) (i + 1));
return r;
}
}
r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, "LCD DMA", NULL);
if (r != 0) {
int i;
printk(KERN_ERR "unable to request IRQ for LCD DMA (error %d)\n", r);
for (i = 0; i < dma_chan_count; i++)
free_irq(dma_irq[i], (void *) (i + 1));
return r;
}
return 0;
}
arch_initcall(omap_init_dma);
EXPORT_SYMBOL(omap_get_dma_src_pos);
EXPORT_SYMBOL(omap_get_dma_dst_pos);
EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
......@@ -1109,6 +1379,8 @@ EXPORT_SYMBOL(omap_set_dma_dest_index);
EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
EXPORT_SYMBOL(omap_set_dma_params);
EXPORT_SYMBOL(omap_dma_link_lch);
EXPORT_SYMBOL(omap_dma_unlink_lch);
......
......@@ -140,7 +140,7 @@ static struct gpio_bank gpio_bank_1610[5] = {
};
#endif
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
static struct gpio_bank gpio_bank_1510[2] = {
{ OMAP_MPUIO_BASE, INT_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO },
{ OMAP1510_GPIO_BASE, INT_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_1510 }
......@@ -173,7 +173,7 @@ static int gpio_bank_count;
static inline struct gpio_bank *get_gpio_bank(int gpio)
{
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
if (OMAP_GPIO_IS_MPUIO(gpio))
return &gpio_bank[0];
......@@ -222,7 +222,7 @@ static inline int gpio_valid(int gpio)
return -1;
return 0;
}
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510() && gpio < 16)
return 0;
#endif
......@@ -654,7 +654,7 @@ int omap_request_gpio(int gpio)
/* Set trigger to none. You need to enable the trigger after request_irq */
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQT_NOEDGE);
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510) {
void __iomem *reg;
......@@ -739,7 +739,7 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
bank = (struct gpio_bank *) desc->data;
if (bank->method == METHOD_MPUIO)
isr_reg = bank->base + OMAP_MPUIO_GPIO_INT;
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510)
isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
#endif
......@@ -774,7 +774,7 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
d = irq_desc + gpio_irq;
desc_handle_irq(gpio_irq, d, regs);
}
}
}
}
static void gpio_ack_irq(unsigned int irq)
......@@ -837,8 +837,9 @@ static struct irqchip mpuio_irq_chip = {
.unmask = mpuio_unmask_irq
};
static int initialized = 0;
static struct clk * gpio_ck = NULL;
static int initialized;
static struct clk * gpio_ick;
static struct clk * gpio_fck;
static int __init _omap_gpio_init(void)
{
......@@ -848,14 +849,26 @@ static int __init _omap_gpio_init(void)
initialized = 1;
if (cpu_is_omap1510()) {
gpio_ck = clk_get(NULL, "arm_gpio_ck");
if (IS_ERR(gpio_ck))
gpio_ick = clk_get(NULL, "arm_gpio_ck");
if (IS_ERR(gpio_ick))
printk("Could not get arm_gpio_ck\n");
else
clk_use(gpio_ck);
clk_use(gpio_ick);
}
if (cpu_is_omap24xx()) {
gpio_ick = clk_get(NULL, "gpios_ick");
if (IS_ERR(gpio_ick))
printk("Could not get gpios_ick\n");
else
clk_use(gpio_ick);
gpio_fck = clk_get(NULL, "gpios_fck");
if (IS_ERR(gpio_fck))
printk("Could not get gpios_fck\n");
else
clk_use(gpio_fck);
}
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
printk(KERN_INFO "OMAP1510 GPIO hardware\n");
gpio_bank_count = 2;
......@@ -901,7 +914,7 @@ static int __init _omap_gpio_init(void)
if (bank->method == METHOD_MPUIO) {
omap_writew(0xFFFF, OMAP_MPUIO_BASE + OMAP_MPUIO_GPIO_MASKIT);
}
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510) {
__raw_writew(0xffff, bank->base + OMAP1510_GPIO_INT_MASK);
__raw_writew(0x0000, bank->base + OMAP1510_GPIO_INT_STATUS);
......@@ -1038,6 +1051,7 @@ static struct sys_device omap_gpio_device = {
/*
* This may get called early from board specific init
* for boards that have interrupts routed via FPGA.
*/
int omap_gpio_init(void)
{
......
......@@ -491,17 +491,20 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
omap_set_dma_transfer_params(mcbsp[id].dma_tx_lch,
OMAP_DMA_DATA_TYPE_S16,
length >> 1, 1,
OMAP_DMA_SYNC_ELEMENT);
OMAP_DMA_SYNC_ELEMENT,
0, 0);
omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1);
mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1,
0, 0);
omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC,
buffer);
buffer,
0, 0);
omap_start_dma(mcbsp[id].dma_tx_lch);
wait_for_completion(&(mcbsp[id].tx_dma_completion));
......@@ -531,17 +534,20 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
omap_set_dma_transfer_params(mcbsp[id].dma_rx_lch,
OMAP_DMA_DATA_TYPE_S16,
length >> 1, 1,
OMAP_DMA_SYNC_ELEMENT);
OMAP_DMA_SYNC_ELEMENT,
0, 0);
omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1);
mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1,
0, 0);
omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC,
buffer);
buffer,
0, 0);
omap_start_dma(mcbsp[id].dma_rx_lch);
wait_for_completion(&(mcbsp[id].rx_dma_completion));
......@@ -643,7 +649,7 @@ static const struct omap_mcbsp_info mcbsp_730[] = {
};
#endif
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
static const struct omap_mcbsp_info mcbsp_1510[] = {
[0] = { .virt_base = OMAP1510_MCBSP1_BASE,
.dma_rx_sync = OMAP_DMA_MCBSP1_RX,
......@@ -712,7 +718,7 @@ static int __init omap_mcbsp_init(void)
mcbsp_count = ARRAY_SIZE(mcbsp_730);
}
#endif
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
mcbsp_info = mcbsp_1510;
mcbsp_count = ARRAY_SIZE(mcbsp_1510);
......
......@@ -3,7 +3,7 @@
*
* Utility to set the Omap MUX and PULL_DWN registers from a table in mux.h
*
* Copyright (C) 2003 Nokia Corporation
* Copyright (C) 2003 - 2005 Nokia Corporation
*
* Written by Tony Lindgren <tony.lindgren@nokia.com>
*
......@@ -25,38 +25,74 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/spinlock.h>
#define __MUX_C__
#include <asm/arch/mux.h>
#ifdef CONFIG_OMAP_MUX
#define OMAP24XX_L4_BASE 0x48000000
#define OMAP24XX_PULL_ENA (1 << 3)
#define OMAP24XX_PULL_UP (1 << 4)
static struct pin_config * pin_table;
static unsigned long pin_table_sz;
extern struct pin_config * omap730_pins;
extern struct pin_config * omap1xxx_pins;
extern struct pin_config * omap24xx_pins;
int __init omap_mux_register(struct pin_config * pins, unsigned long size)
{
pin_table = pins;
pin_table_sz = size;
return 0;
}
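The table is registered once by the arch-specific mux code, after which individual pins are configured by index; the index names come from the pin_config tables referenced above, e.g. T20_1610_LOW_PWR used by the PM code later in this patch. A hedged sketch of both sides (the size argument is a placeholder, since only the extern pointers are visible here):
/* Hypothetical arch init fragment: hand the omap1 pin table to the
 * common code.  OMAP1XXX_PINS_SZ is a placeholder size symbol. */
omap_mux_register(omap1xxx_pins, OMAP1XXX_PINS_SZ);
/* Hypothetical board/driver code: mux one pin by its table index. */
omap_cfg_reg(T20_1610_LOW_PWR);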
/*
* Sets the Omap MUX and PULL_DWN registers based on the table
*/
int __init_or_module
omap_cfg_reg(const reg_cfg_t reg_cfg)
int __init_or_module omap_cfg_reg(const unsigned long index)
{
static DEFINE_SPINLOCK(mux_spin_lock);
unsigned long flags;
reg_cfg_set *cfg;
struct pin_config *cfg;
unsigned int reg_orig = 0, reg = 0, pu_pd_orig = 0, pu_pd = 0,
pull_orig = 0, pull = 0;
unsigned int mask, warn = 0;
if (cpu_is_omap7xx())
return 0;
if (!pin_table)
BUG();
if (reg_cfg > ARRAY_SIZE(reg_cfg_table)) {
printk(KERN_ERR "MUX: reg_cfg %d\n", reg_cfg);
return -EINVAL;
if (index >= pin_table_sz) {
printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
index, pin_table_sz);
dump_stack();
return -ENODEV;
}
cfg = (reg_cfg_set *)&reg_cfg_table[reg_cfg];
cfg = (struct pin_config *)&pin_table[index];
if (cpu_is_omap24xx()) {
u8 reg = 0;
reg |= cfg->mask & 0x7;
if (cfg->pull_val)
reg |= OMAP24XX_PULL_ENA;
if (cfg->pu_pd_val)
reg |= OMAP24XX_PULL_UP;
#ifdef CONFIG_OMAP_MUX_DEBUG
printk("Muxing %s (0x%08x): 0x%02x -> 0x%02x\n",
cfg->name, OMAP24XX_L4_BASE + cfg->mux_reg,
omap_readb(OMAP24XX_L4_BASE + cfg->mux_reg), reg);
#endif
omap_writeb(reg, OMAP24XX_L4_BASE + cfg->mux_reg);
return 0;
}
/* Check the mux register in question */
if (cfg->mux_reg) {
......@@ -157,7 +193,8 @@ omap_cfg_reg(const reg_cfg_t reg_cfg)
return 0;
#endif
}
EXPORT_SYMBOL(omap_cfg_reg);
#else
#define omap_mux_init() do {} while(0)
#define omap_cfg_reg(x) do {} while(0)
#endif /* CONFIG_OMAP_MUX */
......@@ -54,11 +54,12 @@
#include <asm/arch/tps65010.h>
#include <asm/arch/dsp_common.h>
#include "clock.h"
#include "sram.h"
#include <asm/arch/clock.h>
#include <asm/arch/sram.h>
static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE];
static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE];
static unsigned int mpui730_sleep_save[MPUI730_SLEEP_SAVE_SIZE];
static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
......@@ -120,8 +121,8 @@ void omap_pm_idle(void)
*/
static void omap_pm_wakeup_setup(void)
{
u32 level1_wake = OMAP_IRQ_BIT(INT_IH2_IRQ);
u32 level2_wake = OMAP_IRQ_BIT(INT_UART2) | OMAP_IRQ_BIT(INT_KEYBOARD);
u32 level1_wake = 0;
u32 level2_wake = OMAP_IRQ_BIT(INT_UART2);
/*
* Turn off all interrupts except GPIO bank 1, L1-2nd level cascade,
......@@ -129,19 +130,29 @@ static void omap_pm_wakeup_setup(void)
* drivers must still separately call omap_set_gpio_wakeup() to
* wake up to a GPIO interrupt.
*/
if (cpu_is_omap1510() || cpu_is_omap16xx())
level1_wake |= OMAP_IRQ_BIT(INT_GPIO_BANK1);
else if (cpu_is_omap730())
level1_wake |= OMAP_IRQ_BIT(INT_730_GPIO_BANK1);
if (cpu_is_omap730())
level1_wake = OMAP_IRQ_BIT(INT_730_GPIO_BANK1) |
OMAP_IRQ_BIT(INT_730_IH2_IRQ);
else if (cpu_is_omap1510())
level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
OMAP_IRQ_BIT(INT_1510_IH2_IRQ);
else if (cpu_is_omap16xx())
level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
OMAP_IRQ_BIT(INT_1610_IH2_IRQ);
omap_writel(~level1_wake, OMAP_IH1_MIR);
if (cpu_is_omap1510())
if (cpu_is_omap730()) {
omap_writel(~level2_wake, OMAP_IH2_0_MIR);
omap_writel(~(OMAP_IRQ_BIT(INT_730_WAKE_UP_REQ) | OMAP_IRQ_BIT(INT_730_MPUIO_KEYPAD)), OMAP_IH2_1_MIR);
} else if (cpu_is_omap1510()) {
level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
omap_writel(~level2_wake, OMAP_IH2_MIR);
/* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
if (cpu_is_omap16xx()) {
} else if (cpu_is_omap16xx()) {
level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
omap_writel(~level2_wake, OMAP_IH2_0_MIR);
/* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ), OMAP_IH2_1_MIR);
omap_writel(~0x0, OMAP_IH2_2_MIR);
omap_writel(~0x0, OMAP_IH2_3_MIR);
......@@ -185,7 +196,17 @@ void omap_pm_suspend(void)
* Save interrupt, MPUI, ARM and UPLD control registers.
*/
if (cpu_is_omap1510()) {
if (cpu_is_omap730()) {
MPUI730_SAVE(OMAP_IH1_MIR);
MPUI730_SAVE(OMAP_IH2_0_MIR);
MPUI730_SAVE(OMAP_IH2_1_MIR);
MPUI730_SAVE(MPUI_CTRL);
MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
MPUI730_SAVE(MPUI_DSP_API_CONFIG);
MPUI730_SAVE(EMIFS_CONFIG);
MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
} else if (cpu_is_omap1510()) {
MPUI1510_SAVE(OMAP_IH1_MIR);
MPUI1510_SAVE(OMAP_IH2_MIR);
MPUI1510_SAVE(MPUI_CTRL);
......@@ -280,7 +301,13 @@ void omap_pm_suspend(void)
ULPD_RESTORE(ULPD_CLOCK_CTRL);
ULPD_RESTORE(ULPD_STATUS_REQ);
if (cpu_is_omap1510()) {
if (cpu_is_omap730()) {
MPUI730_RESTORE(EMIFS_CONFIG);
MPUI730_RESTORE(EMIFF_SDRAM_CONFIG);
MPUI730_RESTORE(OMAP_IH1_MIR);
MPUI730_RESTORE(OMAP_IH2_0_MIR);
MPUI730_RESTORE(OMAP_IH2_1_MIR);
} else if (cpu_is_omap1510()) {
MPUI1510_RESTORE(MPUI_CTRL);
MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG);
MPUI1510_RESTORE(MPUI_DSP_API_CONFIG);
......@@ -355,7 +382,14 @@ static int omap_pm_read_proc(
ULPD_SAVE(ULPD_DPLL_CTRL);
ULPD_SAVE(ULPD_POWER_CTRL);
if (cpu_is_omap1510()) {
if (cpu_is_omap730()) {
MPUI730_SAVE(MPUI_CTRL);
MPUI730_SAVE(MPUI_DSP_STATUS);
MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
MPUI730_SAVE(MPUI_DSP_API_CONFIG);
MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
MPUI730_SAVE(EMIFS_CONFIG);
} else if (cpu_is_omap1510()) {
MPUI1510_SAVE(MPUI_CTRL);
MPUI1510_SAVE(MPUI_DSP_STATUS);
MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
......@@ -404,7 +438,21 @@ static int omap_pm_read_proc(
ULPD_SHOW(ULPD_STATUS_REQ),
ULPD_SHOW(ULPD_POWER_CTRL));
if (cpu_is_omap1510()) {
if (cpu_is_omap730()) {
my_buffer_offset += sprintf(my_base + my_buffer_offset,
"MPUI730_CTRL_REG 0x%-8x \n"
"MPUI730_DSP_STATUS_REG: 0x%-8x \n"
"MPUI730_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
"MPUI730_DSP_API_CONFIG_REG: 0x%-8x \n"
"MPUI730_SDRAM_CONFIG_REG: 0x%-8x \n"
"MPUI730_EMIFS_CONFIG_REG: 0x%-8x \n",
MPUI730_SHOW(MPUI_CTRL),
MPUI730_SHOW(MPUI_DSP_STATUS),
MPUI730_SHOW(MPUI_DSP_BOOT_CONFIG),
MPUI730_SHOW(MPUI_DSP_API_CONFIG),
MPUI730_SHOW(EMIFF_SDRAM_CONFIG),
MPUI730_SHOW(EMIFS_CONFIG));
} else if (cpu_is_omap1510()) {
my_buffer_offset += sprintf(my_base + my_buffer_offset,
"MPUI1510_CTRL_REG 0x%-8x \n"
"MPUI1510_DSP_STATUS_REG: 0x%-8x \n"
......@@ -553,7 +601,12 @@ static int __init omap_pm_init(void)
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up.
*/
if (cpu_is_omap1510()) {
if (cpu_is_omap730()) {
omap_sram_idle = omap_sram_push(omap730_idle_loop_suspend,
omap730_idle_loop_suspend_sz);
omap_sram_suspend = omap_sram_push(omap730_cpu_suspend,
omap730_cpu_suspend_sz);
} else if (cpu_is_omap1510()) {
omap_sram_idle = omap_sram_push(omap1510_idle_loop_suspend,
omap1510_idle_loop_suspend_sz);
omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
......@@ -572,7 +625,11 @@ static int __init omap_pm_init(void)
pm_idle = omap_pm_idle;
setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
if (cpu_is_omap730())
setup_irq(INT_730_WAKE_UP_REQ, &omap_wakeup_irq);
else if (cpu_is_omap16xx())
setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
#if 0
/* --- BEGIN BOARD-DEPENDENT CODE --- */
/* Sleepx mask direction */
......@@ -591,7 +648,9 @@ static int __init omap_pm_init(void)
omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL);
/* Configure IDLECT3 */
if (cpu_is_omap16xx())
if (cpu_is_omap730())
omap_writel(OMAP730_IDLECT3_VAL, OMAP730_IDLECT3);
else if (cpu_is_omap16xx())
omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
pm_set_ops(&omap_pm_ops);
......@@ -600,8 +659,10 @@ static int __init omap_pm_init(void)
omap_pm_init_proc();
#endif
/* configure LOW_PWR pin */
omap_cfg_reg(T20_1610_LOW_PWR);
if (cpu_is_omap16xx()) {
/* configure LOW_PWR pin */
omap_cfg_reg(T20_1610_LOW_PWR);
}
return 0;
}
......
/*
* linux/arch/arm/plat-omap/sleep.S
*
* Low-level OMAP1510/1610 sleep/wakeUp support
* Low-level OMAP730/1510/1610 sleep/wakeUp support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
......@@ -52,7 +52,57 @@
* processor specific functions here.
*/
#ifdef CONFIG_ARCH_OMAP1510
#if defined(CONFIG_ARCH_OMAP730)
ENTRY(omap730_idle_loop_suspend)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
@ load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ turn off clock domains
@ get ARM_IDLECT2 into r2
ldrh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
mov r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff
orr r5, r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ request ARM idle
@ get ARM_IDLECT1 into r1
ldrh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
orr r3, r1, #OMAP730_IDLE_LOOP_REQUEST & 0xffff
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
mov r5, #IDLE_WAIT_CYCLES & 0xff
orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
l_730: subs r5, r5, #1
bne l_730
/*
* Let's wait for the next clock tick to wake us up.
*/
mov r0, #0
mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
/*
* omap730_idle_loop_suspend()'s resume point.
*
* It will just start executing here, so we'll restore stuff from the
* stack, reset the ARM_IDLECT1 and ARM_IDLECT2.
*/
@ restore ARM_IDLECT1 and ARM_IDLECT2 and return
@ r1 has ARM_IDLECT1 and r2 still has ARM_IDLECT2
strh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
ldmfd sp!, {r0 - r12, pc} @ restore regs and return
ENTRY(omap730_idle_loop_suspend_sz)
.word . - omap730_idle_loop_suspend
#endif /* CONFIG_ARCH_OMAP730 */
#ifdef CONFIG_ARCH_OMAP15XX
ENTRY(omap1510_idle_loop_suspend)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
......@@ -100,7 +150,7 @@ l_1510: subs r5, r5, #1
ENTRY(omap1510_idle_loop_suspend_sz)
.word . - omap1510_idle_loop_suspend
#endif /* CONFIG_ARCH_OMAP1510 */
#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
ENTRY(omap1610_idle_loop_suspend)
......@@ -169,7 +219,86 @@ ENTRY(omap1610_idle_loop_suspend_sz)
*
*/
#ifdef CONFIG_ARCH_OMAP1510
#if defined(CONFIG_ARCH_OMAP730)
ENTRY(omap730_cpu_suspend)
@ save registers on stack
stmfd sp!, {r0 - r12, lr}
@ Drain write cache
mov r4, #0
mcr p15, 0, r0, c7, c10, 4
nop
@ load base address of Traffic Controller
mov r6, #TCMIF_ASM_BASE & 0xff000000
orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
@ prepare to put SDRAM into self-refresh manually
ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
@ prepare to put EMIFS to Sleep
ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ load base address of ARM_IDLECT1 and ARM_IDLECT2
mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
@ turn off clock domains
@ do not disable PERCK (0x04)
mov r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff
orr r5, r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff00
strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
@ request ARM idle
mov r3, #OMAP730_IDLECT1_SLEEP_VAL & 0xff
orr r3, r3, #OMAP730_IDLECT1_SLEEP_VAL & 0xff00
strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ disable instruction cache
mrc p15, 0, r9, c1, c0, 0
bic r2, r9, #0x1000
mcr p15, 0, r2, c1, c0, 0
nop
/*
* Let's wait for the next wake up event to wake us up. r0 can't be
* used here because r0 holds ARM_IDLECT1
*/
mov r2, #0
mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
/*
* omap730_cpu_suspend()'s resume point.
*
* It will just start executing here, so we'll restore stuff from the
* stack.
*/
@ re-enable Icache
mcr p15, 0, r9, c1, c0, 0
@ reset the ARM_IDLECT1 and ARM_IDLECT2.
strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
@ Restore EMIFF controls
str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
@ restore regs and return
ldmfd sp!, {r0 - r12, pc}
ENTRY(omap730_cpu_suspend_sz)
.word . - omap730_cpu_suspend
#endif /* CONFIG_ARCH_OMAP730 */
#ifdef CONFIG_ARCH_OMAP15XX
ENTRY(omap1510_cpu_suspend)
@ save registers on stack
......@@ -241,7 +370,7 @@ l_1510_2:
ENTRY(omap1510_cpu_suspend_sz)
.word . - omap1510_cpu_suspend
#endif /* CONFIG_ARCH_OMAP1510 */
#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
ENTRY(omap1610_cpu_suspend)
......
......@@ -20,10 +20,13 @@
#include <asm/io.h>
#include <asm/cacheflush.h>
#include "sram.h"
#include <asm/arch/sram.h>
#define OMAP1_SRAM_PA 0x20000000
#define OMAP1_SRAM_VA 0xd0000000
#define OMAP2_SRAM_PA 0x40200000
#define OMAP2_SRAM_VA 0xd0000000
#define OMAP1_SRAM_BASE 0xd0000000
#define OMAP1_SRAM_START 0x20000000
#define SRAM_BOOTLOADER_SZ 0x80
static unsigned long omap_sram_base;
......@@ -31,37 +34,40 @@ static unsigned long omap_sram_size;
static unsigned long omap_sram_ceil;
/*
* The amount of SRAM depends on the core type:
* 730 = 200K, 1510 = 512K, 5912 = 256K, 1610 = 16K, 1710 = 16K
* The amount of SRAM depends on the core type.
* Note that we cannot try to test for SRAM here because writes
* to secure SRAM will hang the system. Also the SRAM is not
* yet mapped at this point.
*/
void __init omap_detect_sram(void)
{
omap_sram_base = OMAP1_SRAM_BASE;
if (!cpu_is_omap24xx())
omap_sram_base = OMAP1_SRAM_VA;
else
omap_sram_base = OMAP2_SRAM_VA;
if (cpu_is_omap730())
omap_sram_size = 0x32000;
else if (cpu_is_omap1510())
omap_sram_size = 0x80000;
omap_sram_size = 0x32000; /* 200K */
else if (cpu_is_omap15xx())
omap_sram_size = 0x30000; /* 192K */
else if (cpu_is_omap1610() || cpu_is_omap1621() || cpu_is_omap1710())
omap_sram_size = 0x4000;
omap_sram_size = 0x4000; /* 16K */
else if (cpu_is_omap1611())
omap_sram_size = 0x3e800;
omap_sram_size = 0x3e800; /* 250K */
else if (cpu_is_omap2420())
omap_sram_size = 0xa0014; /* 640K */
else {
printk(KERN_ERR "Could not detect SRAM size\n");
omap_sram_size = 0x4000;
}
printk(KERN_INFO "SRAM size: 0x%lx\n", omap_sram_size);
omap_sram_ceil = omap_sram_base + omap_sram_size;
}
static struct map_desc omap_sram_io_desc[] __initdata = {
{ /* .length gets filled in at runtime */
.virtual = OMAP1_SRAM_BASE,
.pfn = __phys_to_pfn(OMAP1_SRAM_START),
.virtual = OMAP1_SRAM_VA,
.pfn = __phys_to_pfn(OMAP1_SRAM_PA),
.type = MT_DEVICE
}
};
......@@ -76,10 +82,19 @@ void __init omap_map_sram(void)
if (omap_sram_size == 0)
return;
if (cpu_is_omap24xx()) {
omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
omap_sram_io_desc[0].pfn = __phys_to_pfn(OMAP2_SRAM_PA);
}
omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
omap_sram_io_desc[0].length *= PAGE_SIZE;
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
omap_sram_io_desc[0].pfn, omap_sram_io_desc[0].virtual,
omap_sram_io_desc[0].length);
/*
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
......@@ -88,16 +103,6 @@ void __init omap_map_sram(void)
omap_sram_size - SRAM_BOOTLOADER_SZ);
}
static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl) = NULL;
void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
{
if (_omap_sram_reprogram_clock == NULL)
panic("Cannot use SRAM");
return _omap_sram_reprogram_clock(dpllctl, ckctl);
}
void * omap_sram_push(void * start, unsigned long size)
{
if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
......@@ -111,10 +116,94 @@ void * omap_sram_push(void * start, unsigned long size)
return (void *)omap_sram_ceil;
}
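omap_sram_push() copies a routine into SRAM and returns its new address; callers keep a function pointer and jump through it, exactly as the omap1/omap2 init functions below do. A hypothetical extra user would follow the same shape (my_sram_fn and my_sram_fn_sz are placeholders for an SRAM-safe routine and its size symbol, generated the way sleep.S generates omap730_cpu_suspend_sz):
/* Hypothetical: relocate my_sram_fn into SRAM and call the SRAM copy. */
static void (*_my_sram_fn)(u32 arg);
static int __init my_sram_user_init(void)
{
	_my_sram_fn = omap_sram_push(my_sram_fn, my_sram_fn_sz);
	_my_sram_fn(0x1234);	/* runs from SRAM */
	return 0;
}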
void __init omap_sram_init(void)
static void omap_sram_error(void)
{
panic("Uninitialized SRAM function\n");
}
#ifdef CONFIG_ARCH_OMAP1
static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
{
if (!_omap_sram_reprogram_clock)
omap_sram_error();
return _omap_sram_reprogram_clock(dpllctl, ckctl);
}
int __init omap1_sram_init(void)
{
omap_detect_sram();
omap_map_sram();
_omap_sram_reprogram_clock = omap_sram_push(sram_reprogram_clock,
sram_reprogram_clock_sz);
return 0;
}
#else
#define omap1_sram_init() do {} while (0)
#endif
#ifdef CONFIG_ARCH_OMAP2
static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
u32 base_cs, u32 force_unlock);
void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
u32 base_cs, u32 force_unlock)
{
if (!_omap2_sram_ddr_init)
omap_sram_error();
return _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
base_cs, force_unlock);
}
static void (*_omap2_sram_reprogram_sdrc)(u32 perf_level, u32 dll_val,
u32 mem_type);
void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type)
{
if (!_omap2_sram_reprogram_sdrc)
omap_sram_error();
return _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
}
static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass)
{
if (!_omap2_set_prcm)
omap_sram_error();
return _omap2_set_prcm(dpll_ctrl_val, sdrc_rfr_val, bypass);
}
int __init omap2_sram_init(void)
{
_omap2_sram_ddr_init = omap_sram_push(sram_ddr_init, sram_ddr_init_sz);
_omap2_sram_reprogram_sdrc = omap_sram_push(sram_reprogram_sdrc,
sram_reprogram_sdrc_sz);
_omap2_set_prcm = omap_sram_push(sram_set_prcm, sram_set_prcm_sz);
return 0;
}
#else
#define omap2_sram_init() do {} while (0)
#endif
int __init omap_sram_init(void)
{
omap_detect_sram();
omap_map_sram();
if (!cpu_is_omap24xx())
omap1_sram_init();
else
omap2_sram_init();
return 0;
}
/*
* linux/arch/arm/plat-omap/sram.h
*
* Interface for functions that need to be run in internal SRAM
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ARCH_ARM_OMAP_SRAM_H
#define __ARCH_ARM_OMAP_SRAM_H
extern void * omap_sram_push(void * start, unsigned long size);
extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
/* Do not use these */
extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
extern unsigned long sram_reprogram_clock_sz;
#endif
......@@ -91,6 +91,8 @@ EXPORT_SYMBOL(otg_set_transceiver);
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP15XX)
static u32 __init omap_usb0_init(unsigned nwires, unsigned is_device)
{
u32 syscon1 = 0;
......@@ -271,6 +273,8 @@ static u32 __init omap_usb2_init(unsigned nwires, unsigned alt_pingroup)
return syscon1 << 24;
}
#endif
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_USB_GADGET_OMAP) || \
......@@ -494,7 +498,7 @@ static inline void omap_otg_init(struct omap_usb_config *config) {}
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_ARCH_OMAP1510
#ifdef CONFIG_ARCH_OMAP15XX
#define ULPD_DPLL_CTRL_REG __REG16(ULPD_DPLL_CTRL)
#define DPLL_IOB (1 << 13)
......@@ -507,7 +511,6 @@ static inline void omap_otg_init(struct omap_usb_config *config) {}
static void __init omap_1510_usb_init(struct omap_usb_config *config)
{
int status;
unsigned int val;
omap_usb0_init(config->pins[0], is_usb0_device(config));
......@@ -539,6 +542,8 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
#ifdef CONFIG_USB_GADGET_OMAP
if (config->register_dev) {
int status;
udc_device.dev.platform_data = config;
status = platform_device_register(&udc_device);
if (status)
......@@ -549,6 +554,8 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
if (config->register_host) {
int status;
ohci_device.dev.platform_data = config;
status = platform_device_register(&ohci_device);
if (status)
......