Commit a695bc68 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm:
  PCMCIA: fix pxa2xx_lubbock modular build error
  [ARM] Update mach-types
  [ARM] pxa: fix no reference of cpu_is_pxa25x() in devices.c
  [ARM] pxa/cm-x300: add PWM backlight support
  revert "[ARM] pxa/cm-x300: add PWM backlight support"
  ARM: use flush_kernel_dcache_area() for dmabounce
  ARM: add size argument to __cpuc_flush_dcache_page
  ARM: 5848/1: kill flush_ioremap_region()
  ARM: cache-l2x0: make better use of background cache handling
  ARM: cache-l2x0: avoid taking spinlock for every iteration
  [ARM] Kirkwood: Add LaCie Network Space v2 support
  ARM: dove: fix the mm mmu flags of the pj4 procinfo
parents 6485536b 6665398a
@@ -308,15 +308,11 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 		memcpy(ptr, buf->safe, size);

 		/*
-		 * DMA buffers must have the same cache properties
-		 * as if they were really used for DMA - which means
-		 * data must be written back to RAM.  Note that
-		 * we don't use dmac_flush_range() here for the
-		 * bidirectional case because we know the cache
-		 * lines will be coherent with the data written.
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
 		 */
-		dmac_clean_range(ptr, ptr + size);
-		outer_clean_range(__pa(ptr), __pa(ptr) + size);
+		__cpuc_flush_kernel_dcache_area(ptr, size);
 	}
 	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
......
@@ -211,7 +211,7 @@ struct cpu_cache_fns {
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);

@@ -236,7 +236,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

 /*
  * These are private to the dma-mapping API.  Do not use directly.

@@ -255,14 +255,14 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);

 /*
  * These are private to the dma-mapping API.  Do not use directly.

@@ -448,7 +448,7 @@ static inline void flush_kernel_dcache_page(struct page *page)
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }

 #define flush_dcache_mmap_lock(mapping) \

@@ -465,13 +465,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
  */
 #define flush_icache_page(vma,page)	do { } while (0)

-static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
-	unsigned offset, size_t size)
-{
-	const void *start = (void __force *)virt + offset;
-	dmac_inv_range(start, start + size);
-}
-
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
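Note the two dispatch paths the rename touches: with MULTI_CACHE, __cpuc_flush_dcache_area indirects through the cpu_cache function table at run time; otherwise the name is pasted onto the selected cache type's symbol at compile time. A minimal user-space sketch of the name-pasting half (the two-step expansion mirrors the kernel's __glue helper; everything else here is illustrative, not kernel code):

	#include <stddef.h>
	#include <stdio.h>

	/* Two-step pasting so the _CACHE macro is expanded first. */
	#define __glue_(name, fn)	name##fn
	#define __glue(name, fn)	__glue_(name, fn)

	/* Pretend the kernel config selected a single cache type. */
	#define _CACHE v7

	/* Stand-in for the per-CPU assembly routine (illustrative only). */
	static void v7_flush_kern_dcache_area(void *addr, size_t size)
	{
		printf("flush %zu bytes at %p\n", size, addr);
	}

	#define __cpuc_flush_dcache_area __glue(_CACHE, _flush_kern_dcache_area)

	int main(void)
	{
		char page[4096];

		/* Resolves to v7_flush_kern_dcache_area() at compile time. */
		__cpuc_flush_dcache_area(page, sizeof(page));
		return 0;
	}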
......
@@ -52,6 +52,12 @@ config MACH_OPENRD_BASE
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell OpenRD Base Board.

+config MACH_NETSPACE_V2
+	bool "LaCie Network Space v2 NAS Board"
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  LaCie Network Space v2 NAS.
+
 endmenu

 endif
@@ -8,5 +8,6 @@ obj-$(CONFIG_MACH_SHEEVAPLUG)	+= sheevaplug-setup.o
 obj-$(CONFIG_MACH_TS219)	+= ts219-setup.o tsx1x-common.o
 obj-$(CONFIG_MACH_TS41X)	+= ts41x-setup.o tsx1x-common.o
 obj-$(CONFIG_MACH_OPENRD_BASE)	+= openrd_base-setup.o
+obj-$(CONFIG_MACH_NETSPACE_V2)	+= netspace_v2-setup.o

 obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
/*
* arch/arm/mach-kirkwood/netspace_v2-setup.c
*
* LaCie Network Space v2 board setup
*
* Copyright (C) 2009 Simon Guinot <sguinot@lacie.com>
* Copyright (C) 2009 Benoît Canet <benoit.canet@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/flash.h>
#include <linux/spi/spi.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/kirkwood.h>
#include <plat/time.h>
#include "common.h"
#include "mpp.h"
/*****************************************************************************
* 512KB SPI Flash on Boot Device (MACRONIX MX25L4005)
****************************************************************************/
static struct mtd_partition netspace_v2_flash_parts[] = {
{
.name = "u-boot",
.size = MTDPART_SIZ_FULL,
.offset = 0,
.mask_flags = MTD_WRITEABLE, /* force read-only */
},
};
static const struct flash_platform_data netspace_v2_flash = {
.type = "mx25l4005a",
.name = "spi_flash",
.parts = netspace_v2_flash_parts,
.nr_parts = ARRAY_SIZE(netspace_v2_flash_parts),
};
static struct spi_board_info __initdata netspace_v2_spi_slave_info[] = {
{
.modalias = "m25p80",
.platform_data = &netspace_v2_flash,
.irq = -1,
.max_speed_hz = 20000000,
.bus_num = 0,
.chip_select = 0,
},
};
/*****************************************************************************
* Ethernet
****************************************************************************/
static struct mv643xx_eth_platform_data netspace_v2_ge00_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
/*****************************************************************************
* I2C devices
****************************************************************************/
static struct at24_platform_data at24c04 = {
.byte_len = SZ_4K / 8,
.page_size = 16,
};
/*
* i2c addr | chip | description
* 0x50 | HT24LC04 | eeprom (512B)
*/
static struct i2c_board_info __initdata netspace_v2_i2c_info[] = {
{
I2C_BOARD_INFO("24c04", 0x50),
.platform_data = &at24c04,
}
};
/*****************************************************************************
* SATA
****************************************************************************/
static struct mv_sata_platform_data netspace_v2_sata_data = {
.n_ports = 2,
};
#define NETSPACE_V2_GPIO_SATA0_POWER 16
#define NETSPACE_V2_GPIO_SATA1_POWER 17
static void __init netspace_v2_sata_power_init(void)
{
int err;
err = gpio_request(NETSPACE_V2_GPIO_SATA0_POWER, "SATA0 power");
if (err == 0) {
err = gpio_direction_output(NETSPACE_V2_GPIO_SATA0_POWER, 1);
if (err)
gpio_free(NETSPACE_V2_GPIO_SATA0_POWER);
}
if (err)
pr_err("netspace_v2: failed to setup SATA0 power\n");
}
/*****************************************************************************
* GPIO keys
****************************************************************************/
#define NETSPACE_V2_PUSH_BUTTON 32
static struct gpio_keys_button netspace_v2_buttons[] = {
[0] = {
.code = KEY_POWER,
.gpio = NETSPACE_V2_PUSH_BUTTON,
.desc = "Power push button",
.active_low = 0,
},
};
static struct gpio_keys_platform_data netspace_v2_button_data = {
.buttons = netspace_v2_buttons,
.nbuttons = ARRAY_SIZE(netspace_v2_buttons),
};
static struct platform_device netspace_v2_gpio_buttons = {
.name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &netspace_v2_button_data,
},
};
/*****************************************************************************
* GPIO LEDs
****************************************************************************/
/*
* The blue front LED is wired to a CPLD and can blink in relation with the
* SATA activity.
*
* The following array detail the different LED registers and the combination
* of their possible values:
*
* cmd_led | slow_led | /SATA active | LED state
* | | |
* 1 | 0 | x | off
* - | 1 | x | on
* 0 | 0 | 1 | on
* 0 | 0 | 0 | blink (rate 300ms)
*/
#define NETSPACE_V2_GPIO_RED_LED 12
#define NETSPACE_V2_GPIO_BLUE_LED_SLOW 29
#define NETSPACE_V2_GPIO_BLUE_LED_CMD 30
static struct gpio_led netspace_v2_gpio_led_pins[] = {
{
.name = "ns_v2:red:fail",
.gpio = NETSPACE_V2_GPIO_RED_LED,
},
};
static struct gpio_led_platform_data netspace_v2_gpio_leds_data = {
.num_leds = ARRAY_SIZE(netspace_v2_gpio_led_pins),
.leds = netspace_v2_gpio_led_pins,
};
static struct platform_device netspace_v2_gpio_leds = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &netspace_v2_gpio_leds_data,
},
};
static void __init netspace_v2_gpio_leds_init(void)
{
platform_device_register(&netspace_v2_gpio_leds);
/*
* Configure the front blue LED to blink in relation with the SATA
* activity.
*/
if (gpio_request(NETSPACE_V2_GPIO_BLUE_LED_SLOW,
"SATA blue LED slow") != 0)
return;
if (gpio_direction_output(NETSPACE_V2_GPIO_BLUE_LED_SLOW, 0) != 0)
goto err_free_1;
if (gpio_request(NETSPACE_V2_GPIO_BLUE_LED_CMD,
"SATA blue LED command") != 0)
goto err_free_1;
if (gpio_direction_output(NETSPACE_V2_GPIO_BLUE_LED_CMD, 0) != 0)
goto err_free_2;
return;
err_free_2:
gpio_free(NETSPACE_V2_GPIO_BLUE_LED_CMD);
err_free_1:
gpio_free(NETSPACE_V2_GPIO_BLUE_LED_SLOW);
pr_err("netspace_v2: failed to configure SATA blue LED\n");
}
/*****************************************************************************
* Timer
****************************************************************************/
static void netspace_v2_timer_init(void)
{
kirkwood_tclk = 166666667;
orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
}
struct sys_timer netspace_v2_timer = {
.init = netspace_v2_timer_init,
};
/*****************************************************************************
* General Setup
****************************************************************************/
static unsigned int netspace_v2_mpp_config[] __initdata = {
MPP0_SPI_SCn,
MPP1_SPI_MOSI,
MPP2_SPI_SCK,
MPP3_SPI_MISO,
MPP4_NF_IO6,
MPP5_NF_IO7,
MPP6_SYSRST_OUTn,
MPP8_TW_SDA,
MPP9_TW_SCK,
MPP10_UART0_TXD,
MPP11_UART0_RXD,
MPP12_GPO, /* Red led */
MPP14_GPIO, /* USB fuse */
MPP16_GPIO, /* SATA 0 power */
MPP18_NF_IO0,
MPP19_NF_IO1,
MPP20_SATA1_ACTn,
MPP21_SATA0_ACTn,
MPP24_GPIO, /* USB mode select */
MPP25_GPIO, /* Fan rotation fail */
MPP26_GPIO, /* USB device vbus */
MPP28_GPIO, /* USB enable host vbus */
MPP29_GPIO, /* Blue led (slow register) */
MPP30_GPIO, /* Blue led (command register) */
MPP31_GPIO, /* Board power off */
MPP32_GPIO, /* Power button (0 = Released, 1 = Pushed) */
0
};
#define NETSPACE_V2_GPIO_POWER_OFF 31
static void netspace_v2_power_off(void)
{
gpio_set_value(NETSPACE_V2_GPIO_POWER_OFF, 1);
}
static void __init netspace_v2_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
kirkwood_init();
kirkwood_mpp_conf(netspace_v2_mpp_config);
netspace_v2_sata_power_init();
kirkwood_ehci_init();
kirkwood_ge00_init(&netspace_v2_ge00_data);
kirkwood_sata_init(&netspace_v2_sata_data);
kirkwood_uart0_init();
spi_register_board_info(netspace_v2_spi_slave_info,
ARRAY_SIZE(netspace_v2_spi_slave_info));
kirkwood_spi_init();
kirkwood_i2c_init();
i2c_register_board_info(0, netspace_v2_i2c_info,
ARRAY_SIZE(netspace_v2_i2c_info));
netspace_v2_gpio_leds_init();
platform_device_register(&netspace_v2_gpio_buttons);
if (gpio_request(NETSPACE_V2_GPIO_POWER_OFF, "power-off") == 0 &&
gpio_direction_output(NETSPACE_V2_GPIO_POWER_OFF, 0) == 0)
pm_power_off = netspace_v2_power_off;
else
pr_err("netspace_v2: failed to configure power-off GPIO\n");
}
MACHINE_START(NETSPACE_V2, "LaCie Network Space v2")
.phys_io = KIRKWOOD_REGS_PHYS_BASE,
.io_pg_offst = ((KIRKWOOD_REGS_VIRT_BASE) >> 18) & 0xfffc,
.boot_params = 0x00000100,
.init_machine = netspace_v2_init,
.map_io = kirkwood_map_io,
.init_irq = kirkwood_init_irq,
.timer = &netspace_v2_timer,
MACHINE_END
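The cmd_led/slow_led truth table documented in the GPIO LEDs section of this board file maps directly onto the two GPIO lines. A hypothetical helper showing how the table would be driven (only the truth table and GPIO names come from the file; the function itself is not part of the patch):

	/* Hypothetical sketch: set the front blue LED per the CPLD table
	 * (cmd=1,slow=0 -> off; slow=1 -> on; cmd=0,slow=0 -> blink with
	 * SATA activity). Assumes the board file's constants above. */
	enum ns2_blue_led_mode { NS2_LED_OFF, NS2_LED_ON, NS2_LED_SATA_BLINK };

	static void netspace_v2_blue_led_set(enum ns2_blue_led_mode mode)
	{
		switch (mode) {
		case NS2_LED_OFF:
			gpio_set_value(NETSPACE_V2_GPIO_BLUE_LED_SLOW, 0);
			gpio_set_value(NETSPACE_V2_GPIO_BLUE_LED_CMD, 1);
			break;
		case NS2_LED_ON:
			gpio_set_value(NETSPACE_V2_GPIO_BLUE_LED_SLOW, 1);
			break;
		case NS2_LED_SATA_BLINK:
			gpio_set_value(NETSPACE_V2_GPIO_BLUE_LED_SLOW, 0);
			gpio_set_value(NETSPACE_V2_GPIO_BLUE_LED_CMD, 0);
			break;
		}
	}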
@@ -110,6 +110,8 @@ config MACH_CM_X300
 	bool "CompuLab CM-X300 modules"
 	select PXA3xx
 	select CPU_PXA300
+	select CPU_PXA310
+	select HAVE_PWM

 config ARCH_GUMSTIX
 	bool "Gumstix XScale 255 boards"

@@ -240,7 +242,6 @@ config MACH_COLIBRI300
 	select PXA3xx
 	select CPU_PXA300
 	select CPU_PXA310
-	select HAVE_PWM

 config MACH_COLIBRI320
 	bool "Toradex Colibri PXA320"
......
@@ -4,7 +4,6 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>

-#include <mach/hardware.h>
 #include <mach/udc.h>
 #include <mach/pxafb.h>
 #include <mach/mmc.h>

@@ -14,6 +13,7 @@
 #include <mach/pxa2xx_spi.h>
 #include <mach/camera.h>
 #include <mach/audio.h>
+#include <mach/hardware.h>
 #include <plat/i2c.h>
 #include <plat/pxa3xx_nand.h>
......
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -213,7 +214,7 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
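Every cache backend below repeats this same conversion: the routine's second argument changes from an implied PAGE_SZ to a caller-supplied size, so "add r1, r0, r1" computes the end address before the per-line loop. A C analogue of the fa loop (illustrative; clean_and_invalidate_dline() stands in for the "mcr p15, 0, r0, c7, c14, 1" coprocessor op):

	#include <stddef.h>

	#define CACHE_DLINESIZE 32	/* line size assumed by the fa routines */

	/* Stand-in for one "clean & invalidate D line" coprocessor op. */
	static void clean_and_invalidate_dline(const char *line) { (void)line; }

	/* C analogue of fa_flush_kern_dcache_area: walk [addr, addr + size)
	 * one D-cache line at a time. */
	void flush_kern_dcache_area(char *addr, size_t size)
	{
		char *end = addr + size;		/* add r1, r0, r1 */

		while (addr < end) {			/* cmp r0, r1; blo 1b */
			clean_and_invalidate_dline(addr);
			addr += CACHE_DLINESIZE;	/* add r0, r0, #CACHE_DLINESIZE */
		}
	}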
......
@@ -28,69 +28,120 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);

-static inline void sync_writel(unsigned long val, unsigned long reg,
-			       unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
-	while (readl(l2x0_base + reg) & complete_mask)
+	while (readl(reg) & mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static inline void cache_sync(void)
 {
-	sync_writel(0, L2X0_CACHE_SYNC, 1);
+	void __iomem *base = l2x0_base;
+	writel(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }

 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
-	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0xff, l2x0_base + L2X0_INV_WAY);
+	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;

+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(start, base + L2X0_CLEAN_INV_LINE_PA);
 		start += CACHE_LINE_SIZE;
 	}

 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+		cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+		writel(end, base + L2X0_CLEAN_INV_LINE_PA);
 	}

-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_INV_LINE_PA, 1);
+			writel(start, base + L2X0_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;

+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;

+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+			writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }

 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
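The pattern above bounds interrupt latency: each range operation holds l2x0_lock for at most one 4096-byte block of lines, then releases and immediately re-takes it so a pending waiter can get in. A user-space sketch of the same lock-break pattern with a pthread mutex (illustrative only, not the kernel code):

	#include <pthread.h>

	#define BLOCK_SIZE 4096UL	/* max bytes processed per lock hold */
	#define LINE_SIZE  32UL

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	/* Stand-in for one per-line cache maintenance operation. */
	static void do_line(unsigned long addr) { (void)addr; }

	void process_range(unsigned long start, unsigned long end)
	{
		pthread_mutex_lock(&lock);
		while (start < end) {
			unsigned long blk_end = start + min_ul(end - start, BLOCK_SIZE);

			while (start < blk_end) {
				do_line(start);
				start += LINE_SIZE;
			}

			/* Briefly drop the lock between blocks so waiters run. */
			if (blk_end < end) {
				pthread_mutex_unlock(&lock);
				pthread_mutex_lock(&lock);
			}
		}
		pthread_mutex_unlock(&lock);
	}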
......
@@ -72,14 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v3_flush_kern_dcache_page)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */

 /*

@@ -129,7 +130,7 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
+	.long	v3_flush_kern_dcache_area
 	.long	v3_dma_inv_range
 	.long	v3_dma_clean_range
 	.long	v3_dma_flush_range
......
@@ -82,14 +82,15 @@ ENTRY(v4_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4_flush_kern_dcache_page)
+ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */

 /*

@@ -141,7 +142,7 @@ ENTRY(v4_cache_fns)
 	.long	v4_flush_user_cache_range
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
-	.long	v4_flush_kern_dcache_page
+	.long	v4_flush_kern_dcache_area
 	.long	v4_dma_inv_range
 	.long	v4_dma_clean_range
 	.long	v4_dma_flush_range
......
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wb_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+	add	r1, r0, r1
 	/* fall through */

 /*

@@ -224,7 +225,7 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_flush_user_cache_range
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
-	.long	v4wb_flush_kern_dcache_page
+	.long	v4wb_flush_kern_dcache_area
 	.long	v4wb_dma_inv_range
 	.long	v4wb_dma_clean_range
 	.long	v4wb_dma_flush_range
......
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 	/* fallthrough */

 /*

@@ -180,7 +181,7 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_flush_user_cache_range
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
-	.long	v4wt_flush_kern_dcache_page
+	.long	v4wt_flush_kern_dcache_area
 	.long	v4wt_dma_inv_range
 	.long	v4wt_dma_clean_range
 	.long	v4wt_dma_flush_range
......
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)

 /*
- *	v6_flush_kern_dcache_page(kaddr)
+ *	v6_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v6_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line

@@ -271,7 +272,7 @@ ENTRY(v6_cache_fns)
 	.long	v6_flush_user_cache_range
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
-	.long	v6_flush_kern_dcache_page
+	.long	v6_flush_kern_dcache_area
 	.long	v6_dma_inv_range
 	.long	v6_dma_clean_range
 	.long	v6_dma_flush_range
......
@@ -186,16 +186,17 @@ ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)

 /*
- *	v7_flush_kern_dcache_page(kaddr)
+ *	v7_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v7_flush_kern_dcache_page)
+ENTRY(v7_flush_kern_dcache_area)
 	dcache_line_size r2, r3
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line / unified line
 	add	r0, r0, r2

@@ -203,7 +204,7 @@ ENTRY(v7_flush_kern_dcache_page)
 	blo	1b
 	dsb
 	mov	pc, lr
-ENDPROC(v7_flush_kern_dcache_page)
+ENDPROC(v7_flush_kern_dcache_area)

 /*
  * v7_dma_inv_range(start,end)

@@ -279,7 +280,7 @@ ENTRY(v7_cache_fns)
 	.long	v7_flush_user_cache_range
 	.long	v7_coherent_kern_range
 	.long	v7_coherent_user_range
-	.long	v7_flush_kern_dcache_page
+	.long	v7_flush_kern_dcache_area
 	.long	v7_dma_inv_range
 	.long	v7_dma_clean_range
 	.long	v7_dma_flush_range
......
@@ -131,7 +131,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 */
 	if (addr)
 #endif
-		__cpuc_flush_dcache_page(addr);
+		__cpuc_flush_dcache_area(addr, PAGE_SIZE);

 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,

@@ -258,5 +258,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 * in this mapping of the page.  FIXME: this is overkill
 	 * since we actually ask for a write-back and invalidate.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
@@ -79,7 +79,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_page((void *)vaddr);
+		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
......
@@ -61,7 +61,7 @@ void setup_mm_for_reboot(char mode)

 void flush_dcache_page(struct page *page)
 {
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 EXPORT_SYMBOL(flush_dcache_page);
......
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1020_flush_kern_dcache_page)
+ENTRY(arm1020_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 	add	r0, r0, #CACHE_DLINESIZE

@@ -335,7 +336,7 @@ ENTRY(arm1020_cache_fns)
 	.long	arm1020_flush_user_cache_range
 	.long	arm1020_coherent_kern_range
 	.long	arm1020_coherent_user_range
-	.long	arm1020_flush_kern_dcache_page
+	.long	arm1020_flush_kern_dcache_area
 	.long	arm1020_dma_inv_range
 	.long	arm1020_dma_clean_range
 	.long	arm1020_dma_flush_range
......
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1020e_flush_kern_dcache_page)
+ENTRY(arm1020e_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -321,7 +322,7 @@ ENTRY(arm1020e_cache_fns)
 	.long	arm1020e_flush_user_cache_range
 	.long	arm1020e_coherent_kern_range
 	.long	arm1020e_coherent_user_range
-	.long	arm1020e_flush_kern_dcache_page
+	.long	arm1020e_flush_kern_dcache_area
 	.long	arm1020e_dma_inv_range
 	.long	arm1020e_dma_clean_range
 	.long	arm1020e_dma_flush_range
......
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1022_flush_kern_dcache_page)
+ENTRY(arm1022_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -310,7 +311,7 @@ ENTRY(arm1022_cache_fns)
 	.long	arm1022_flush_user_cache_range
 	.long	arm1022_coherent_kern_range
 	.long	arm1022_coherent_user_range
-	.long	arm1022_flush_kern_dcache_page
+	.long	arm1022_flush_kern_dcache_area
 	.long	arm1022_dma_inv_range
 	.long	arm1022_dma_clean_range
 	.long	arm1022_dma_flush_range
......
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- page	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm1026_flush_kern_dcache_page)
+ENTRY(arm1026_flush_kern_dcache_area)
 	mov	ip, #0
 #ifndef CONFIG_CPU_DCACHE_DISABLE
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -304,7 +305,7 @@ ENTRY(arm1026_cache_fns)
 	.long	arm1026_flush_user_cache_range
 	.long	arm1026_coherent_kern_range
 	.long	arm1026_coherent_user_range
-	.long	arm1026_flush_kern_dcache_page
+	.long	arm1026_flush_kern_dcache_area
 	.long	arm1026_dma_inv_range
 	.long	arm1026_dma_clean_range
 	.long	arm1026_dma_flush_range
......
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm920_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm920_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -293,7 +294,7 @@ ENTRY(arm920_cache_fns)
 	.long	arm920_flush_user_cache_range
 	.long	arm920_coherent_kern_range
 	.long	arm920_coherent_user_range
-	.long	arm920_flush_kern_dcache_page
+	.long	arm920_flush_kern_dcache_area
 	.long	arm920_dma_inv_range
 	.long	arm920_dma_clean_range
 	.long	arm920_dma_flush_range
......
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm922_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm922_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -295,7 +296,7 @@ ENTRY(arm922_cache_fns)
 	.long	arm922_flush_user_cache_range
 	.long	arm922_coherent_kern_range
 	.long	arm922_coherent_user_range
-	.long	arm922_flush_kern_dcache_page
+	.long	arm922_flush_kern_dcache_area
 	.long	arm922_dma_inv_range
 	.long	arm922_dma_clean_range
 	.long	arm922_dma_flush_range
......
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm925_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm925_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -346,7 +347,7 @@ ENTRY(arm925_cache_fns)
 	.long	arm925_flush_user_cache_range
 	.long	arm925_coherent_kern_range
 	.long	arm925_coherent_user_range
-	.long	arm925_flush_kern_dcache_page
+	.long	arm925_flush_kern_dcache_area
 	.long	arm925_dma_inv_range
 	.long	arm925_dma_clean_range
 	.long	arm925_dma_flush_range
......
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm926_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm926_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -309,7 +310,7 @@ ENTRY(arm926_cache_fns)
 	.long	arm926_flush_user_cache_range
 	.long	arm926_coherent_kern_range
 	.long	arm926_coherent_user_range
-	.long	arm926_flush_kern_dcache_page
+	.long	arm926_flush_kern_dcache_area
 	.long	arm926_dma_inv_range
 	.long	arm926_dma_clean_range
 	.long	arm926_dma_flush_range
......
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range)
 	/* FALLTHROUGH */

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(arm940_flush_kern_dcache_page)
+ENTRY(arm940_flush_kern_dcache_area)
 	mov	ip, #0
 	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
 1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries

@@ -238,7 +239,7 @@ ENTRY(arm940_cache_fns)
 	.long	arm940_flush_user_cache_range
 	.long	arm940_coherent_kern_range
 	.long	arm940_coherent_user_range
-	.long	arm940_flush_kern_dcache_page
+	.long	arm940_flush_kern_dcache_area
 	.long	arm940_dma_inv_range
 	.long	arm940_dma_clean_range
 	.long	arm940_dma_flush_range
......
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  *	(same as arm926)
  */
-ENTRY(arm946_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(arm946_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -280,7 +281,7 @@ ENTRY(arm946_cache_fns)
 	.long	arm946_flush_user_cache_range
 	.long	arm946_coherent_kern_range
 	.long	arm946_coherent_user_range
-	.long	arm946_flush_kern_dcache_page
+	.long	arm946_flush_kern_dcache_area
 	.long	arm946_dma_inv_range
 	.long	arm946_dma_clean_range
 	.long	arm946_dma_flush_range
......
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
 	.align	5
-ENTRY(feroceon_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(feroceon_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page)
 	mov	pc, lr

 	.align	5
-ENTRY(feroceon_range_flush_kern_dcache_page)
+ENTRY(feroceon_range_flush_kern_dcache_area)
 	mrs	r2, cpsr
 	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
 	orr	r3, r2, #PSR_I_BIT

@@ -372,7 +373,7 @@ ENTRY(feroceon_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_flush_kern_dcache_page
+	.long	feroceon_flush_kern_dcache_area
 	.long	feroceon_dma_inv_range
 	.long	feroceon_dma_clean_range
 	.long	feroceon_dma_flush_range

@@ -383,7 +384,7 @@ ENTRY(feroceon_range_cache_fns)
 	.long	feroceon_flush_user_cache_range
 	.long	feroceon_coherent_kern_range
 	.long	feroceon_coherent_user_range
-	.long	feroceon_range_flush_kern_dcache_page
+	.long	feroceon_range_flush_kern_dcache_area
 	.long	feroceon_range_dma_inv_range
 	.long	feroceon_range_dma_clean_range
 	.long	feroceon_range_dma_flush_range
......
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(mohawk_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(mohawk_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1

@@ -273,7 +274,7 @@ ENTRY(mohawk_cache_fns)
 	.long	mohawk_flush_user_cache_range
 	.long	mohawk_coherent_kern_range
 	.long	mohawk_coherent_user_range
-	.long	mohawk_flush_kern_dcache_page
+	.long	mohawk_flush_kern_dcache_area
 	.long	mohawk_dma_inv_range
 	.long	mohawk_dma_clean_range
 	.long	mohawk_dma_flush_range
......
@@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
-EXPORT_SYMBOL(dmac_inv_range);	/* because of flush_ioremap_region() */
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
......
@@ -254,10 +254,9 @@ __pj4_v6_proc_info:
 	.long	0x560f5810
 	.long	0xff0ffff0
 	.long	PMD_TYPE_SECT | \
-		PMD_SECT_BUFFERABLE | \
-		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
 	.long	PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
......
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache.
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(xsc3_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xsc3_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
 	add	r0, r0, #CACHELINESIZE
 	cmp	r0, r1

@@ -309,7 +310,7 @@ ENTRY(xsc3_cache_fns)
 	.long	xsc3_flush_user_cache_range
 	.long	xsc3_coherent_kern_range
 	.long	xsc3_coherent_user_range
-	.long	xsc3_flush_kern_dcache_page
+	.long	xsc3_flush_kern_dcache_area
 	.long	xsc3_dma_inv_range
 	.long	xsc3_dma_clean_range
 	.long	xsc3_dma_flush_range
......
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range)
 	mov	pc, lr

 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(xscale_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(xscale_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHELINESIZE

@@ -368,7 +369,7 @@ ENTRY(xscale_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_inv_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range

@@ -392,7 +393,7 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
 	.long	xscale_flush_user_cache_range
 	.long	xscale_coherent_kern_range
 	.long	xscale_coherent_user_range
-	.long	xscale_flush_kern_dcache_page
+	.long	xscale_flush_kern_dcache_area
 	.long	xscale_dma_flush_range
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
......
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Wed Nov 25 22:14:58 2009
+# Last update: Wed Dec 16 20:06:34 2009
 #
 # machine_is_xxx	CONFIG_xxxx		MACH_TYPE_xxx		number
 #

@@ -1776,6 +1776,7 @@ cybook3			MACH_CYBOOK3		CYBOOK3			1784
 wdg002			MACH_WDG002		WDG002			1785
 sg560adsl		MACH_SG560ADSL		SG560ADSL		1786
 nextio_n2800_ica	MACH_NEXTIO_N2800_ICA	NEXTIO_N2800_ICA	1787
+dove_db			MACH_DOVE_DB		DOVE_DB			1788
 marvell_newdb		MACH_MARVELL_NEWDB	MARVELL_NEWDB		1789
 vandihud		MACH_VANDIHUD		VANDIHUD		1790
 magx_e8			MACH_MAGX_E8		MAGX_E8			1791

@@ -2536,3 +2537,44 @@ c3ax03			MACH_C3AX03		C3AX03			2549
 mxt_td60		MACH_MXT_TD60		MXT_TD60		2550
 esyx			MACH_ESYX		ESYX			2551
 bulldog			MACH_BULLDOG		BULLDOG			2553
+derell_me2000		MACH_DERELL_ME2000	DERELL_ME2000		2554
+bcmring_base		MACH_BCMRING_BASE	BCMRING_BASE		2555
+bcmring_evm		MACH_BCMRING_EVM	BCMRING_EVM		2556
+bcmring_evm_jazz	MACH_BCMRING_EVM_JAZZ	BCMRING_EVM_JAZZ	2557
+bcmring_sp		MACH_BCMRING_SP		BCMRING_SP		2558
+bcmring_sv		MACH_BCMRING_SV		BCMRING_SV		2559
+bcmring_sv_jazz		MACH_BCMRING_SV_JAZZ	BCMRING_SV_JAZZ		2560
+bcmring_tablet		MACH_BCMRING_TABLET	BCMRING_TABLET		2561
+bcmring_vp		MACH_BCMRING_VP		BCMRING_VP		2562
+bcmring_evm_seikor	MACH_BCMRING_EVM_SEIKOR	BCMRING_EVM_SEIKOR	2563
+bcmring_sp_wqvga	MACH_BCMRING_SP_WQVGA	BCMRING_SP_WQVGA	2564
+bcmring_custom		MACH_BCMRING_CUSTOM	BCMRING_CUSTOM		2565
+acer_s200		MACH_ACER_S200		ACER_S200		2566
+bt270			MACH_BT270		BT270			2567
+iseo			MACH_ISEO		ISEO			2568
+cezanne			MACH_CEZANNE		CEZANNE			2569
+lucca			MACH_LUCCA		LUCCA			2570
+supersmart		MACH_SUPERSMART		SUPERSMART		2571
+magnolia2		MACH_MAGNOLIA2		MAGNOLIA2		2573
+emxx			MACH_EMXX		EMXX			2574
+outlaw			MACH_OUTLAW		OUTLAW			2575
+riot_bei2		MACH_RIOT_BEI2		RIOT_BEI2		2576
+riot_vox		MACH_RIOT_VOX		RIOT_VOX		2577
+riot_x37		MACH_RIOT_X37		RIOT_X37		2578
+mega25mx		MACH_MEGA25MX		MEGA25MX		2579
+benzina2		MACH_BENZINA2		BENZINA2		2580
+ignite			MACH_IGNITE		IGNITE			2581
+foggia			MACH_FOGGIA		FOGGIA			2582
+arezzo			MACH_AREZZO		AREZZO			2583
+leica_skywalker		MACH_LEICA_SKYWALKER	LEICA_SKYWALKER		2584
+jacinto2_jamr		MACH_JACINTO2_JAMR	JACINTO2_JAMR		2585
+gts_nova		MACH_GTS_NOVA		GTS_NOVA		2586
+p3600			MACH_P3600		P3600			2587
+dlt2			MACH_DLT2		DLT2			2588
+df3120			MACH_DF3120		DF3120			2589
+ecucore_9g20		MACH_ECUCORE_9G20	ECUCORE_9G20		2590
+nautel_lpc3240		MACH_NAUTEL_LPC3240	NAUTEL_LPC3240		2591
+glacier			MACH_GLACIER		GLACIER			2592
+phrazer_bulldog		MACH_PHRAZER_BULLDOG	PHRAZER_BULLDOG		2593
+omap3_bulldog		MACH_OMAP3_BULLDOG	OMAP3_BULLDOG		2594
+pca101			MACH_PCA101		PCA101			2595
@@ -20,14 +20,23 @@
 #include <asm/io.h>
 #include <mach/hardware.h>
-#include <asm/cacheflush.h>

 #include <asm/mach/flash.h>

+#define CACHELINESIZE	32
+
 static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from,
 				   ssize_t len)
 {
-	flush_ioremap_region(map->phys, map->cached, from, len);
+	unsigned long start = (unsigned long)map->cached + from;
+	unsigned long end = start + len;
+
+	start &= ~(CACHELINESIZE - 1);
+	while (start < end) {
+		/* invalidate D cache line */
+		asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start));
+		start += CACHELINESIZE;
+	}
 }

 struct pxa2xx_flash_info {
@@ -291,7 +291,7 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
 		skt->nr = ops->first + i;
 		skt->ops = ops;
 		skt->socket.owner = ops->owner;
-		skt->socket.dev.parent = dev;
+		skt->socket.dev.parent = &dev->dev;
 		skt->socket.pci_irq = NO_IRQ;

 		ret = pxa2xx_drv_pcmcia_add_one(skt);

@@ -304,8 +304,8 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
 			soc_pcmcia_remove_one(&sinfo->skt[i]);
 		kfree(sinfo);
 	} else {
-		pxa2xx_configure_sockets(dev);
-		dev_set_drvdata(dev, sinfo);
+		pxa2xx_configure_sockets(&dev->dev);
+		dev_set_drvdata(&dev->dev, sinfo);
 	}

 	return ret;
......
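The PCMCIA fix is a type fix: dev here is a struct platform_device *, while the socket code wants the embedded generic struct device, hence &dev->dev. A reduced sketch of that relationship (structure shapes mirror the kernel's; names and values are illustrative):

	#include <stdio.h>

	struct device {
		struct device *parent;
	};

	struct platform_device {
		const char	*name;
		int		id;
		struct device	dev;	/* generic device embedded in the bus-specific one */
	};

	int main(void)
	{
		struct platform_device pdev = { .name = "pxa2xx-pcmcia", .id = -1 };
		struct device *parent;

		parent = &pdev.dev;	/* correct: &dev->dev, as in the fix */
		/* parent = &pdev; would be an incompatible pointer type,
		 * which is exactly the bug the hunk above repairs. */
		printf("parent %p\n", (void *)parent);
		return 0;
	}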