Commit 7ef4de17 authored by Russell King, committed by Russell King

Merge branch 'highmem' into devel

Conflicts:

	arch/arm/mach-clps7500/include/mach/memory.h
parents f412b09f b5ee9002
@@ -112,10 +112,8 @@
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
*/
#ifndef __virt_to_phys
#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET)
#endif
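Note: these default translations are plain linear arithmetic; a minimal
sketch of the round trip, assuming the illustrative values PAGE_OFFSET =
0xc0000000 and PHYS_OFFSET = 0x10000000 (not from this commit):

	/* Illustrative only: PAGE_OFFSET = 0xc0000000, PHYS_OFFSET = 0x10000000 */
	unsigned long virt = 0xc0123000;
	unsigned long phys = virt - 0xc0000000 + 0x10000000; /* __virt_to_phys -> 0x10123000 */
	unsigned long back = phys - 0x10000000 + 0xc0000000; /* __phys_to_virt -> 0xc0123000 */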
/*
* Convert a physical address to a Page Frame Number and back
@@ -180,6 +178,11 @@ static inline void *phys_to_virt(unsigned long x)
* memory. Use of these is *deprecated* (and that doesn't mean
* use the __ prefixed forms instead.) See dma-mapping.h.
*/
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
#endif
static inline __deprecated unsigned long virt_to_bus(void *x)
{
return __virt_to_bus((unsigned long)x);
......
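Note: machines whose bus view is not 1:1 simply define the pair in their
mach/memory.h before the #ifndef __virt_to_bus default above is tested; a
sketch modelled on the footbridge change later in this commit:

	/* mach/memory.h, seen before the #ifndef default in asm/memory.h */
	#define __virt_to_bus(x)	((x) - 0xe0000000)
	#define __bus_to_virt(x)	((x) + 0xe0000000)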
@@ -108,30 +108,36 @@
#error Unknown user operations model
#endif
struct page;
struct cpu_user_fns {
void (*cpu_clear_user_page)(void *p, unsigned long user);
void (*cpu_copy_user_page)(void *to, const void *from,
unsigned long user);
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
unsigned long vaddr);
};
#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;
#define __cpu_clear_user_page cpu_user.cpu_clear_user_page
#define __cpu_copy_user_page cpu_user.cpu_copy_user_page
#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage
#else
#define __cpu_clear_user_page __glue(_USER,_clear_user_page)
#define __cpu_copy_user_page __glue(_USER,_copy_user_page)
#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage)
#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage)
extern void __cpu_clear_user_page(void *p, unsigned long user);
extern void __cpu_copy_user_page(void *to, const void *from,
unsigned long user);
extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr);
#endif
#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr)
#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr)
#define clear_user_highpage(page,vaddr) \
__cpu_clear_user_highpage(page, vaddr)
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
__cpu_copy_user_highpage(to, from, vaddr)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
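Note: a sketch (not code from this commit) of what the two dispatch models
compile down to for a hypothetical caller; names other than
clear_user_highpage() are illustrative:

	void wipe_for_user(struct page *page, unsigned long vaddr)
	{
		/*
		 * MULTI_USER: expands to cpu_user.cpu_clear_user_highpage(page, vaddr),
		 * an indirect call through the table filled in by the CPU probe at boot.
		 * Single-CPU build: __glue binds the same name at compile time,
		 * e.g. directly to v4wb_clear_user_highpage(page, vaddr).
		 */
		clear_user_highpage(page, vaddr);
	}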
......
@@ -209,9 +209,11 @@ struct meminfo {
struct membank bank[NR_BANKS];
};
extern struct meminfo meminfo;
#define for_each_nodebank(iter,mi,no) \
for (iter = 0; iter < mi->nr_banks; iter++) \
if (mi->bank[iter].node == no)
for (iter = 0; iter < (mi)->nr_banks; iter++) \
if ((mi)->bank[iter].node == no)
#define bank_pfn_start(bank) __phys_to_pfn((bank)->start)
#define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size)
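Note: the added parentheses around the macro argument matter as soon as a
caller passes an expression rather than a plain identifier; a minimal,
hypothetical usage sketch:

	/* Hypothetical helper: pages a meminfo contributes to one node */
	static unsigned long node_pages(struct meminfo *mi, int node)
	{
		unsigned long pages = 0;
		int i;

		for_each_nodebank(i, mi, node)
			pages += bank_pfn_end(&mi->bank[i]) - bank_pfn_start(&mi->bank[i]);
		return pages;
	}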
......
@@ -59,7 +59,7 @@ static int __init fpe_setup(char *line)
__setup("fpe=", fpe_setup);
#endif
extern void paging_init(struct meminfo *, struct machine_desc *desc);
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);
extern void _text, _etext, __data_start, _edata, _end;
@@ -112,7 +112,6 @@ static struct stack stacks[NR_CPUS];
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);
static struct meminfo meminfo __initdata = { 0, };
static const char *cpu_name;
static const char *machine_name;
static char __initdata command_line[COMMAND_LINE_SIZE];
@@ -367,21 +366,34 @@ static struct machine_desc * __init setup_machine(unsigned int nr)
return list;
}
static void __init arm_add_memory(unsigned long start, unsigned long size)
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
struct membank *bank;
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
"ignoring memory at %#lx\n", start);
return -EINVAL;
}
/*
* Ensure that start/size are aligned to a page boundary.
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
bank = &meminfo.bank[meminfo.nr_banks++];
bank->start = PAGE_ALIGN(start);
bank->size = size & PAGE_MASK;
bank->node = PHYS_TO_NID(start);
/*
* Check whether this memory region has non-zero size or
* invalid node number.
*/
if (bank->size == 0 || bank->node >= MAX_NUMNODES)
return -EINVAL;
meminfo.nr_banks++;
return 0;
}
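Note: a worked example of the rounding, assuming 4K pages (illustrative
numbers, not from this commit):

	/*
	 * start = 0x20000400, size = 0x00100000, PAGE_MASK = 0xfffff000:
	 *   size -= start & ~PAGE_MASK      ->  size = 0x000ffc00
	 *   bank->start = PAGE_ALIGN(start) ->  0x20001000
	 *   bank->size  = size & PAGE_MASK  ->  0x000ff000
	 * i.e. the partial pages at either end are dropped rather than
	 * reported as usable RAM.
	 */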
/*
@@ -539,14 +551,7 @@ __tagtable(ATAG_CORE, parse_tag_core);
static int __init parse_tag_mem32(const struct tag *tag)
{
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_WARNING
"Ignoring memory bank 0x%08x size %dKB\n",
tag->u.mem.start, tag->u.mem.size / 1024);
return -EINVAL;
}
arm_add_memory(tag->u.mem.start, tag->u.mem.size);
return 0;
return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}
__tagtable(ATAG_MEM, parse_tag_mem32);
@@ -718,7 +723,7 @@ void __init setup_arch(char **cmdline_p)
memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
parse_cmdline(cmdline_p, from);
paging_init(&meminfo, mdesc);
paging_init(mdesc);
request_standard_resources(&meminfo, mdesc);
#ifdef CONFIG_SMP
......
@@ -14,9 +14,6 @@
#define PHYS_OFFSET UL(0xf0000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* The nodes are as follows:
*
......
@@ -25,15 +25,4 @@
#define PHYS_OFFSET (AT91_SDRAM_BASE)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -26,25 +26,7 @@
*/
#define PHYS_OFFSET UL(0xc0000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#if defined(CONFIG_ARCH_CDB89712)
#define __virt_to_bus(x) (x)
#define __bus_to_virt(x) (x)
#elif defined (CONFIG_ARCH_AUTCPU12)
#define __virt_to_bus(x) (x)
#define __bus_to_virt(x) (x)
#else
#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)
#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
......
@@ -55,10 +55,4 @@ __arch_adjust_zones(int node, unsigned long *size, unsigned long *holes)
#endif
/*
* Bus address is physical address
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif /* __ASM_ARCH_MEMORY_H */
@@ -21,13 +21,6 @@
*/
#define PHYS_OFFSET UL(0x00000000)
/*
* We keep this 1:1 so that we don't interfere
* with the PCMCIA memory regions
*/
#define __virt_to_bus(x) (x)
#define __bus_to_virt(x) (x)
/*
* Cache flushing area - SRAM
*/
......
@@ -7,8 +7,4 @@
#define PHYS_OFFSET UL(0x00000000)
#define __bus_to_virt(x) __phys_to_virt(x)
#define __virt_to_bus(x) __virt_to_phys(x)
#endif
@@ -30,9 +30,18 @@
extern unsigned long __virt_to_bus(unsigned long);
extern unsigned long __bus_to_virt(unsigned long);
#endif
#define __virt_to_bus __virt_to_bus
#define __bus_to_virt __bus_to_virt
#elif defined(CONFIG_FOOTBRIDGE_HOST)
/*
* The footbridge is programmed to expose the system RAM at the corresponding
* address. So, if PAGE_OFFSET is 0xc0000000, RAM appears at 0xe0000000.
* If it is 0x80000000, then it's exposed at 0xa0000000 on the bus, etc.
* The only requirement is that the RAM isn't placed at bus address 0 which
* would clash with VGA cards.
*/
#define __virt_to_bus(x) ((x) - 0xe0000000)
#define __bus_to_virt(x) ((x) + 0xe0000000)
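Note: the translation relies on unsigned 32-bit wraparound; a sketch with
PAGE_OFFSET = 0xc0000000 (illustrative address):

	/*
	 * __virt_to_bus(0xc0001000) = 0xc0001000 - 0xe0000000
	 *                           = 0xe0001000  (mod 2^32)
	 * matching "RAM appears at 0xe0000000" above; __bus_to_virt() adds
	 * the constant back and recovers 0xc0001000.
	 */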
......
@@ -7,23 +7,6 @@
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
/*
* Page offset:
* ( 0xc0000000UL )
*/
#define PHYS_OFFSET UL(0x40000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*
* There is something to do here later !, Mar 2000, Jungjun Kim
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -23,14 +23,4 @@
#define PHYS_OFFSET UL(0x08000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) (x - PAGE_OFFSET + PHYS_OFFSET)
#define __bus_to_virt(x) (x - PHYS_OFFSET + PAGE_OFFSET)
#endif
@@ -24,15 +24,8 @@
* Physical DRAM offset.
*/
#define PHYS_OFFSET UL(0x00000000)
#define BUS_OFFSET UL(0x80000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define BUS_OFFSET UL(0x80000000)
#define __virt_to_bus(x) (x - PAGE_OFFSET + BUS_OFFSET)
#define __bus_to_virt(x) (x - BUS_OFFSET + PAGE_OFFSET)
......
@@ -16,18 +16,6 @@
#define IOP13XX_PMMR_P_START (IOP13XX_PMMR_PHYS_MEM_BASE)
#define IOP13XX_PMMR_P_END (IOP13XX_PMMR_PHYS_MEM_BASE + IOP13XX_PMMR_SIZE)
/*
* Virtual view <-> PCI DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
/* RAM has 1:1 mapping on the PCIe/x Busses */
#define __virt_to_bus(x) (__virt_to_phys(x))
#define __bus_to_virt(x) (__phys_to_virt(x))
static inline dma_addr_t __virt_to_lbus(unsigned long x)
{
return x + IOP13XX_PMMR_PHYS_MEM_BASE - IOP13XX_PMMR_VIRT_MEM_BASE;
@@ -55,7 +43,7 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
if (is_lbus_device(dev) && __is_lbus_dma(__dma)) \
__virt = __lbus_to_virt(__dma); \
else \
__virt = __bus_to_virt(__dma); \
__virt = __phys_to_virt(__dma); \
(void *)__virt; \
})
@@ -66,7 +54,7 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
if (is_lbus_device(dev) && __is_lbus_virt(__virt)) \
__dma = __virt_to_lbus(__virt); \
else \
__dma = __virt_to_bus(__virt); \
__dma = __virt_to_phys(__virt); \
__dma; \
})
......
@@ -12,15 +12,4 @@
*/
#define PHYS_OFFSET UL(0xa0000000)
/*
* Virtual view <-> PCI DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) (__virt_to_phys(x))
#define __bus_to_virt(x) (__phys_to_virt(x))
#endif
@@ -12,15 +12,4 @@
*/
#define PHYS_OFFSET UL(0x00000000)
/*
* Virtual view <-> PCI DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) (__virt_to_phys(x))
#define __bus_to_virt(x) (__phys_to_virt(x))
#endif
@@ -15,13 +15,6 @@
#define PHYS_OFFSET UL(0x00000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#include <mach/ixp2000-regs.h>
#define __virt_to_bus(v) \
......
@@ -19,16 +19,6 @@
*/
#define PHYS_OFFSET (0x00000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#ifndef __ASSEMBLY__
#define __virt_to_bus(v) \
({ unsigned int ret; \
ret = ((__virt_to_phys(v) - 0x00000000) + \
@@ -43,6 +33,3 @@
#define arch_is_coherent() 1
#endif
#endif
@@ -25,16 +25,4 @@ void ixp4xx_adjust_zones(int node, unsigned long *size, unsigned long *holes);
#endif
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*
* These are dummies for now.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -7,8 +7,4 @@
#define PHYS_OFFSET UL(0x00000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -37,11 +37,6 @@ extern struct bus_type platform_bus_type;
(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
#define __arch_page_to_dma(dev, x) __arch_virt_to_dma(dev, page_address(x))
#else
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
#endif
......
@@ -17,9 +17,6 @@
*/
#define PHYS_OFFSET UL(0xf0000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* Cache flushing area - ROM
*/
......
@@ -19,16 +19,6 @@
*/
#define PHYS_OFFSET UL(0xc0000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#ifdef CONFIG_DISCONTIGMEM
/*
......
@@ -7,8 +7,4 @@
#define PHYS_OFFSET UL(0x00000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -19,9 +19,5 @@
/* physical offset of RAM */
#define PHYS_OFFSET UL(0x10000000)
/* bus address and physical addresses are identical */
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -7,8 +7,4 @@
#define PHYS_OFFSET UL(0x00000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -22,15 +22,5 @@
#define PHYS_OFFSET UL(0x80000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -21,7 +21,4 @@
#define PHYS_OFFSET UL(0x00000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -9,8 +9,4 @@
#define PHYS_OFFSET UL(0x00000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -18,7 +18,4 @@
*/
#define PHYS_OFFSET (0x80000000)
#define __virt_to_bus(x) ((x) - PAGE_OFFSET + PHYS_OFFSET)
#define __bus_to_virt(x) ((x) + PAGE_OFFSET - PHYS_OFFSET)
#endif
@@ -17,16 +17,6 @@
*/
#define PHYS_OFFSET UL(0xa0000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* The nodes are matched with the physical SDRAM banks as follows:
*
......
@@ -565,7 +565,7 @@ static int mioa701_sys_suspend(struct sys_device *sysdev, pm_message_t state)
u32 *mem_resume_unknown = phys_to_virt(RESUME_UNKNOWN_ADDR);
/* Devices prepare suspend */
is_bt_on = gpio_get_value(GPIO83_BT_ON);
is_bt_on = !!gpio_get_value(GPIO83_BT_ON);
pxa2xx_mfp_set_lpm(GPIO83_BT_ON,
is_bt_on ? MFP_LPM_DRIVE_HIGH : MFP_LPM_DRIVE_LOW);
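Note: the added !! normalises gpio_get_value()'s return value (any non-zero
level) to exactly 0 or 1 before it is stored; a one-line sketch of the idiom:

	int raw = 0x40;   /* e.g. the raw GPIO bank bit as read from hardware */
	int on  = !!raw;  /* -> 1, so later tests against 0/1 behave as expected */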
......
@@ -24,6 +24,7 @@ ENTRY(mioa701_jumpaddr)
1:
mov r0, #0xa0000000 @ Don't suppose memory access works
orr r0, r0, #0x00200000 @ even if it's supposed to
orr r0, r0, #0x0000b000
mov r1, #0
str r1, [r0] @ Early disable resume for next boot
ldr r0, mioa701_jumpaddr @ (Murphy's Law)
......
@@ -25,14 +25,4 @@
*/
#define PHYS_OFFSET UL(0x00000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
#endif
@@ -23,13 +23,6 @@
*/
#define PHYS_OFFSET UL(0x10000000)
/*
* These are exactly the same on the RiscPC as the
* physical memory view.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* Cache flushing area - ROM
*/
......
@@ -17,7 +17,4 @@
#define PHYS_OFFSET UL(0x0C000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -13,7 +13,4 @@
#define PHYS_OFFSET UL(0x30000000)
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
#endif
@@ -27,18 +27,6 @@ void sa1111_adjust_zones(int node, unsigned long *size, unsigned long *holes);
#endif
#endif
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*
* On the SA1100, bus addresses are equivalent to physical addresses.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* Because of the wide memory address space between physical RAM banks on the
* SA1100, it's much more convenient to use Linux's SparseMEM support to implement
......
@@ -36,9 +36,6 @@ static inline void __arch_adjust_zones(int node, unsigned long *zone_size, unsig
#endif
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* Cache flushing area
*/
......
@@ -25,14 +25,4 @@
*/
#define PHYS_OFFSET UL(0x00000000)
/*
* Virtual view <-> DMA view memory address translations
* virt_to_bus: Used to translate the virtual address to an
* address suitable to be passed to set_dma_addr
* bus_to_virt: Used to convert an address for DMA operations
* to an address that the kernel can use.
*/
#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
#endif
/*
* linux/arch/arm/lib/copypage-feroceon.S
*
* Copyright (C) 2008 Marvell Semiconductors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This handles copy_user_page and clear_user_page on Feroceon
* more optimally than the generic implementations.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
.text
.align 5
ENTRY(feroceon_copy_user_page)
stmfd sp!, {r4-r9, lr}
mov ip, #PAGE_SZ
1: mov lr, r1
ldmia r1!, {r2 - r9}
pld [lr, #32]
pld [lr, #64]
pld [lr, #96]
pld [lr, #128]
pld [lr, #160]
pld [lr, #192]
pld [lr, #224]
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
ldmia r1!, {r2 - r9}
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
stmia r0, {r2 - r9}
subs ip, ip, #(32 * 8)
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
bne 1b
mcr p15, 0, ip, c7, c10, 4 @ drain WB
ldmfd sp!, {r4-r9, pc}
.align 5
ENTRY(feroceon_clear_user_page)
stmfd sp!, {r4-r7, lr}
mov r1, #PAGE_SZ/32
mov r2, #0
mov r3, #0
mov r4, #0
mov r5, #0
mov r6, #0
mov r7, #0
mov ip, #0
mov lr, #0
1: stmia r0, {r2-r7, ip, lr}
subs r1, r1, #1
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line
add r0, r0, #32
bne 1b
mcr p15, 0, r1, c7, c10, 4 @ drain WB
ldmfd sp!, {r4-r7, pc}
__INITDATA
.type feroceon_user_fns, #object
ENTRY(feroceon_user_fns)
.long feroceon_clear_user_page
.long feroceon_copy_user_page
.size feroceon_user_fns, . - feroceon_user_fns
/*
* linux/arch/arm/mm/copypage-feroceon.S
*
* Copyright (C) 2008 Marvell Semiconductors
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This handles copy_user_highpage and clear_user_page on Feroceon
* more optimally than the generic implementations.
*/
#include <linux/init.h>
#include <linux/highmem.h>
static void __attribute__((naked))
feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4-r9, lr} \n\
mov ip, %0 \n\
1: mov lr, r1 \n\
ldmia r1!, {r2 - r9} \n\
pld [lr, #32] \n\
pld [lr, #64] \n\
pld [lr, #96] \n\
pld [lr, #128] \n\
pld [lr, #160] \n\
pld [lr, #192] \n\
pld [lr, #224] \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
ldmia r1!, {r2 - r9} \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
stmia r0, {r2 - r9} \n\
subs ip, ip, #(32 * 8) \n\
mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\
add r0, r0, #32 \n\
bne 1b \n\
mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\
ldmfd sp!, {r4-r9, pc}"
:
: "I" (PAGE_SIZE));
}
void feroceon_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
feroceon_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}
void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile ("\
mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
mov r4, #0 \n\
mov r5, #0 \n\
mov r6, #0 \n\
mov r7, #0 \n\
mov ip, #0 \n\
mov lr, #0 \n\
1: stmia %0, {r2-r7, ip, lr} \n\
subs r1, r1, #1 \n\
mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\
add %0, %0, #32 \n\
bne 1b \n\
mcr p15, 0, r1, c7, c10, 4 @ drain WB"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0);
}
struct cpu_user_fns feroceon_user_fns __initdata = {
.cpu_clear_user_highpage = feroceon_clear_user_highpage,
.cpu_copy_user_highpage = feroceon_copy_user_highpage,
};
/*
* linux/arch/arm/lib/copypage.S
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
.text
.align 5
/*
* ARMv3 optimised copy_user_page
*
* FIXME: do we need to handle cache stuff...
*/
ENTRY(v3_copy_user_page)
stmfd sp!, {r4, lr} @ 2
mov r2, #PAGE_SZ/64 @ 1
ldmia r1!, {r3, r4, ip, lr} @ 4+1
1: stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4+1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4+1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmneia r1!, {r3, r4, ip, lr} @ 4
bne 1b @ 1
ldmfd sp!, {r4, pc} @ 3
.align 5
/*
* ARMv3 optimised clear_user_page
*
* FIXME: do we need to handle cache stuff...
*/
ENTRY(v3_clear_user_page)
str lr, [sp, #-4]!
mov r1, #PAGE_SZ/64 @ 1
mov r2, #0 @ 1
mov r3, #0 @ 1
mov ip, #0 @ 1
mov lr, #0 @ 1
1: stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
subs r1, r1, #1 @ 1
bne 1b @ 1
ldr pc, [sp], #4
__INITDATA
.type v3_user_fns, #object
ENTRY(v3_user_fns)
.long v3_clear_user_page
.long v3_copy_user_page
.size v3_user_fns, . - v3_user_fns
/*
* linux/arch/arm/mm/copypage-v3.c
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* ARMv3 optimised copy_user_highpage
*
* FIXME: do we need to handle cache stuff...
*/
static void __attribute__((naked))
v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
stmfd sp!, {r4, lr} @ 2\n\
mov r2, %2 @ 1\n\
ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
1: stmia %1!, {r3, r4, ip, lr} @ 4\n\
ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
stmia %1!, {r3, r4, ip, lr} @ 4\n\
ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\
stmia %1!, {r3, r4, ip, lr} @ 4\n\
ldmia %0!, {r3, r4, ip, lr} @ 4\n\
subs r2, r2, #1 @ 1\n\
stmia %1!, {r3, r4, ip, lr} @ 4\n\
ldmneia %0!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
ldmfd sp!, {r4, pc} @ 3"
:
: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
}
void v3_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
v3_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}
/*
* ARMv3 optimised clear_user_page
*
* FIXME: do we need to handle cache stuff...
*/
void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile("\n\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0);
}
struct cpu_user_fns v3_user_fns __initdata = {
.cpu_clear_user_highpage = v3_clear_user_highpage,
.cpu_copy_user_highpage = v3_copy_user_highpage,
};
@@ -15,8 +15,8 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
static DEFINE_SPINLOCK(minicache_lock);
/*
* ARMv4 mini-dcache optimised copy_user_page
* ARMv4 mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_page that does the right thing.
* own copy_user_highpage that does the right thing.
*/
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}
void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
void v4_mc_copy_user_highpage(struct page *from, struct page *to,
unsigned long vaddr)
{
struct page *page = virt_to_page(kfrom);
void *kto = kmap_atomic(to, KM_USER1);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page_mapping(page), page);
if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
__flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(0xffff8000);
mc_copy_user_page((void *)0xffff8000, kto);
spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1);
}
/*
* ARMv4 optimised clear_user_page
*/
void __attribute__((naked))
v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
asm volatile(
"str lr, [sp, #-4]!\n\
mov r1, %0 @ 1\n\
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
stmia r0!, {r2, r3, ip, lr} @ 4\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
ldr pc, [sp], #4"
:
: "I" (PAGE_SIZE / 64));
bne 1b @ 1"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0);
}
struct cpu_user_fns v4_mc_user_fns __initdata = {
.cpu_clear_user_page = v4_mc_clear_user_page,
.cpu_copy_user_page = v4_mc_copy_user_page,
.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
};
/*
* linux/arch/arm/lib/copypage.S
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
.text
.align 5
/*
* ARMv4 optimised copy_user_page
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_page that does the right thing.
*/
ENTRY(v4wb_copy_user_page)
stmfd sp!, {r4, lr} @ 2
mov r2, #PAGE_SZ/64 @ 1
ldmia r1!, {r3, r4, ip, lr} @ 4
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4+1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmneia r1!, {r3, r4, ip, lr} @ 4
bne 1b @ 1
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
ldmfd sp!, {r4, pc} @ 3
.align 5
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
ENTRY(v4wb_clear_user_page)
str lr, [sp, #-4]!
mov r1, #PAGE_SZ/64 @ 1
mov r2, #0 @ 1
mov r3, #0 @ 1
mov ip, #0 @ 1
mov lr, #0 @ 1
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
subs r1, r1, #1 @ 1
bne 1b @ 1
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB
ldr pc, [sp], #4
__INITDATA
.type v4wb_user_fns, #object
ENTRY(v4wb_user_fns)
.long v4wb_clear_user_page
.long v4wb_copy_user_page
.size v4wb_user_fns, . - v4wb_user_fns
/*
* linux/arch/arm/mm/copypage-v4wb.c
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* ARMv4 optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
static void __attribute__((naked))
v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
mov r2, %0 @ 1\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
subs r2, r2, #1 @ 1\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmneia r1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\
ldmfd sp!, {r4, pc} @ 3"
:
: "I" (PAGE_SIZE / 64));
}
void v4wb_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
v4wb_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0);
}
struct cpu_user_fns v4wb_user_fns __initdata = {
.cpu_clear_user_highpage = v4wb_clear_user_highpage,
.cpu_copy_user_highpage = v4wb_copy_user_highpage,
};
/*
* linux/arch/arm/lib/copypage-v4.S
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* ASM optimised string functions
*
* This is for CPUs with a writethrough cache, where 'flush ID cache' is
* the only supported cache operation.
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
.text
.align 5
/*
* ARMv4 optimised copy_user_page
*
* Since we have writethrough caches, we don't have to worry about
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
ENTRY(v4wt_copy_user_page)
stmfd sp!, {r4, lr} @ 2
mov r2, #PAGE_SZ/64 @ 1
ldmia r1!, {r3, r4, ip, lr} @ 4
1: stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4+1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
stmia r0!, {r3, r4, ip, lr} @ 4
ldmia r1!, {r3, r4, ip, lr} @ 4
subs r2, r2, #1 @ 1
stmia r0!, {r3, r4, ip, lr} @ 4
ldmneia r1!, {r3, r4, ip, lr} @ 4
bne 1b @ 1
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
ldmfd sp!, {r4, pc} @ 3
.align 5
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
ENTRY(v4wt_clear_user_page)
str lr, [sp, #-4]!
mov r1, #PAGE_SZ/64 @ 1
mov r2, #0 @ 1
mov r3, #0 @ 1
mov ip, #0 @ 1
mov lr, #0 @ 1
1: stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
stmia r0!, {r2, r3, ip, lr} @ 4
subs r1, r1, #1 @ 1
bne 1b @ 1
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache
ldr pc, [sp], #4
__INITDATA
.type v4wt_user_fns, #object
ENTRY(v4wt_user_fns)
.long v4wt_clear_user_page
.long v4wt_copy_user_page
.size v4wt_user_fns, . - v4wt_user_fns
/*
* linux/arch/arm/mm/copypage-v4wt.S
*
* Copyright (C) 1995-1999 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This is for CPUs with a writethrough cache, where 'flush ID cache' is
* the only supported cache operation.
*/
#include <linux/init.h>
#include <linux/highmem.h>
/*
* ARMv4 optimised copy_user_highpage
*
* Since we have writethrough caches, we don't have to worry about
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
static void __attribute__((naked))
v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
mov r2, %0 @ 1\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
1: stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmia r1!, {r3, r4, ip, lr} @ 4\n\
subs r2, r2, #1 @ 1\n\
stmia r0!, {r3, r4, ip, lr} @ 4\n\
ldmneia r1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\
ldmfd sp!, {r4, pc} @ 3"
:
: "I" (PAGE_SIZE / 64));
}
void v4wt_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
v4wt_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}
/*
* ARMv4 optimised clear_user_page
*
* Same story as above.
*/
void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile("\
mov r1, %2 @ 1\n\
mov r2, #0 @ 1\n\
mov r3, #0 @ 1\n\
mov ip, #0 @ 1\n\
mov lr, #0 @ 1\n\
1: stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
stmia %0!, {r2, r3, ip, lr} @ 4\n\
subs r1, r1, #1 @ 1\n\
bne 1b @ 1\n\
mcr p15, 0, r2, c7, c7, 0 @ flush ID cache"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 64)
: "r1", "r2", "r3", "ip", "lr");
kunmap_atomic(kaddr, KM_USER0);
}
struct cpu_user_fns v4wt_user_fns __initdata = {
.cpu_clear_user_highpage = v4wt_clear_user_highpage,
.cpu_copy_user_highpage = v4wt_copy_user_highpage,
};
@@ -10,8 +10,8 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
@@ -33,41 +33,56 @@ static DEFINE_SPINLOCK(v6_lock);
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
static void v6_copy_user_highpage_nonaliasing(struct page *to,
struct page *from, unsigned long vaddr)
{
void *kto, *kfrom;
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
/*
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
void *kaddr = kmap_atomic(page, KM_USER0);
clear_page(kaddr);
kunmap_atomic(kaddr, KM_USER0);
}
/*
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long from, to;
struct page *page = virt_to_page(kfrom);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page_mapping(page), page);
/*
* Discard data in the kernel mapping for the new page.
* FIXME: needs this MCRR to be supported.
*/
static void discard_old_kernel_data(void *kto)
{
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
"r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
}
/*
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
struct page *from, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
__flush_dcache_page(page_mapping(from), from);
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(to));
/*
* Now copy the page using the same cache colour as the
@@ -75,16 +90,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
*/
spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
kfrom = from_address + (offset << PAGE_SHIFT);
kto = to_address + (offset << PAGE_SHIFT);
flush_tlb_kernel_page(from);
flush_tlb_kernel_page(to);
flush_tlb_kernel_page(kfrom);
flush_tlb_kernel_page(kto);
copy_page((void *)to, (void *)from);
copy_page((void *)kto, (void *)kfrom);
spin_unlock(&v6_lock);
}
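Note: a sketch of the cache-colour arithmetic, assuming SHMLBA = 4 * PAGE_SIZE
(four colours, 4K pages), so that CACHE_COLOUR(vaddr) works out to
(vaddr & (SHMLBA - 1)) >> PAGE_SHIFT:

	/*
	 * vaddr = 0x40003000  ->  colour = 0x3000 >> 12 = 3
	 * so the temporary kernel mapping is placed at
	 * from_address/to_address + 3 * PAGE_SIZE, the same colour as the
	 * user's mapping, which keeps the copy coherent in a VIPT cache.
	 */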
@@ -94,20 +109,13 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
* so remap the kernel page into the same cache colour as the user
* page.
*/
static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
/*
* Discard data in the kernel mapping for the new page
* FIXME: needs this MCRR to be supported.
*/
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kaddr),
"r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
/* FIXME: not highmem safe */
discard_old_kernel_data(page_address(page));
/*
* Now clear the page using the same cache colour as
@@ -115,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
*/
spin_lock(&v6_lock);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
flush_tlb_kernel_page(to);
clear_page((void *)to);
@@ -123,15 +131,15 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
}
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page_nonaliasing,
.cpu_copy_user_page = v6_copy_user_page_nonaliasing,
.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};
static int __init v6_userpage_init(void)
{
if (cache_is_vipt_aliasing()) {
cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
}
return 0;
......
/*
* linux/arch/arm/lib/copypage-xsc3.S
* linux/arch/arm/mm/copypage-xsc3.S
*
* Copyright (C) 2004 Intel Corp.
*
@@ -10,10 +10,8 @@
* Adapted for 3rd gen XScale core, no more mini-dcache
* Author: Matt Gilbert (matthew.m.gilbert@intel.com)
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>
#include <linux/highmem.h>
/*
* General note:
@@ -21,77 +19,95 @@
* since that will just eat through 8K of the cache.
*/
.text
.align 5
/*
* XSC3 optimised copy_user_page
* XSC3 optimised copy_user_highpage
* r0 = destination
* r1 = source
* r2 = virtual user address of ultimate destination page
*
* The source page may have some clean entries in the cache already, but we
* can safely ignore them - break_cow() will flush them out of the cache
* if we eventually end up using our copied page.
*
*/
ENTRY(xsc3_mc_copy_user_page)
stmfd sp!, {r4, r5, lr}
mov lr, #PAGE_SZ/64-1
pld [r1, #0]
pld [r1, #32]
1: pld [r1, #64]
pld [r1, #96]
static void __attribute__((naked))
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, r5, lr} \n\
mov lr, %0 \n\
\n\
pld [r1, #0] \n\
pld [r1, #32] \n\
1: pld [r1, #64] \n\
pld [r1, #96] \n\
\n\
2: ldrd r2, [r1], #8 \n\
mov ip, r0 \n\
ldrd r4, [r1], #8 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
strd r2, [r0], #8 \n\
ldrd r2, [r1], #8 \n\
strd r4, [r0], #8 \n\
ldrd r4, [r1], #8 \n\
strd r2, [r0], #8 \n\
strd r4, [r0], #8 \n\
ldrd r2, [r1], #8 \n\
mov ip, r0 \n\
ldrd r4, [r1], #8 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\
strd r2, [r0], #8 \n\
ldrd r2, [r1], #8 \n\
subs lr, lr, #1 \n\
strd r4, [r0], #8 \n\
ldrd r4, [r1], #8 \n\
strd r2, [r0], #8 \n\
strd r4, [r0], #8 \n\
bgt 1b \n\
beq 2b \n\
\n\
ldmfd sp!, {r4, r5, pc}"
:
: "I" (PAGE_SIZE / 64 - 1));
}
2: ldrd r2, [r1], #8
mov ip, r0
ldrd r4, [r1], #8
mcr p15, 0, ip, c7, c6, 1 @ invalidate
strd r2, [r0], #8
ldrd r2, [r1], #8
strd r4, [r0], #8
ldrd r4, [r1], #8
strd r2, [r0], #8
strd r4, [r0], #8
ldrd r2, [r1], #8
mov ip, r0
ldrd r4, [r1], #8
mcr p15, 0, ip, c7, c6, 1 @ invalidate
strd r2, [r0], #8
ldrd r2, [r1], #8
subs lr, lr, #1
strd r4, [r0], #8
ldrd r4, [r1], #8
strd r2, [r0], #8
strd r4, [r0], #8
bgt 1b
beq 2b
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
void *kto, *kfrom;
ldmfd sp!, {r4, r5, pc}
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
xsc3_mc_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
}
.align 5
/*
* XScale optimised clear_user_page
* r0 = destination
* r1 = virtual user address of ultimate destination page
*/
ENTRY(xsc3_mc_clear_user_page)
mov r1, #PAGE_SZ/32
mov r2, #0
mov r3, #0
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line
strd r2, [r0], #8
strd r2, [r0], #8
strd r2, [r0], #8
strd r2, [r0], #8
subs r1, r1, #1
bne 1b
mov pc, lr
__INITDATA
void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile ("\
mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\
strd r2, [%0], #8 \n\
strd r2, [%0], #8 \n\
strd r2, [%0], #8 \n\
strd r2, [%0], #8 \n\
subs r1, r1, #1 \n\
bne 1b"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3");
kunmap_atomic(kaddr, KM_USER0);
}
.type xsc3_mc_user_fns, #object
ENTRY(xsc3_mc_user_fns)
.long xsc3_mc_clear_user_page
.long xsc3_mc_copy_user_page
.size xsc3_mc_user_fns, . - xsc3_mc_user_fns
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
.cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
};
@@ -15,8 +15,8 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
static DEFINE_SPINLOCK(minicache_lock);
/*
* XScale mini-dcache optimised copy_user_page
* XScale mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to)
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}
void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr)
{
struct page *page = virt_to_page(kfrom);
void *kto = kmap_atomic(to, KM_USER1);
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
__flush_dcache_page(page_mapping(page), page);
if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
__flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
spin_unlock(&minicache_lock);
kunmap_atomic(kto, KM_USER1);
}
/*
* XScale optimised clear_user_page
*/
void __attribute__((naked))
xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
asm volatile(
"mov r1, %0 \n\
"mov r1, %2 \n\
mov r2, #0 \n\
mov r3, #0 \n\
1: mov ip, r0 \n\
strd r2, [r0], #8 \n\
strd r2, [r0], #8 \n\
strd r2, [r0], #8 \n\
strd r2, [r0], #8 \n\
1: mov ip, %0 \n\
strd r2, [%0], #8 \n\
strd r2, [%0], #8 \n\
strd r2, [%0], #8 \n\
strd r2, [%0], #8 \n\
mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\
subs r1, r1, #1 \n\
mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\
bne 1b \n\
mov pc, lr"
:
: "I" (PAGE_SIZE / 32));
bne 1b"
: "=r" (ptr)
: "0" (kaddr), "I" (PAGE_SIZE / 32)
: "r1", "r2", "r3", "ip");
kunmap_atomic(kaddr, KM_USER0);
}
struct cpu_user_fns xscale_mc_user_fns __initdata = {
.cpu_clear_user_page = xscale_mc_clear_user_page,
.cpu_copy_user_page = xscale_mc_copy_user_page,
.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
.cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
};
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <asm/system.h>
#include <asm/pgtable.h>
@@ -83,13 +84,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
break;
}
#ifndef CONFIG_HIGHMEM
/* We must not map this if we have highmem enabled */
if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
break;
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08lx", pte_val(*pte));
printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
pte_unmap(pte);
#endif
} while(0);
printk("\n");
......
@@ -64,10 +64,11 @@ static int __init parse_tag_initrd2(const struct tag *tag)
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
/*
* This is used to pass memory configuration data from paging_init
* to mem_init, and by show_mem() to skip holes in the memory map.
* This keeps the memory configuration data used by a couple of memory
* initialization functions, as well as by show_mem() to skip holes
* in the memory map. It is populated by arm_add_memory().
*/
static struct meminfo meminfo = { 0, };
struct meminfo meminfo;
void show_mem(void)
{
@@ -331,13 +332,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
free_area_init_node(node, zone_size, start_pfn, zhole_size);
}
void __init bootmem_init(struct meminfo *mi)
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
unsigned long memend_pfn = 0;
int node, initrd_node;
memcpy(&meminfo, mi, sizeof(meminfo));
/*
* Locate which node contains the ramdisk image, if any.
*/
@@ -394,20 +394,22 @@ void __init bootmem_init(struct meminfo *mi)
max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
}
static inline void free_area(unsigned long addr, unsigned long end, char *s)
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
unsigned int size = (end - addr) >> 10;
unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
for (; addr < end; addr += PAGE_SIZE) {
struct page *page = virt_to_page(addr);
for (; pfn < end; pfn++) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
init_page_count(page);
free_page(addr);
totalram_pages++;
__free_page(page);
pages++;
}
if (size && s)
printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
return pages;
}
static inline void
@@ -478,13 +480,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
*/
void __init mem_init(void)
{
unsigned int codepages, datapages, initpages;
unsigned int codesize, datasize, initsize;
int i, node;
codepages = &_etext - &_text;
datapages = &_end - &__data_start;
initpages = &__init_end - &__init_begin;
#ifndef CONFIG_DISCONTIGMEM
max_mapnr = virt_to_page(high_memory) - mem_map;
#endif
@@ -501,7 +499,8 @@ void __init mem_init(void)
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
totalram_pages += free_area(PHYS_PFN_OFFSET,
__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
/*
@@ -509,18 +508,21 @@
* real number of pages we have in this system
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
for (i = 0; i < meminfo.nr_banks; i++) {
num_physpages += bank_pfn_size(&meminfo.bank[i]);
printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
codesize = &_etext - &_text;
datasize = &_end - &__data_start;
initsize = &__init_end - &__init_begin;
printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
"%dK data, %dK init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
codepages >> 10, datapages >> 10, initpages >> 10);
codesize >> 10, datasize >> 10, initsize >> 10);
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
extern int sysctl_overcommit_memory;
@@ -535,11 +537,10 @@ void __init mem_init(void)
void free_initmem(void)
{
if (!machine_is_integrator() && !machine_is_cintegrator()) {
free_area((unsigned long)(&__init_begin),
(unsigned long)(&__init_end),
if (!machine_is_integrator() && !machine_is_cintegrator())
totalram_pages += free_area(__phys_to_pfn(__pa(&__init_begin)),
__phys_to_pfn(__pa(&__init_end)),
"init");
}
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -549,7 +550,9 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
free_area(start, end, "initrd");
totalram_pages += free_area(__phys_to_pfn(__pa(start)),
__phys_to_pfn(__pa(end)),
"initrd");
}
static int __init keepinitrd_setup(char *__unused)
......
@@ -32,7 +32,7 @@ struct meminfo;
struct pglist_data;
void __init create_mapping(struct map_desc *md);
void __init bootmem_init(struct meminfo *mi);
void __init bootmem_init(void);
void reserve_node_zero(struct pglist_data *pgdat);
extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
@@ -646,61 +646,79 @@ static void __init early_vmalloc(char **arg)
"vmalloc area too small, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
printk(KERN_WARNING
"vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
}
__early_param("vmalloc=", early_vmalloc);
#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
static int __init check_membank_valid(struct membank *mb)
static void __init sanity_check_meminfo(void)
{
int i, j;
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
#ifdef CONFIG_HIGHMEM
/*
* Check whether this memory region has non-zero size or
* invalid node number.
* Split those memory banks which are partially overlapping
* the vmalloc area, greatly simplifying things later.
*/
if (mb->size == 0 || mb->node >= MAX_NUMNODES)
return 0;
if (__va(bank->start) < VMALLOC_MIN &&
bank->size > VMALLOC_MIN - __va(bank->start)) {
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
"ignoring high memory\n");
} else {
memmove(bank + 1, bank,
(meminfo.nr_banks - i) * sizeof(*bank));
meminfo.nr_banks++;
i++;
bank[1].size -= VMALLOC_MIN - __va(bank->start);
bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
j++;
}
bank->size = VMALLOC_MIN - __va(bank->start);
}
#else
/*
* Check whether this memory region would entirely overlap
* Check whether this memory bank would entirely overlap
* the vmalloc area.
*/
if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
if (__va(bank->start) >= VMALLOC_MIN) {
printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
"(vmalloc region overlap).\n",
mb->start, mb->start + mb->size - 1);
return 0;
bank->start, bank->start + bank->size - 1);
continue;
}
/*
* Check whether this memory region would partially overlap
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
if (__va(bank->start + bank->size) > VMALLOC_MIN ||
__va(bank->start + bank->size) < __va(bank->start)) {
unsigned long newsize = VMALLOC_MIN - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
"to -%.8lx (vmalloc region overlap).\n",
mb->start, mb->start + mb->size - 1,
mb->start + newsize - 1);
mb->size = newsize;
bank->start, bank->start + bank->size - 1,
bank->start + newsize - 1);
bank->size = newsize;
}
return 1;
}
static void __init sanity_check_meminfo(struct meminfo *mi)
{
int i, j;
for (i = 0, j = 0; i < mi->nr_banks; i++) {
if (check_membank_valid(&mi->bank[i]))
mi->bank[j++] = mi->bank[i];
#endif
j++;
}
mi->nr_banks = j;
meminfo.nr_banks = j;
}
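The CONFIG_HIGHMEM branch above splits any bank that straddles VMALLOC_MIN: the original entry is shrunk to the lowmem portion and a duplicated entry takes the remainder. A standalone sketch of that arithmetic, with a hypothetical split_bank() and a simplified two-field bank:

/* Sketch only: the split arithmetic used by sanity_check_meminfo() above */
struct simple_bank { unsigned long start, size; };	/* physical start/size */

static void split_bank(struct simple_bank *low, struct simple_bank *high,
		       void *vmalloc_min)
{
	unsigned long low_size = (char *)vmalloc_min - (char *)__va(low->start);

	/* -1/+1 avoids wrapping when the boundary sits at the top of the map */
	high->start = __pa((char *)vmalloc_min - 1) + 1;
	high->size  = low->size - low_size;	/* portion above the boundary */
	low->size   = low_size;			/* portion below the boundary */
}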
static inline void prepare_page_table(struct meminfo *mi)
static inline void prepare_page_table(void)
{
unsigned long addr;
......@@ -721,7 +739,7 @@ static inline void prepare_page_table(struct meminfo *mi)
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
*/
for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
addr < VMALLOC_END; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
}
......@@ -880,14 +898,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
void __init paging_init(struct machine_desc *mdesc)
{
void *zero_page;
build_mem_type_table();
sanity_check_meminfo(mi);
prepare_page_table(mi);
bootmem_init(mi);
sanity_check_meminfo();
prepare_page_table();
bootmem_init();
devicemaps_init(mdesc);
top_pmd = pmd_off_k(0xffff0000);
......
......@@ -41,27 +41,13 @@ void __init reserve_node_zero(pg_data_t *pgdat)
BOOTMEM_DEFAULT);
}
static void __init sanity_check_meminfo(struct meminfo *mi)
{
int i, j;
for (i = 0, j = 0; i < mi->nr_banks; i++) {
struct membank *mb = &mi->bank[i];
if (mb->size != 0 && mb->node < MAX_NUMNODES)
mi->bank[j++] = mi->bank[i];
}
mi->nr_banks = j;
}
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
void __init paging_init(struct machine_desc *mdesc)
{
sanity_check_meminfo(mi);
bootmem_init(mi);
bootmem_init();
}
/*
......
......@@ -33,8 +33,8 @@ EXPORT_SYMBOL(cpu_cache);
#ifdef CONFIG_MMU
#ifndef MULTI_USER
EXPORT_SYMBOL(__cpu_clear_user_page);
EXPORT_SYMBOL(__cpu_copy_user_page);
EXPORT_SYMBOL(__cpu_clear_user_highpage);
EXPORT_SYMBOL(__cpu_copy_user_highpage);
#else
EXPORT_SYMBOL(cpu_user);
#endif
......
......@@ -13,17 +13,4 @@
#include <mach/hardware.h>
/*
* Virtual view <-> DMA view memory address translations
* This macro is used to translate the virtual address to an address
* suitable to be passed to set_dma_addr()
*/
#define __virt_to_bus(a) __virt_to_phys(a)
/*
* Used to convert an address for DMA operations to an address that the
* kernel can use.
*/
#define __bus_to_virt(a) __phys_to_virt(a)
#endif /* __ASM_ARCH_MXC_MEMORY_H__ */
......@@ -101,6 +101,7 @@
#define OMAP24XX_GPIO_IRQSTATUS2 0x0028
#define OMAP24XX_GPIO_IRQENABLE2 0x002c
#define OMAP24XX_GPIO_IRQENABLE1 0x001c
#define OMAP24XX_GPIO_WAKE_EN 0x0020
#define OMAP24XX_GPIO_CTRL 0x0030
#define OMAP24XX_GPIO_OE 0x0034
#define OMAP24XX_GPIO_DATAIN 0x0038
......@@ -1551,7 +1552,7 @@ static int omap_gpio_suspend(struct sys_device *dev, pm_message_t mesg)
#endif
#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX)
case METHOD_GPIO_24XX:
wake_status = bank->base + OMAP24XX_GPIO_SETWKUENA;
wake_status = bank->base + OMAP24XX_GPIO_WAKE_EN;
wake_clear = bank->base + OMAP24XX_GPIO_CLEARWKUENA;
wake_set = bank->base + OMAP24XX_GPIO_SETWKUENA;
break;
......@@ -1574,7 +1575,7 @@ static int omap_gpio_resume(struct sys_device *dev)
{
int i;
if (!cpu_is_omap24xx() && !cpu_is_omap16xx())
if (!cpu_class_is_omap2() && !cpu_is_omap16xx())
return 0;
for (i = 0; i < gpio_bank_count; i++) {
......
......@@ -42,19 +42,8 @@
#define PHYS_OFFSET UL(0x80000000)
#endif
/*
* Conversion between SDRAM and fake PCI bus, used by USB
* NOTE: Physical address must be converted to Local Bus address
* on OMAP-1510 only
*/
/*
* Bus address is physical address, except for OMAP-1510 Local Bus.
*/
#define __virt_to_bus(x) __virt_to_phys(x)
#define __bus_to_virt(x) __phys_to_virt(x)
/*
* OMAP-1510 bus address is translated into a Local Bus address if the
* OMAP bus type is lbus. We do the address translation based on the
* device overriding the defaults used in the dma-mapping API.
......@@ -74,16 +63,16 @@
#define __arch_page_to_dma(dev, page) ({is_lbus_device(dev) ? \
(dma_addr_t)virt_to_lbus(page_address(page)) : \
(dma_addr_t)__virt_to_bus(page_address(page));})
(dma_addr_t)__virt_to_phys(page_address(page));})
#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
lbus_to_virt(addr) : \
__bus_to_virt(addr)); })
__phys_to_virt(addr)); })
#define __arch_virt_to_dma(dev, addr) ({ unsigned long __addr = (unsigned long)(addr); \
(dma_addr_t) (is_lbus_device(dev) ? \
virt_to_lbus(__addr) : \
__virt_to_bus(__addr)); })
__virt_to_phys(__addr)); })
#endif /* CONFIG_ARCH_OMAP15XX */
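All three macros above share one selection pattern: use the OMAP-1510 Local Bus translation when the device sits on the lbus, otherwise fall back to the identity bus == physical mapping. Schematically (an illustration, not the real macro expansion):

/* Illustration of the pattern shared by the three __arch_*_dma macros */
static dma_addr_t example_virt_to_dma(struct device *dev, unsigned long vaddr)
{
	if (is_lbus_device(dev))
		return (dma_addr_t)virt_to_lbus(vaddr);	/* OMAP-1510 Local Bus view */
	return (dma_addr_t)__virt_to_phys(vaddr);	/* bus address == physical */
}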
......
......@@ -128,7 +128,7 @@ void clk_deny_idle(struct clk *clk);
* clk_allow_idle - Counters a previous clk_deny_idle
* @clk: clock signal handle
*/
void clk_deny_idle(struct clk *clk);
void clk_allow_idle(struct clk *clk);
extern void omap_pm_idle(void);
extern void omap_pm_suspend(void);
......
......@@ -141,7 +141,11 @@ static int is_vbus_present(void)
if (mach->gpio_vbus) {
int value = gpio_get_value(mach->gpio_vbus);
return mach->gpio_vbus_inverted ? !value : value;
if (mach->gpio_vbus_inverted)
return !value;
else
return !!value;
}
if (mach->udc_is_connected)
return mach->udc_is_connected();
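The !!value normalization above matters because gpio_get_value() may return any non-zero value, while the caller stores the result in a one-bit field. A small illustration (hypothetical struct):

/* Hypothetical illustration: why the gpio reading is normalized with !! */
struct example_udc {
	unsigned vbus:1;	/* one-bit flag: must be assigned 0 or 1 */
};

int raw = 0x40;			/* gpio_get_value() may return any non-zero bit */
int normalized = !!raw;		/* collapses to exactly 1 */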
......@@ -982,7 +986,7 @@ static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
struct pxa25x_udc *udc;
udc = container_of(_gadget, struct pxa25x_udc, gadget);
udc->vbus = (is_active != 0);
udc->vbus = is_active;
DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
pullup(udc);
return 0;
......@@ -1399,12 +1403,8 @@ lubbock_vbus_irq(int irq, void *_dev)
static irqreturn_t udc_vbus_irq(int irq, void *_dev)
{
struct pxa25x_udc *dev = _dev;
int vbus = gpio_get_value(dev->mach->gpio_vbus);
if (dev->mach->gpio_vbus_inverted)
vbus = !vbus;
pxa25x_udc_vbus_session(&dev->gadget, vbus);
pxa25x_udc_vbus_session(&dev->gadget, is_vbus_present());
return IRQ_HANDLED;
}
......
......@@ -23,7 +23,6 @@ objs-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
objs-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
objs-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
objs-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
objs-y$(CONFIG_MACH_SX1) += lcd_sx1.o
omapfb-objs := $(objs-yy)
/*
* LCD panel support for the Siemens SX1 mobile phone
*
* Current version: Vovan888@gmail.com, great help from FCA00000
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <mach/gpio.h>
#include <mach/omapfb.h>
#include <mach/mcbsp.h>
#include <mach/mux.h>
/*
* OMAP310 GPIO registers
*/
#define GPIO_DATA_INPUT 0xfffce000
#define GPIO_DATA_OUTPUT 0xfffce004
#define GPIO_DIR_CONTROL 0xfffce008
#define GPIO_INT_CONTROL 0xfffce00c
#define GPIO_INT_MASK 0xfffce010
#define GPIO_INT_STATUS 0xfffce014
#define GPIO_PIN_CONTROL 0xfffce018
#define A_LCD_SSC_RD 3
#define A_LCD_SSC_SD 7
#define _A_LCD_RESET 9
#define _A_LCD_SSC_CS 12
#define _A_LCD_SSC_A0 13
#define DSP_REG 0xE1017024
const unsigned char INIT_1[12] = {
0x1C, 0x02, 0x88, 0x00, 0x1E, 0xE0, 0x00, 0xDC, 0x00, 0x02, 0x00
};
const unsigned char INIT_2[127] = {
0x15, 0x00, 0x29, 0x00, 0x3E, 0x00, 0x51, 0x00,
0x65, 0x00, 0x7A, 0x00, 0x8D, 0x00, 0xA1, 0x00,
0xB6, 0x00, 0xC7, 0x00, 0xD8, 0x00, 0xEB, 0x00,
0xFB, 0x00, 0x0B, 0x01, 0x1B, 0x01, 0x27, 0x01,
0x34, 0x01, 0x41, 0x01, 0x4C, 0x01, 0x55, 0x01,
0x5F, 0x01, 0x68, 0x01, 0x70, 0x01, 0x78, 0x01,
0x7E, 0x01, 0x86, 0x01, 0x8C, 0x01, 0x94, 0x01,
0x9B, 0x01, 0xA1, 0x01, 0xA4, 0x01, 0xA9, 0x01,
0xAD, 0x01, 0xB2, 0x01, 0xB7, 0x01, 0xBC, 0x01,
0xC0, 0x01, 0xC4, 0x01, 0xC8, 0x01, 0xCB, 0x01,
0xCF, 0x01, 0xD2, 0x01, 0xD5, 0x01, 0xD8, 0x01,
0xDB, 0x01, 0xE0, 0x01, 0xE3, 0x01, 0xE6, 0x01,
0xE8, 0x01, 0xEB, 0x01, 0xEE, 0x01, 0xF1, 0x01,
0xF3, 0x01, 0xF8, 0x01, 0xF9, 0x01, 0xFC, 0x01,
0x00, 0x02, 0x03, 0x02, 0x07, 0x02, 0x09, 0x02,
0x0E, 0x02, 0x13, 0x02, 0x1C, 0x02, 0x00
};
const unsigned char INIT_3[15] = {
0x14, 0x26, 0x33, 0x3D, 0x45, 0x4D, 0x53, 0x59,
0x5E, 0x63, 0x67, 0x6D, 0x71, 0x78, 0xFF
};
static void epson_sendbyte(int flag, unsigned char byte)
{
	int i, shifter = 0x80;

	/* A0 low marks a command byte; A0 high (restored below) marks data */
	if (!flag)
		gpio_set_value(_A_LCD_SSC_A0, 0);
	mdelay(2);
	gpio_set_value(A_LCD_SSC_RD, 1);
	gpio_set_value(A_LCD_SSC_SD, flag);
	OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
	OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);

	/* bit-bang the byte MSB first, pulsing the McBSP3 clock pin per bit */
	for (i = 0; i < 8; i++) {
		OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2200);
		gpio_set_value(A_LCD_SSC_SD, shifter & byte);
		OMAP_MCBSP_WRITE(OMAP1510_MCBSP3_BASE, PCR0, 0x2202);
		shifter >>= 1;
	}
	gpio_set_value(_A_LCD_SSC_A0, 1);
}
static void init_system(void)
{
omap_mcbsp_request(OMAP_MCBSP3);
omap_mcbsp_stop(OMAP_MCBSP3);
}
static void setup_GPIO(void)
{
/* new wave */
gpio_request(A_LCD_SSC_RD, "lcd_ssc_rd");
gpio_request(A_LCD_SSC_SD, "lcd_ssc_sd");
gpio_request(_A_LCD_RESET, "lcd_reset");
gpio_request(_A_LCD_SSC_CS, "lcd_ssc_cs");
gpio_request(_A_LCD_SSC_A0, "lcd_ssc_a0");
/* set GPIOs to output, with initial data */
gpio_direction_output(A_LCD_SSC_RD, 1);
gpio_direction_output(A_LCD_SSC_SD, 0);
gpio_direction_output(_A_LCD_RESET, 0);
gpio_direction_output(_A_LCD_SSC_CS, 1);
gpio_direction_output(_A_LCD_SSC_A0, 1);
}
static void display_init(void)
{
int i;
omap_cfg_reg(MCBSP3_CLKX);
mdelay(2);
setup_GPIO();
mdelay(2);
/* reset LCD */
gpio_set_value(A_LCD_SSC_SD, 1);
epson_sendbyte(0, 0x25);
gpio_set_value(_A_LCD_RESET, 0);
mdelay(10);
gpio_set_value(_A_LCD_RESET, 1);
gpio_set_value(_A_LCD_SSC_CS, 1);
mdelay(2);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD, phase 1 */
epson_sendbyte(0, 0xCA);
for (i = 0; i < 10; i++)
epson_sendbyte(1, INIT_1[i]);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 2 */
epson_sendbyte(0, 0xCB);
for (i = 0; i < 125; i++)
epson_sendbyte(1, INIT_2[i]);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 2a */
epson_sendbyte(0, 0xCC);
for (i = 0; i < 14; i++)
epson_sendbyte(1, INIT_3[i]);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 3 */
epson_sendbyte(0, 0xBC);
epson_sendbyte(1, 0x08);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 4 */
epson_sendbyte(0, 0x07);
epson_sendbyte(1, 0x05);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 5 */
epson_sendbyte(0, 0x94);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 6 */
epson_sendbyte(0, 0xC6);
epson_sendbyte(1, 0x80);
gpio_set_value(_A_LCD_SSC_CS, 1);
mdelay(100); /* used to be 1000 */
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 7 */
epson_sendbyte(0, 0x16);
epson_sendbyte(1, 0x02);
epson_sendbyte(1, 0x00);
epson_sendbyte(1, 0xB1);
epson_sendbyte(1, 0x00);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 8 */
epson_sendbyte(0, 0x76);
epson_sendbyte(1, 0x00);
epson_sendbyte(1, 0x00);
epson_sendbyte(1, 0xDB);
epson_sendbyte(1, 0x00);
gpio_set_value(_A_LCD_SSC_CS, 1);
gpio_set_value(_A_LCD_SSC_CS, 0);
/* init LCD phase 9 */
epson_sendbyte(0, 0xAF);
gpio_set_value(_A_LCD_SSC_CS, 1);
}
static int sx1_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
{
return 0;
}
static void sx1_panel_cleanup(struct lcd_panel *panel)
{
}
static void sx1_panel_disable(struct lcd_panel *panel)
{
printk(KERN_INFO "SX1: LCD panel disable\n");
sx1_setmmipower(0);
gpio_set_value(_A_LCD_SSC_CS, 1);
epson_sendbyte(0, 0x25);
gpio_set_value(_A_LCD_SSC_CS, 0);
epson_sendbyte(0, 0xAE);
gpio_set_value(_A_LCD_SSC_CS, 1);
mdelay(100);
gpio_set_value(_A_LCD_SSC_CS, 0);
epson_sendbyte(0, 0x95);
gpio_set_value(_A_LCD_SSC_CS, 1);
}
static int sx1_panel_enable(struct lcd_panel *panel)
{
printk(KERN_INFO "lcd_sx1: LCD panel enable\n");
init_system();
display_init();
sx1_setmmipower(1);
sx1_setbacklight(0x18);
sx1_setkeylight(0x06);
return 0;
}
static unsigned long sx1_panel_get_caps(struct lcd_panel *panel)
{
return 0;
}
struct lcd_panel sx1_panel = {
.name = "sx1",
.config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
OMAP_LCDC_INV_HSYNC | OMAP_LCDC_INV_PIX_CLOCK |
OMAP_LCDC_INV_OUTPUT_EN,
.x_res = 176,
.y_res = 220,
.data_lines = 16,
.bpp = 16,
.hsw = 5,
.hfp = 5,
.hbp = 5,
.vsw = 2,
.vfp = 1,
.vbp = 1,
.pixel_clock = 1500,
.init = sx1_panel_init,
.cleanup = sx1_panel_cleanup,
.enable = sx1_panel_enable,
.disable = sx1_panel_disable,
.get_caps = sx1_panel_get_caps,
};
static int sx1_panel_probe(struct platform_device *pdev)
{
omapfb_register_panel(&sx1_panel);
return 0;
}
static int sx1_panel_remove(struct platform_device *pdev)
{
return 0;
}
static int sx1_panel_suspend(struct platform_device *pdev, pm_message_t mesg)
{
return 0;
}
static int sx1_panel_resume(struct platform_device *pdev)
{
return 0;
}
struct platform_driver sx1_panel_driver = {
.probe = sx1_panel_probe,
.remove = sx1_panel_remove,
.suspend = sx1_panel_suspend,
.resume = sx1_panel_resume,
.driver = {
.name = "lcd_sx1",
.owner = THIS_MODULE,
},
};
static int sx1_panel_drv_init(void)
{
return platform_driver_register(&sx1_panel_driver);
}
static void sx1_panel_drv_cleanup(void)
{
platform_driver_unregister(&sx1_panel_driver);
}
module_init(sx1_panel_drv_init);
module_exit(sx1_panel_drv_cleanup);
......@@ -63,12 +63,14 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
#endif /* CONFIG_HIGHMEM */
/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_atomic(page, KM_USER0);
clear_user_page(addr, vaddr, page);
kunmap_atomic(addr, KM_USER0);
}
#endif
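For comparison, when an architecture does not define __HAVE_ARCH_COPY_USER_HIGHPAGE, the generic fallback follows the same kmap_atomic pattern as clear_user_highpage() above; a sketch, assuming the KM_USER0/KM_USER1 slot conventions used in this file:

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom = kmap_atomic(from, KM_USER0);	/* map source page */
	char *vto = kmap_atomic(to, KM_USER1);		/* map destination page */

	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vfrom, KM_USER0);
	kunmap_atomic(vto, KM_USER1);
}
#endif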
#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
/**
......