Commit 2fe9f798 authored by Linus Torvalds

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

parents 76abf3e7 a131430c
@@ -42,19 +42,15 @@
 	 * executing (see inherit_locked_prom_mappings() rant).
 	 */
 sparc64_vpte_nucleus:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
-	mov		0xf, %g5
-	sllx		%g5, 28, %g5
-
-	/* Is addr >= LOW_OBP_ADDRESS?  */
+	/* Note that kvmap below has verified that the address is
+	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
+	 * here we need only check if it is an OBP address or not.
+	 */
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
 	cmp		%g4, %g5
 	blu,pn		%xcc, sparc64_vpte_patchme1
 	 mov		0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
 	sllx		%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS?  */
 	cmp		%g4, %g5
 	blu,pn		%xcc, obp_iaddr_patch
 	 nop
@@ -156,26 +152,29 @@ obp_daddr_patch:
  * rather, use information saved during inherit_prom_mappings() using 8k
  * pagesize.
  */
+	.align		32
 kvmap:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
-	mov		0xf, %g5
-	sllx		%g5, 28, %g5
-
-	/* Is addr >= LOW_OBP_ADDRESS?  */
+	sethi		%hi(MODULES_VADDR), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, longpath
+	 mov		(VMALLOC_END >> 24), %g5
+	sllx		%g5, 24, %g5
+	cmp		%g4, %g5
+	bgeu,pn		%xcc, longpath
+	 nop
+
+kvmap_check_obp:
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
 	cmp		%g4, %g5
-	blu,pn		%xcc, vmalloc_addr
+	blu,pn		%xcc, kvmap_vmalloc_addr
 	 mov		0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
 	sllx		%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS?  */
 	cmp		%g4, %g5
 	blu,pn		%xcc, obp_daddr_patch
 	 nop
 
-vmalloc_addr:
-	/* If we get here, a vmalloc addr accessed, load kernel VPTE.  */
+kvmap_vmalloc_addr:
+	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
 	ldxa		[%g3 + %g6] ASI_N, %g5
 	brgez,pn	%g5, longpath
 	 nop
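
As a reading aid, here is a minimal user-space C sketch of the address classification the rewritten kvmap path (and the sparc64_vpte_nucleus hunk above) performs. The constants come from the address-layout hunk later in this diff; the helper name and sample addresses are purely illustrative, not kernel API.

#include <stdio.h>

/* Constants taken from the address-layout hunk in this merge. */
#define MODULES_VADDR   0x0000000010000000ULL
#define VMALLOC_END     0x0000000200000000ULL
#define LOW_OBP_ADDRESS 0x00000000f0000000ULL
#define HI_OBP_ADDRESS  0x0000000100000000ULL

/* Illustrative restatement of the assembly's range checks. */
static const char *classify_kernel_fault_addr(unsigned long long addr)
{
	if (addr < MODULES_VADDR || addr >= VMALLOC_END)
		return "longpath (not a module/OBP/vmalloc address)";
	if (addr >= LOW_OBP_ADDRESS && addr < HI_OBP_ADDRESS)
		return "OBP mapping (obp_iaddr/obp_daddr patch path)";
	return "module or vmalloc address (load kernel VPTE)";
}

int main(void)
{
	unsigned long long samples[] = {
		0x0000000000401000ULL,	/* low address            -> longpath */
		0x0000000010000000ULL,	/* first module address   -> kernel VPTE */
		0x00000000f0001000ULL,	/* inside the OBP window  -> OBP path */
		0x0000000140000000ULL,	/* vmalloc range          -> kernel VPTE */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#llx -> %s\n", samples[i],
		       classify_kernel_fault_addr(samples[i]));
	return 0;
}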
......
@@ -30,6 +30,7 @@
 #include <asm/psrcompat.h>
 #include <asm/visasm.h>
 #include <asm/spitfire.h>
+#include <asm/page.h>
 
 /* Returning from ptrace is a bit tricky because the syscall return
  * low level code assumes any value returned which is negative and
@@ -128,20 +129,20 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	 * is mapped to in the user's address space, we can skip the
 	 * D-cache flush.
 	 */
-	if ((uaddr ^ kaddr) & (1UL << 13)) {
+	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
 		unsigned long start = __pa(kaddr);
 		unsigned long end = start + len;
 
 		if (tlb_type == spitfire) {
 			for (; start < end; start += 32)
-				spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
 		} else {
 			for (; start < end; start += 32)
 				__asm__ __volatile__(
 					"stxa %%g0, [%0] %1\n\t"
 					"membar #Sync"
 					: /* no outputs */
-					: "r" (va),
+					: "r" (start),
 					  "i" (ASI_DCACHE_INVALIDATE));
 		}
 	}
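
The corrected condition compares the D-cache color of the user and kernel mappings of the page. A hedged user-space sketch of that test follows, assuming the Spitfire-style 16K, direct-mapped, virtually-indexed D-cache with 8K pages that makes bit 13 the color bit (the bit used in the hunk above).

#include <stdio.h>

/* Assumption: with 8K pages in front of a 16K direct-mapped,
 * virtually-indexed D-cache, two mappings of the same page that
 * differ in bit 13 index different halves of the cache and can
 * hold stale copies of each other's data.
 */
#define DCACHE_COLOR_BIT (1ULL << 13)

/* Illustrative helper, not kernel code: a flush is needed only when
 * the user and kernel mappings have different D-cache colors.
 */
static int needs_dcache_flush(unsigned long long uaddr, unsigned long long kaddr)
{
	return ((uaddr ^ kaddr) & DCACHE_COLOR_BIT) != 0;
}

int main(void)
{
	/* Color bits differ: flush needed. */
	printf("%d\n", needs_dcache_flush(0x0000000000010000ULL,
					  0xfffff80000012000ULL));
	/* Colors match: the D-cache flush can be skipped. */
	printf("%d\n", needs_dcache_flush(0x0000000000012000ULL,
					  0xfffff80000012000ULL));
	return 0;
}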
......
@@ -17,7 +17,7 @@ kernel_unaligned_trap_fault:
 __do_int_store:
 	rd	%asi, %o4
 	wr	%o3, 0, %asi
-	ldx	[%o2], %g3
+	mov	%o2, %g3
 	cmp	%o1, 2
 	be,pn	%icc, 2f
 	 cmp	%o1, 4
......
@@ -184,13 +184,14 @@ extern void do_int_load(unsigned long *dest_reg, int size,
 			unsigned long *saddr, int is_signed, int asi);
 
 extern void __do_int_store(unsigned long *dst_addr, int size,
-			   unsigned long *src_val, int asi);
+			   unsigned long src_val, int asi);
 
 static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
-				struct pt_regs *regs, int asi)
+				struct pt_regs *regs, int asi, int orig_asi)
 {
 	unsigned long zero = 0;
-	unsigned long *src_val = &zero;
+	unsigned long *src_val_p = &zero;
+	unsigned long src_val;
 
 	if (size == 16) {
 		size = 8;
@@ -198,7 +199,25 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
 		       (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
 			(unsigned)fetch_reg(reg_num + 1, regs);
 	} else if (reg_num) {
-		src_val = fetch_reg_addr(reg_num, regs);
+		src_val_p = fetch_reg_addr(reg_num, regs);
 	}
+	src_val = *src_val_p;
+	if (unlikely(asi != orig_asi)) {
+		switch (size) {
+		case 2:
+			src_val = swab16(src_val);
+			break;
+		case 4:
+			src_val = swab32(src_val);
+			break;
+		case 8:
+			src_val = swab64(src_val);
+			break;
+		case 16:
+		default:
+			BUG();
+			break;
+		};
+	}
 	__do_int_store(dst_addr, size, src_val, asi);
 }
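
The assembly change above (mov %o2, %g3 instead of ldx [%o2], %g3) matches the new prototype: __do_int_store() now receives the value itself, already byte-swapped when the decoded ASI was a little-endian one. A standalone sketch of that swab-by-size step, using plain C stand-ins for the kernel's swab helpers:

#include <stdio.h>
#include <stdint.h>

/* Plain C stand-ins for the kernel's swab16/swab32/swab64 helpers. */
static uint16_t my_swab16(uint16_t x)
{
	return (uint16_t)((x >> 8) | (x << 8));
}

static uint32_t my_swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) | ((x & 0x0000ff00U) << 8) |
	       ((x & 0x00ff0000U) >> 8)  | ((x & 0xff000000U) >> 24);
}

static uint64_t my_swab64(uint64_t x)
{
	return ((uint64_t)my_swab32((uint32_t)x) << 32) |
	       my_swab32((uint32_t)(x >> 32));
}

/* Sketch of the idea in do_int_store(): when the decoded ASI was a
 * little-endian variant, byte-swap the register value before the store
 * is performed with the big-endian ASI, so memory ends up holding the
 * little-endian image the faulting instruction asked for.
 */
static uint64_t fixup_store_value(uint64_t src_val, int size, int was_little_endian)
{
	if (!was_little_endian)
		return src_val;
	switch (size) {
	case 2: return my_swab16((uint16_t)src_val);
	case 4: return my_swab32((uint32_t)src_val);
	case 8: return my_swab64(src_val);
	default: return src_val;	/* the 16-byte case is a BUG() in the patch */
	}
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       fixup_store_value(0x1122334455667788ULL, 8, 1));
	printf("%#llx\n", (unsigned long long)
	       fixup_store_value(0x11223344ULL, 4, 1));
	return 0;
}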
@@ -276,6 +295,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
 		kernel_mna_trap_fault();
 	} else {
 		unsigned long addr;
+		int orig_asi, asi;
 
 		addr = compute_effective_address(regs, insn,
 						 ((insn >> 25) & 0x1f));
@@ -285,18 +305,48 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
 		       regs->tpc, dirstrings[dir], addr, size,
 		       regs->u_regs[UREG_RETPC]);
 #endif
+		orig_asi = asi = decode_asi(insn, regs);
+		switch (asi) {
+		case ASI_NL:
+		case ASI_AIUPL:
+		case ASI_AIUSL:
+		case ASI_PL:
+		case ASI_SL:
+		case ASI_PNFL:
+		case ASI_SNFL:
+			asi &= ~0x08;
+			break;
+		};
 		switch (dir) {
 		case load:
 			do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
 				    size, (unsigned long *) addr,
-				    decode_signedness(insn),
-				    decode_asi(insn, regs));
+				    decode_signedness(insn), asi);
+			if (unlikely(asi != orig_asi)) {
+				unsigned long val_in = *(unsigned long *) addr;
+				switch (size) {
+				case 2:
+					val_in = swab16(val_in);
+					break;
+				case 4:
+					val_in = swab32(val_in);
+					break;
+				case 8:
+					val_in = swab64(val_in);
+					break;
+				case 16:
+				default:
+					BUG();
+					break;
+				};
+				*(unsigned long *) addr = val_in;
+			}
 			break;
 
 		case store:
 			do_int_store(((insn>>25)&0x1f), size,
 				     (unsigned long *) addr, regs,
-				     decode_asi(insn, regs));
+				     asi, orig_asi);
 			break;
 
 		default:
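
The asi &= ~0x08 normalization relies on SPARC V9 numbering every little-endian ASI as its big-endian counterpart with bit 0x08 set (for example, ASI_P is 0x80 and ASI_PL is 0x88). A small sketch of that mapping follows; the ASI values are restated from the V9 definitions for illustration and only a subset of the cases from the hunk is listed.

#include <stdio.h>

/* SPARC V9 ASI numbers, restated here for illustration (the kernel
 * takes them from asm/asi.h); each little-endian ASI is the matching
 * big-endian ASI with bit 0x08 set.  Only a subset is listed.
 */
#define ASI_N     0x04	/* nucleus */
#define ASI_NL    0x0c	/* nucleus, little-endian */
#define ASI_AIUP  0x10	/* as-if-user primary */
#define ASI_AIUPL 0x18	/* as-if-user primary, little-endian */
#define ASI_P     0x80	/* primary */
#define ASI_PL    0x88	/* primary, little-endian */

/* Sketch of the normalization step: strip the little-endian bit so the
 * emulation can run with the big-endian ASI, and remember that the
 * data still owes a byte swap afterwards.
 */
static int normalize_asi(int asi, int *needs_swap)
{
	switch (asi) {
	case ASI_NL:
	case ASI_AIUPL:
	case ASI_PL:
		*needs_swap = 1;
		return asi & ~0x08;
	default:
		*needs_swap = 0;
		return asi;
	}
}

int main(void)
{
	int swap;
	int asi = normalize_asi(ASI_PL, &swap);

	printf("ASI_PL (%#x) -> %#x (ASI_P), swap=%d\n", ASI_PL, asi, swap);
	return 0;
}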
......
@@ -174,7 +174,7 @@ int atyfb_xl_init(struct fb_info *info)
 	const struct xl_card_cfg_t * card = &card_cfg[xl_card];
 	struct atyfb_par *par = (struct atyfb_par *) info->par;
 	union aty_pll pll;
-	int i, err;
+	int err;
 	u32 temp;
 
 	aty_st_8(CONFIG_STAT0, 0x85, par);
@@ -252,9 +252,12 @@ int atyfb_xl_init(struct fb_info *info)
 	aty_st_le32(0xEC, 0x00000000, par);
 	aty_st_le32(0xFC, 0x00000000, par);
 
+#if defined (CONFIG_FB_ATY_GENERIC_LCD)
+	int i;
 	for (i=0; i<sizeof(lcd_tbl)/sizeof(lcd_tbl_t); i++) {
 		aty_st_lcd(lcd_tbl[i].lcd_reg, lcd_tbl[i].val, par);
 	}
+#endif
 
 	aty_st_le16(CONFIG_STAT0, 0x00A4, par);
 	mdelay(10);
......
@@ -4,13 +4,6 @@
 #include <linux/config.h>
 #include <asm/page.h>
 
-/* Flushing for D-cache alias handling is only needed if
- * the page size is smaller than 16K.
- */
-#if PAGE_SHIFT < 14
-#define DCACHE_ALIASING_POSSIBLE
-#endif
-
 #ifndef __ASSEMBLY__
 
 #include <linux/mm.h>
......
@@ -15,6 +15,7 @@
 #include <asm/io.h>
 #include <asm/spitfire.h>
 #include <asm/cacheflush.h>
+#include <asm/page.h>
 
 #ifndef MAX_HWIFS
 # ifdef CONFIG_BLK_DEV_IDEPCI
......
@@ -21,6 +21,13 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 
+/* Flushing for D-cache alias handling is only needed if
+ * the page size is smaller than 16K.
+ */
+#if PAGE_SHIFT < 14
+#define DCACHE_ALIASING_POSSIBLE
+#endif
+
 #ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
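
A rough user-space restatement of what the relocated guard expresses: aliasing in a virtually-indexed D-cache is only possible when PAGE_SHIFT is below 14, i.e. when a page is smaller than the cache index range. The 16K figure is the Spitfire-class D-cache size and is stated here as an assumption.

#include <stdio.h>

/* Rough arithmetic behind "PAGE_SHIFT < 14": with a 16K (2^14 byte)
 * virtually-indexed D-cache (assumed here), a page smaller than the
 * cache can land on more than one cache color, so virtual aliases of
 * the same physical page become possible.
 */
static int dcache_aliasing_possible(int page_shift)
{
	const int dcache_shift = 14;	/* assumed 16K D-cache index range */

	return page_shift < dcache_shift;
}

int main(void)
{
	printf("8K pages  (PAGE_SHIFT=13): %d\n", dcache_aliasing_possible(13));
	printf("64K pages (PAGE_SHIFT=16): %d\n", dcache_aliasing_possible(16));
	return 0;
}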
......
@@ -10,6 +10,7 @@
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
 #include <asm/cacheflush.h>
+#include <asm/page.h>
 
 /* Page table allocation/freeing. */
 #ifdef CONFIG_SMP
......
@@ -24,21 +24,23 @@
 #include <asm/processor.h>
 #include <asm/const.h>
 
-/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 16MB).
- * The page copy blockops use 0x1000000 to 0x18000000 (16MB --> 24MB).
+/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 32MB).
+ * The page copy blockops can use 0x2000000 to 0x10000000.
  * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
- * The vmalloc area spans 0x140000000 to 0x200000000.
+ * The vmalloc area spans 0x100000000 to 0x200000000.
+ * Since modules need to be in the lowest 32-bits of the address space,
+ * we place them right before the OBP area from 0x10000000 to 0xf0000000.
  * There is a single static kernel PMD which maps from 0x0 to address
  * 0x400000000.
  */
-#define TLBTEMP_BASE		_AC(0x0000000001000000,UL)
-#define MODULES_VADDR		_AC(0x0000000002000000,UL)
-#define MODULES_LEN		_AC(0x000000007e000000,UL)
-#define MODULES_END		_AC(0x0000000080000000,UL)
-#define VMALLOC_START		_AC(0x0000000140000000,UL)
-#define VMALLOC_END		_AC(0x0000000200000000,UL)
+#define TLBTEMP_BASE		_AC(0x0000000002000000,UL)
+#define MODULES_VADDR		_AC(0x0000000010000000,UL)
+#define MODULES_LEN		_AC(0x00000000e0000000,UL)
+#define MODULES_END		_AC(0x00000000f0000000,UL)
 #define LOW_OBP_ADDRESS		_AC(0x00000000f0000000,UL)
 #define HI_OBP_ADDRESS		_AC(0x0000000100000000,UL)
+#define VMALLOC_START		_AC(0x0000000100000000,UL)
+#define VMALLOC_END		_AC(0x0000000200000000,UL)
 
 /* XXX All of this needs to be rethought so we can take advantage
  * XXX cheetah's full 64-bit virtual address space, ie. no more hole
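
The new constants can be sanity-checked directly against the comment: modules stay below 4GB, the module area ends exactly where the OBP window begins, and the OBP window ends where vmalloc starts. A small self-contained check:

#include <stdio.h>

/* The new layout constants from the hunk above. */
#define TLBTEMP_BASE    0x0000000002000000ULL
#define MODULES_VADDR   0x0000000010000000ULL
#define MODULES_LEN     0x00000000e0000000ULL
#define MODULES_END     0x00000000f0000000ULL
#define LOW_OBP_ADDRESS 0x00000000f0000000ULL
#define HI_OBP_ADDRESS  0x0000000100000000ULL
#define VMALLOC_START   0x0000000100000000ULL
#define VMALLOC_END     0x0000000200000000ULL

int main(void)
{
	printf("modules fit below 4GB : %d\n", MODULES_END <= 0x100000000ULL);
	printf("len matches the range : %d\n",
	       MODULES_VADDR + MODULES_LEN == MODULES_END);
	printf("modules meet OBP      : %d\n", MODULES_END == LOW_OBP_ADDRESS);
	printf("OBP meets vmalloc     : %d\n", HI_OBP_ADDRESS == VMALLOC_START);
	return 0;
}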
......
@@ -5,6 +5,10 @@
  * linux/byteorder_generic.h
  * Generic Byte-reordering support
  *
+ * The "... p" macros, like le64_to_cpup, can be used with pointers
+ * to unaligned data, but there will be a performance penalty on
+ * some architectures.  Use get_unaligned for unaligned data.
+ *
  * Francois-Rene Rideau <fare@tunes.org> 19970707
  *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
  *    cleaned them up.
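
A user-space illustration of the distinction the added comment draws, using fake_* stand-ins rather than the kernel helpers themselves: the *_to_cpup form dereferences the pointer directly, so the compiler may emit an aligned load, while a get_unaligned-style access copies byte-wise and is safe at any address.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* fake_* stand-ins for illustration only; a little-endian host is
 * assumed so no byte swapping is shown.
 */
static uint64_t fake_le64_to_cpup(const uint64_t *p)
{
	/* Direct dereference: the compiler may emit an aligned 8-byte
	 * load, which some architectures have to fix up (at a cost)
	 * when p is not actually aligned.
	 */
	return *p;
}

static uint64_t fake_get_unaligned_le64(const void *p)
{
	uint64_t v;

	memcpy(&v, p, sizeof(v));	/* byte-wise copy, no alignment assumed */
	return v;
}

int main(void)
{
	unsigned char buf[9] = { 0x00, 0x88, 0x77, 0x66, 0x55,
				 0x44, 0x33, 0x22, 0x11 };
	uint64_t aligned = 0x1122334455667788ULL;

	printf("%#llx\n", (unsigned long long)fake_le64_to_cpup(&aligned));
	printf("%#llx\n", (unsigned long long)fake_get_unaligned_le64(buf + 1));
	return 0;
}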
......