Commit c4c4594b authored by Chris Zankel

xtensa: clean up files to make them code-style compliant

Remove leading and trailing spaces, trim trailing blank lines, and wrap lines
that are longer than 80 characters.
Signed-off-by: Chris Zankel <chris@zankel.net>
parent 72100ed7
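The whole commit is mechanical whitespace surgery. As an illustration only (not the tool actually used for this commit; `clean_file` is a hypothetical helper, and the over-80-column wrapping in the hunks below was clearly done by hand), the trailing-whitespace and trailing-blank-line part of such a cleanup could be scripted like this:

```python
#!/usr/bin/env python3
# Illustrative sketch: strip trailing spaces/tabs and trim trailing blank
# lines, the mechanical part of this cleanup. Wrapping >80-column lines is
# left to manual editing, since choosing break points needs human judgment.
import sys

def clean_file(path):
    with open(path) as f:
        lines = [line.rstrip() for line in f]   # drop trailing whitespace
    while lines and not lines[-1]:              # trim trailing blank lines
        lines.pop()
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")

if __name__ == "__main__":
    for path in sys.argv[1:]:
        clean_file(path)
```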
@@ -106,4 +106,3 @@ zImage: vmlinux
 define archhelp
   @echo '* zImage      - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
 endef
@@ -32,15 +32,15 @@
 /* All records are aligned to 4 bytes */

 typedef struct bp_tag {
         unsigned short id;      /* tag id */
         unsigned short size;    /* size of this record excluding the structure*/
         unsigned long data[0];  /* data */
 } bp_tag_t;

 typedef struct meminfo {
         unsigned long type;
         unsigned long start;
         unsigned long end;
 } meminfo_t;

 #define SYSMEM_BANKS_MAX 5
@@ -49,14 +49,11 @@ typedef struct meminfo {
 #define MEMORY_TYPE_NONE                0x2000

 typedef struct sysmem_info {
         int nr_banks;
         meminfo_t bank[SYSMEM_BANKS_MAX];
 } sysmem_info_t;

 extern sysmem_info_t sysmem;

 #endif
 #endif
@@ -174,4 +174,3 @@
         __loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH

 .endm
@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE       1
 extern void flush_dcache_page(struct page*);
 extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+extern void flush_cache_page(struct vm_area_struct*,
+                             unsigned long, unsigned long);

 #else
...
@@ -36,8 +36,9 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
-                                            int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+                                            int len, __wsum sum,
+                                            int *src_err_ptr, int *dst_err_ptr);

 /*
  * Note: when you get a NULL pointer exception here this means someone
@@ -54,7 +55,7 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 static inline
 __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
                                    int len, __wsum sum, int *err_ptr)
 {
         return csum_partial_copy_generic((__force const void *)src, dst,
                                          len, sum, err_ptr, NULL);
@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
         /* Since the input registers which are loaded with iph and ihl
            are modified, we must also specify them as outputs, or gcc
            will assume they contain their original values. */
-        : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+        : "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+          "=&r" (endaddr)
         : "1" (iph), "2" (ihl)
         : "memory");
@@ -168,7 +170,7 @@ static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
 static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
 {
         return csum_fold (csum_partial(buff, len, 0));
 }

 #define _HAVE_ARCH_IPV6_CSUM
...@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, ...@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
* Copy and checksum to user * Copy and checksum to user
*/ */
#define HAVE_CSUM_COPY_USER #define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst, static __inline__ __wsum csum_and_copy_to_user(const void *src,
int len, __wsum sum, int *err_ptr) void __user *dst, int len,
__wsum sum, int *err_ptr)
{ {
if (access_ok(VERIFY_WRITE, dst, len)) if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr); return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
if (len) if (len)
*err_ptr = -EFAULT; *err_ptr = -EFAULT;
......
@@ -134,7 +134,8 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #endif
 }

-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+        ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 /*
  * This only works if the compiler isn't horribly bad at optimizing.
...
@@ -30,7 +30,7 @@ static inline struct task_struct *get_current(void)
 #define GET_CURRENT(reg,sp) \
         GET_THREAD_INFO(reg,sp); \
         l32i reg, reg, TI_TASK \

 #endif
...
@@ -19,9 +19,9 @@ extern unsigned long loops_per_jiffy;
 static inline void __delay(unsigned long loops)
 {
         /* 2 cycles per loop. */
         __asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 2, 1b"
                               : "=r" (loops) : "0" (loops));
 }

 static __inline__ u32 xtensa_get_ccount(void)
@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs)
 }

 #endif
@@ -100,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 }

 static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-                           enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+                           size_t size, enum dma_data_direction direction)
 {
         consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
 }
...
@@ -168,11 +168,11 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
  */
 #define ELF_PLAT_INIT(_r, load_addr) \
         do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
              _r->areg[4]=0;  _r->areg[5]=0;   _r->areg[6]=0;  _r->areg[7]=0; \
              _r->areg[8]=0;  _r->areg[9]=0;   _r->areg[10]=0; _r->areg[11]=0; \
              _r->areg[12]=0; _r->areg[13]=0;  _r->areg[14]=0; _r->areg[15]=0; \
         } while (0)

 typedef struct {
         xtregs_opt_t opt;
...
@@ -14,4 +14,3 @@
 extern void flush_cache_kmaps(void);

 #endif
@@ -107,7 +107,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                              struct task_struct *tsk)
 {
         unsigned long asid = asid_cache;
...
@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }

-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
 {
         return 0;
 }
...
@@ -29,19 +29,19 @@
  * PAGE_SHIFT determines the page size
  */

 #define PAGE_SHIFT      12
 #define PAGE_SIZE       (__XTENSA_UL_CONST(1) << PAGE_SHIFT)
 #define PAGE_MASK       (~(PAGE_SIZE-1))

 #ifdef CONFIG_MMU
 #define PAGE_OFFSET     XCHAL_KSEG_CACHED_VADDR
 #define MAX_MEM_PFN     XCHAL_KSEG_SIZE
 #else
 #define PAGE_OFFSET     0
 #define MAX_MEM_PFN     (PLATFORM_DEFAULT_MEM_START + PLATFORM_DEFAULT_MEM_SIZE)
 #endif

 #define PGTABLE_START   0x80000000

 /*
  * Cache aliasing:
@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
 #define __pa(x)         ((unsigned long) (x) - PAGE_OFFSET)
 #define __va(x)         ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#define pfn_valid(pfn)  ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn) \
+        ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)

 #ifdef CONFIG_DISCONTIGMEM
 # error CONFIG_DISCONTIGMEM not supported
 #endif
...
@@ -35,7 +35,7 @@ struct pci_space {
 struct pci_controller {
         int index;                      /* used for pci_controller_num */
         struct pci_controller *next;
         struct pci_bus *bus;
         void *arch_data;

         int first_busno;
...
@@ -53,7 +53,7 @@ struct pci_dev;
 /* Map a range of PCI memory or I/O space for a device into user space */
 int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
                         enum pci_mmap_state mmap_state, int write_combine);

 /* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
 #define HAVE_PCI_MMAP   1
...
@@ -42,7 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 extern struct kmem_cache *pgtable_cache;

 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long address)
 {
         return kmem_cache_alloc(pgtable_cache, GFP_KERNEL|__GFP_REPEAT);
...
@@ -284,7 +284,7 @@ struct vm_area_struct;
 static inline int
 ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
                           pte_t *ptep)
 {
         pte_t pte = *ptep;
         if (!pte_young(pte))
@@ -304,8 +304,8 @@ ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 static inline void
 ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         pte_t pte = *ptep;
         update_pte(ptep, pte_wrprotect(pte));
 }

 /* to find an entry in a kernel page-table-directory */
@@ -399,7 +399,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
  */
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
         remap_pfn_range(vma, from, pfn, size, prot)

 typedef pte_t *pte_addr_t;
...
@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void);
 extern void platform_calibrate_ccount (void);

 #endif  /* _XTENSA_PLATFORM_H */
@@ -89,7 +89,7 @@
 #define MAKE_PC_FROM_RA(ra,sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))

 typedef struct {
         unsigned long seg;
 } mm_segment_t;

 struct thread_struct {
@@ -145,10 +145,10 @@ struct thread_struct {
  * set_thread_state in signal.c depends on it.
  */
 #define USER_PS_VALUE ((1 << PS_WOE_BIT) |              \
                        (1 << PS_CALLINC_SHIFT) |        \
                        (USER_RING << PS_RING_SHIFT) |   \
                        (1 << PS_UM_BIT) |               \
                        (1 << PS_EXCM_BIT))

 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
...
@@ -55,7 +55,7 @@ struct pt_regs {
 # define arch_has_single_step() (1)
 # define task_pt_regs(tsk) ((struct pt_regs*) \
         (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
 # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
 # define instruction_pointer(regs) ((regs)->pc)
...
@@ -109,4 +109,3 @@
 #define DEBUGCAUSE_ICOUNT_BIT           0       /* ICOUNT would incr. to zero */

 #endif /* _XTENSA_SPECREG_H */
@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int,
 /* Should probably move to linux/syscalls.h */
 struct pollfd;
 asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
-                             fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
+                             fd_set __user *exp, struct timespec __user *tsp,
+                             void __user *sig);
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
-                          struct timespec __user *tsp, const sigset_t __user *sigmask,
-                          size_t sigsetsize);
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
-                                  size_t sigsetsize);
+                          struct timespec __user *tsp,
+                          const sigset_t __user *sigmask,
+                          size_t sigsetsize);
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
@@ -180,7 +180,8 @@
 #define segment_eq(a,b) ((a).seg == (b).seg)

 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __user_ok(addr,size) \
+                (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
@@ -234,10 +235,10 @@ do { \
         int __cb; \
         retval = 0; \
         switch (size) { \
         case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb);  break; \
         case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
         case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
         case 8: { \
                      __typeof__(*ptr) __v64 = x; \
                      retval = __copy_to_user(ptr,&__v64,8); \
                      break; \
...
@@ -291,7 +292,7 @@ do { \
  * __check_align_* macros still work.
  */
 #define __put_user_asm(x, addr, err, align, insn, cb) \
 __asm__ __volatile__( \
         __check_align_##align \
         "1: "insn" %2, %3, 0 \n" \
         "2: \n" \
@@ -301,8 +302,8 @@ do { \
         " .long 2b \n" \
         "5: \n" \
         " l32r %1, 4b \n" \
         " movi %0, %4 \n" \
         " jx %1 \n" \
         " .previous \n" \
         " .section __ex_table,\"a\" \n" \
         " .long 1b, 5b \n" \
...@@ -334,13 +335,13 @@ extern long __get_user_bad(void); ...@@ -334,13 +335,13 @@ extern long __get_user_bad(void);
do { \ do { \
int __cb; \ int __cb; \
retval = 0; \ retval = 0; \
switch (size) { \ switch (size) { \
case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
case 8: retval = __copy_from_user(&x,ptr,8); break; \ case 8: retval = __copy_from_user(&x,ptr,8); break; \
default: (x) = __get_user_bad(); \ default: (x) = __get_user_bad(); \
} \ } \
} while (0) } while (0)
@@ -349,7 +350,7 @@ do { \
  * __check_align_* macros still work.
  */
 #define __get_user_asm(x, addr, err, align, insn, cb) \
 __asm__ __volatile__( \
         __check_align_##align \
         "1: "insn" %2, %3, 0 \n" \
         "2: \n" \
@@ -360,8 +361,8 @@ do { \
         "5: \n" \
         " l32r %1, 4b \n" \
         " movi %2, 0 \n" \
         " movi %0, %4 \n" \
         " jx %1 \n" \
         " .previous \n" \
         " .section __ex_table,\"a\" \n" \
         " .long 1b, 5b \n" \
@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
 #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user(to,from,n) \
+        __generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) \
+        __generic_copy_from_user_nocheck((to),(from),(n))
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
...
@@ -23,13 +23,13 @@ obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
 #
 # Replicate rules in scripts/Makefile.build

 sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
         -e 's/\.text\.unlikely/.literal.unlikely .text.unlikely/g' \
         -e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'

 quiet_cmd__cpp_lds_S = LDS $@
       cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
                        | sed $(sed-y) >$@

 $(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
         $(call if_changed_dep,_cpp_lds_S)
@@ -442,7 +442,7 @@ ENTRY(fast_unaligned)
         mov     a1, a2

         rsr     a0, ps
         bbsi.l  a2, PS_UM_BIT, 1f       # jump if user mode

         movi    a0, _kernel_exception
         jx      a0
@@ -453,4 +453,3 @@ ENTRY(fast_unaligned)
 ENDPROC(fast_unaligned)

 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
@@ -92,7 +92,8 @@ int main(void)
 #endif
         DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
         DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
-        DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
+        DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+               thread.current_ds));

         /* struct mm_struct */
         DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));
@@ -109,4 +110,3 @@ int main(void)
         return 0;
 }
@@ -172,7 +172,7 @@ ENTRY(coprocessor_load)
 ENDPROC(coprocessor_load)

 /*
  * coprocessor_flush(struct task_info*, index)
  *                             a2        a3
  * coprocessor_restore(struct task_info*, index)
  *                             a2         a3
@@ -354,4 +354,3 @@ ENTRY(coprocessor_owner)
 END(coprocessor_owner)

 #endif /* XTENSA_HAVE_COPROCESSORS */
@@ -874,7 +874,7 @@ ENTRY(fast_alloca)
         _bnei   a0, 1, 1f               # no 'movsp a1, ax': jump

         /* Move the save area. This implies the use of the L32E
          * and S32E instructions, because this move must be done with
          * the user's PS.RING privilege levels, not with ring 0
          * (kernel's) privileges currently active with PS.EXCM
@@ -1008,15 +1008,15 @@ ENDPROC(fast_syscall_user)
 ENTRY(fast_syscall_unrecoverable)

         /* Restore all states. */

         l32i    a0, a2, PT_AREG0        # restore a0
         xsr     a2, depc                # restore a2, depc
         rsr     a3, excsave1

         wsr     a0, excsave1
         movi    a0, unrecoverable_exception
         callx0  a0

 ENDPROC(fast_syscall_unrecoverable)
@@ -1253,9 +1253,9 @@ fast_syscall_spill_registers_fixup:
         movi    a3, exc_table
         rsr     a0, exccause
         addx4   a0, a0, a3              # find entry in table
         l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
         jx      a0

 fast_syscall_spill_registers_fixup_return:
@@ -1457,7 +1457,7 @@ ENTRY(_spill_registers)
         rsr     a0, ps
         _bbci.l a0, PS_UM_BIT, 1f

         /* User space: Setup a dummy frame and kill application.
          * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
          */
...
...@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, ...@@ -53,7 +53,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
struct module *mod) struct module *mod)
{ {
unsigned int i; unsigned int i;
Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr; Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym; Elf32_Sym *sym;
unsigned char *location; unsigned char *location;
uint32_t value; uint32_t value;
......
@@ -44,4 +44,3 @@ _F(void, calibrate_ccount, (void),
           ccount_per_jiffy = 10 * (1000000UL/HZ);
   });
 #endif
@@ -108,7 +108,7 @@ void coprocessor_flush_all(struct thread_info *ti)
 void cpu_idle(void)
 {
         local_irq_enable();

         /* endless idle loop with no priority at all */
         while (1) {
...
@@ -154,7 +154,7 @@ int ptrace_setxregs(struct task_struct *child, void __user *uregs)
         coprocessor_flush_all(ti);
         coprocessor_release_all(ti);
         ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
                                 sizeof(xtregs_coprocessor_t));
 #endif
         ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
@@ -343,4 +343,3 @@ void do_syscall_trace_leave(struct pt_regs *regs)
             && (current->ptrace & PT_PTRACED))
                 do_syscall_trace();
 }
@@ -284,7 +284,7 @@ void __init init_arch(bp_tag_t *bp_start)
         /* Parse boot parameters */

         if (bp_start)
                 parse_bootparam(bp_start);

 #ifdef CONFIG_OF
@@ -460,7 +460,7 @@ void __init setup_arch(char **cmdline_p)
                 initrd_is_mapped = mem_reserve(__pa(initrd_start),
                                                __pa(initrd_end), 0);
                 initrd_below_start_ok = 1;
         } else {
                 initrd_start = 0;
         }
 #endif
@@ -539,7 +539,7 @@ c_show(struct seq_file *f, void *slot)
                      "core ID\t\t: " XCHAL_CORE_ID "\n"
                      "build ID\t: 0x%x\n"
                      "byte order\t: %s\n"
                      "cpu MHz\t\t: %lu.%02lu\n"
                      "bogomips\t: %lu.%02lu\n",
                      XCHAL_BUILD_UNIQUE_ID,
                      XCHAL_HAVE_BE ? "big" : "little",
@@ -681,4 +681,3 @@ const struct seq_operations cpuinfo_op =
 };

 #endif /* CONFIG_PROC_FS */
@@ -212,7 +212,7 @@ restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
         if (err)
                 return err;

         /* The signal handler may have used coprocessors in which
          * case they are still enabled. We disable them to force a
          * reloading of the original task's CP state by the lazy
          * context-switching mechanisms of CP exception handling.
@@ -396,7 +396,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
          */

         /* Set up registers for signal handler */
         start_thread(regs, (unsigned long) ka->sa.sa_handler,
                      (unsigned long) frame);

         /* Set up a stack frame for a call4
@@ -424,9 +424,9 @@ static int setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
         return -EFAULT;
 }

 asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
                                    stack_t __user *uoss,
                                    long a2, long a3, long a4, long a5,
                                    struct pt_regs *regs)
 {
         return do_sigaltstack(uss, uoss, regs->areg[1]);
...
@@ -52,4 +52,3 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 {
         return sys_fadvise64_64(fd, offset, len, advice);
 }
@@ -408,7 +408,8 @@ static inline void spill_registers(void)
                 "wsr    a13, sar\n\t"
                 "wsr    a14, ps\n\t"
                 :: "a" (&a0), "a" (&ps)
-                : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
+                : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+                  "memory");
 }

 void show_trace(struct task_struct *task, unsigned long *sp)
@@ -463,7 +464,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
         if (!sp)
                 sp = stack_pointer(task);
         stack = sp;

         printk("\nStack: ");
@@ -534,5 +535,3 @@ void die(const char * str, struct pt_regs * regs, long err)
         do_exit(err);
 }
@@ -485,5 +485,3 @@ ENTRY_ALIGN64(_WindowUnderflow12)
 ENDPROC(_WindowUnderflow12)

         .text
@@ -41,10 +41,11 @@
 .text
 ENTRY(csum_partial)
+
         /*
          * Experiments with Ethernet and SLIP connections show that buf
          * is aligned on either a 2-byte or 4-byte boundary.
          */
         entry   sp, 32
         extui   a5, a2, 0, 2
         bnez    a5, 8f          /* branch if 2-byte aligned */
@@ -409,4 +410,3 @@ ENDPROC(csum_partial_copy_generic)
         retw

 .previous
...@@ -210,8 +210,10 @@ memcpy: ...@@ -210,8 +210,10 @@ memcpy:
_beqz a4, .Ldone # avoid loading anything for zero-length copies _beqz a4, .Ldone # avoid loading anything for zero-length copies
# copy 16 bytes per iteration for word-aligned dst and unaligned src # copy 16 bytes per iteration for word-aligned dst and unaligned src
ssa8 a3 # set shift amount from byte offset ssa8 a3 # set shift amount from byte offset
#define SIM_CHECKS_ALIGNMENT 1 /* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */ /* set to 1 when running on ISS (simulator) with the
lint or ferret client, or 0 to save a few cycles */
#define SIM_CHECKS_ALIGNMENT 1
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
and a11, a3, a8 # save unalignment offset for below and a11, a3, a8 # save unalignment offset for below
sub a3, a3, a11 # align a3 sub a3, a3, a11 # align a3
......
@@ -241,8 +241,8 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
         unsigned char header_type;
         struct pci_dev *dev = &pciauto_dev;

         pciauto_dev.bus = &pciauto_bus;
         pciauto_dev.sysdata = pci_ctrl;
         pciauto_bus.ops = pci_ctrl->ops;

         /*
@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
         }
         return sub_bus;
 }
@@ -166,7 +166,7 @@ __strncpy_user:
         retw
 .Lz1:   # byte 1 is zero
 #ifdef __XTENSA_EB__
         extui   a9, a9, 16, 16
 #endif /* __XTENSA_EB__ */
         EX(s16i, a9, a11, 0, fixup_s)
         addi    a11, a11, 1             # advance dst pointer
@@ -174,7 +174,7 @@ __strncpy_user:
         retw
 .Lz2:   # byte 2 is zero
 #ifdef __XTENSA_EB__
         extui   a9, a9, 16, 16
 #endif /* __XTENSA_EB__ */
         EX(s16i, a9, a11, 0, fixup_s)
         movi    a9, 0
...
@@ -145,4 +145,3 @@ __strnlen_user:
 lenfixup:
         movi    a2, 0
         retw
@@ -318,4 +318,3 @@ l_fixup:
         /* Ignore memset return value in a6. */
         /* a2 still contains bytes not copied. */
         retw
@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
  * For now, flush the whole cache. FIXME??
  */

 void flush_cache_range(struct vm_area_struct* vma,
                        unsigned long start, unsigned long end)
 {
         __flush_invalidate_dcache_all();
@@ -133,7 +133,7 @@ void flush_cache_range(struct vm_area_struct* vma,
  */

 void flush_cache_page(struct vm_area_struct* vma, unsigned long address,
                       unsigned long pfn)
 {
         /* Note that we have to use the 'alias' address to avoid multi-hit */
@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
         if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
-                unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
                 unsigned long paddr = (unsigned long) page_address(page);
                 unsigned long phys = page_to_phys(page);
+                unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);

                 __flush_invalidate_dcache_page(paddr);
-                __flush_invalidate_dcache_page_alias(vaddr, phys);
-                __invalidate_icache_page_alias(vaddr, phys);
+                __flush_invalidate_dcache_page_alias(tmp, phys);
+                __invalidate_icache_page_alias(tmp, phys);

                 clear_bit(PG_arch_1, &page->flags);
         }
@@ -195,7 +195,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK

 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, const void *src,
                        unsigned long len)
 {
@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
         /* Flush and invalidate user page if aliased. */

         if (alias) {
-                unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
-                __flush_invalidate_dcache_page_alias(temp, phys);
+                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+                __flush_invalidate_dcache_page_alias(t, phys);
         }

         /* Copy data */
@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
          */

         if (alias) {
-                unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

                 __flush_invalidate_dcache_range((unsigned long) dst, len);
-                if ((vma->vm_flags & VM_EXEC) != 0) {
-                        __invalidate_icache_page_alias(temp, phys);
-                }
+                if ((vma->vm_flags & VM_EXEC) != 0)
+                        __invalidate_icache_page_alias(t, phys);

         } else if ((vma->vm_flags & VM_EXEC) != 0) {
                 __flush_dcache_range((unsigned long)dst,len);
@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
          */

         if (alias) {
-                unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
-                __flush_invalidate_dcache_page_alias(temp, phys);
+                unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+                __flush_invalidate_dcache_page_alias(t, phys);
         }

         memcpy(dst, src, len);
...
@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
         die("Oops", regs, sig);
         do_exit(sig);
 }
@@ -75,15 +75,15 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
                         sysmem.nr_banks++;
                 }
                 sysmem.bank[i].end = start;
+        } else if (end < sysmem.bank[i].end) {
+                sysmem.bank[i].start = end;
         } else {
-                if (end < sysmem.bank[i].end)
-                        sysmem.bank[i].start = end;
-                else {
-                        /* remove entry */
-                        sysmem.nr_banks--;
-                        sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
-                        sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
-                }
+                /* remove entry */
+                sysmem.nr_banks--;
+                sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
+                sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
         }
         return -1;
 }
...
@@ -180,7 +180,7 @@ ENDPROC(clear_user_page)
 ENTRY(copy_user_page)

         entry   a1, 32

         /* Mark page dirty and determine alias for destination. */
...
@@ -82,7 +82,7 @@ void flush_tlb_mm(struct mm_struct *mm)
 #endif

 void flush_tlb_range (struct vm_area_struct *vma,
                       unsigned long start, unsigned long end)
 {
         struct mm_struct *mm = vma->vm_mm;
         unsigned long flags;
@@ -100,7 +100,7 @@ void flush_tlb_range (struct vm_area_struct *vma,
                 int oldpid = get_rasid_register();
                 set_rasid_register (ASID_INSERT(mm->context));
                 start &= PAGE_MASK;

                 if (vma->vm_flags & VM_EXEC)
                         while(start < end) {
                                 invalidate_itlb_mapping(start);
                                 invalidate_dtlb_mapping(start);
@@ -130,7 +130,7 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
         local_save_flags(flags);

         oldpid = get_rasid_register();

         if (vma->vm_flags & VM_EXEC)
                 invalidate_itlb_mapping(page);
@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
         local_irq_restore(flags);
 }