Commit c4c4594b authored by Chris Zankel

xtensa: clean up files to make them code-style compliant

Remove leading and trailing spaces, trim trailing lines, and wrap lines
that are longer than 80 characters.
Signed-off-by: Chris Zankel <chris@zankel.net>
parent 72100ed7
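The diff below is a mechanical whitespace and line-length cleanup. As a minimal sketch of how such issues can be located before cleaning them up (not part of this commit; the file path is only an example), one could use the kernel's own checkpatch script plus ordinary text tools:

	# report style problems (long lines, trailing whitespace, ...) for one source file
	./scripts/checkpatch.pl -f arch/xtensa/kernel/traps.c

	# list lines that end in a space anywhere under arch/xtensa
	grep -rn ' $' arch/xtensa

	# list lines longer than 80 characters in a single file
	awk 'length > 80 { print FILENAME ": " FNR ": " $0 }' arch/xtensa/kernel/traps.c

checkpatch.pl ships with the kernel tree; the grep and awk one-liners are shown only to illustrate what "trailing spaces" and "longer than 80 characters" mean in practice.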
@@ -106,4 +106,3 @@ zImage: vmlinux
 define archhelp
 	@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
 endef
-

@@ -57,6 +57,3 @@ extern sysmem_info_t sysmem;
 #endif
 #endif
-
-
-

@@ -174,4 +174,3 @@
 	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
 .endm
-
@@ -104,7 +104,8 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page*);
 extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
-extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
+extern void flush_cache_page(struct vm_area_struct*,
+			     unsigned long, unsigned long);
 #else
@@ -36,7 +36,8 @@ asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len, __wsum sum,
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
+					    int len, __wsum sum,
 					    int *src_err_ptr, int *dst_err_ptr);
 /*
@@ -112,7 +113,8 @@ static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	/* Since the input registers which are loaded with iph and ihl
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
-	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
+	: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
+	  "=&r" (endaddr)
 	: "1" (iph), "2" (ihl)
 	: "memory");
@@ -238,11 +240,12 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
  * Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-static __inline__ __wsum csum_and_copy_to_user(const void *src, void __user *dst,
-					       int len, __wsum sum, int *err_ptr)
+static __inline__ __wsum csum_and_copy_to_user(const void *src,
+					       void __user *dst, int len,
+					       __wsum sum, int *err_ptr)
 {
 	if (access_ok(VERIFY_WRITE, dst, len))
-		return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
+		return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
 	if (len)
 		*err_ptr = -EFAULT;
@@ -134,7 +134,8 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 #endif
 }
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 /*
  * This only works if the compiler isn't horribly bad at optimizing.

@@ -46,4 +46,3 @@ static __inline__ void udelay (unsigned long usecs)
 }
 #endif
-
@@ -100,8 +100,8 @@ dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
 }
 static inline void
-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
-			   enum dma_data_direction direction)
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+			   size_t size, enum dma_data_direction direction)
 {
 	consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
 }

@@ -14,4 +14,3 @@
 extern void flush_cache_kmaps(void);
 #endif
-
@@ -2,7 +2,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+static inline int init_new_context(struct task_struct *tsk,struct mm_struct *mm)
 {
 	return 0;
 }
@@ -161,7 +161,9 @@ extern void copy_user_page(void*, void*, unsigned long, struct page*);
 #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
 #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
-#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#define pfn_valid(pfn) \
+	((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
 #ifdef CONFIG_DISCONTIGMEM
 # error CONFIG_DISCONTIGMEM not supported
 #endif

@@ -75,4 +75,3 @@ extern int platform_pcibios_fixup (void);
 extern void platform_calibrate_ccount (void);
 #endif /* _XTENSA_PLATFORM_H */
-

@@ -109,4 +109,3 @@
 #define DEBUGCAUSE_ICOUNT_BIT 0 /* ICOUNT would incr. to zero */
 #endif /* _XTENSA_SPECREG_H */
-
@@ -25,9 +25,10 @@ asmlinkage long xtensa_fadvise64_64(int, int,
 /* Should probably move to linux/syscalls.h */
 struct pollfd;
 asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
-	fd_set __user *exp, struct timespec __user *tsp, void __user *sig);
+	fd_set __user *exp, struct timespec __user *tsp,
+	void __user *sig);
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
-	struct timespec __user *tsp, const sigset_t __user *sigmask,
+	struct timespec __user *tsp,
+	const sigset_t __user *sigmask,
 	size_t sigsetsize);
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
+	size_t sigsetsize);
@@ -180,7 +180,8 @@
 #define segment_eq(a,b) ((a).seg == (b).seg)
 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
+#define __user_ok(addr,size) \
+	(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
 #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
@@ -291,7 +292,7 @@ do { \
  * __check_align_* macros still work.
  */
 #define __put_user_asm(x, addr, err, align, insn, cb)	\
 __asm__ __volatile__(			\
 	__check_align_##align		\
 	"1: "insn" %2, %3, 0 \n"	\
 	"2: \n"				\

@@ -349,7 +350,7 @@ do { \
  * __check_align_* macros still work.
  */
 #define __get_user_asm(x, addr, err, align, insn, cb)	\
 __asm__ __volatile__(			\
 	__check_align_##align		\
 	"1: "insn" %2, %3, 0 \n"	\
 	"2: \n"				\
@@ -421,8 +422,10 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
 #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
-#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n))
-#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n))
+#define __copy_to_user(to,from,n) \
+	__generic_copy_to_user_nocheck((to),(from),(n))
+#define __copy_from_user(to,from,n) \
+	__generic_copy_from_user_nocheck((to),(from),(n))
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
@@ -28,7 +28,7 @@ sed-y = -e 's/\*(\(\.[a-z]*it\|\.ref\|\)\.text)/*(\1.literal \1.text)/g' \
 	-e 's/\*(\(\.text\.[a-z]*\))/*(\1.literal \1)/g'
 quiet_cmd__cpp_lds_S = LDS $@
       cmd__cpp_lds_S = $(CPP) $(cpp_flags) -P -C -Uxtensa -D__ASSEMBLY__ $< \
                        | sed $(sed-y) >$@
 $(obj)/vmlinux.lds: $(src)/vmlinux.lds.S FORCE
@@ -453,4 +453,3 @@ ENTRY(fast_unaligned)
 ENDPROC(fast_unaligned)
 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
-
@@ -92,7 +92,8 @@ int main(void)
 #endif
 	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
 	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
-	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));
+	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, \
+	       thread.current_ds));
 	/* struct mm_struct */
 	DEFINE(MM_USERS, offsetof(struct mm_struct, mm_users));

@@ -109,4 +110,3 @@ int main(void)
 	return 0;
 }
-
@@ -354,4 +354,3 @@ ENTRY(coprocessor_owner)
 END(coprocessor_owner)
 #endif /* XTENSA_HAVE_COPROCESSORS */
-

@@ -44,4 +44,3 @@ _F(void, calibrate_ccount, (void),
 	ccount_per_jiffy = 10 * (1000000UL/HZ);
 });
 #endif
-

@@ -343,4 +343,3 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 	    && (current->ptrace & PT_PTRACED))
 		do_syscall_trace();
 }
-

@@ -681,4 +681,3 @@ const struct seq_operations cpuinfo_op =
 };
 #endif /* CONFIG_PROC_FS */
-

@@ -52,4 +52,3 @@ asmlinkage long xtensa_fadvise64_64(int fd, int advice,
 {
 	return sys_fadvise64_64(fd, offset, len, advice);
 }
-
@@ -408,7 +408,8 @@ static inline void spill_registers(void)
 		"wsr a13, sar\n\t"
 		"wsr a14, ps\n\t"
 		:: "a" (&a0), "a" (&ps)
-		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
+		: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+		  "memory");
 }
 void show_trace(struct task_struct *task, unsigned long *sp)

@@ -534,5 +535,3 @@ void die(const char * str, struct pt_regs * regs, long err)
 	do_exit(err);
 }
-
-

@@ -485,5 +485,3 @@ ENTRY_ALIGN64(_WindowUnderflow12)
 ENDPROC(_WindowUnderflow12)
 	.text
-
-
@@ -41,6 +41,7 @@
 .text
 ENTRY(csum_partial)
 	/*
	 * Experiments with Ethernet and SLIP connections show that buf
	 * is aligned on either a 2-byte or 4-byte boundary.

@@ -409,4 +410,3 @@ ENDPROC(csum_partial_copy_generic)
 	retw
 .previous
-
@@ -210,8 +210,10 @@ memcpy:
 	_beqz a4, .Ldone	# avoid loading anything for zero-length copies
 	# copy 16 bytes per iteration for word-aligned dst and unaligned src
 	ssa8 a3			# set shift amount from byte offset
-#define SIM_CHECKS_ALIGNMENT	1 /* set to 1 when running on ISS (simulator) with the
+/* set to 1 when running on ISS (simulator) with the
    lint or ferret client, or 0 to save a few cycles */
+#define SIM_CHECKS_ALIGNMENT	1
 #if XCHAL_UNALIGNED_LOAD_EXCEPTION || SIM_CHECKS_ALIGNMENT
 	and a11, a3, a8		# save unalignment offset for below
 	sub a3, a3, a11		# align a3
@@ -345,8 +345,3 @@ int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
 	}
 	return sub_bus;
 }
-
-
-
-
-

@@ -145,4 +145,3 @@ __strnlen_user:
 lenfixup:
 	movi a2, 0
 	retw
-

@@ -318,4 +318,3 @@ l_fixup:
 	/* Ignore memset return value in a6. */
 	/* a2 still contains bytes not copied. */
 	retw
-
@@ -166,14 +166,14 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
-		unsigned long vaddr = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
 		unsigned long paddr = (unsigned long) page_address(page);
 		unsigned long phys = page_to_phys(page);
+		unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_page(paddr);
-		__flush_invalidate_dcache_page_alias(vaddr, phys);
-		__invalidate_icache_page_alias(vaddr, phys);
+		__flush_invalidate_dcache_page_alias(tmp, phys);
+		__invalidate_icache_page_alias(tmp, phys);
 		clear_bit(PG_arch_1, &page->flags);
 	}
@@ -205,8 +205,8 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	/* Flush and invalidate user page if aliased. */
 	if (alias) {
-		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(temp, phys);
+		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(t, phys);
 	}
 	/* Copy data */

@@ -219,12 +219,11 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 	 */
 	if (alias) {
-		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
 		__flush_invalidate_dcache_range((unsigned long) dst, len);
-		if ((vma->vm_flags & VM_EXEC) != 0) {
-			__invalidate_icache_page_alias(temp, phys);
-		}
+		if ((vma->vm_flags & VM_EXEC) != 0)
+			__invalidate_icache_page_alias(t, phys);
 	} else if ((vma->vm_flags & VM_EXEC) != 0) {
 		__flush_dcache_range((unsigned long)dst,len);

@@ -245,8 +244,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	 */
 	if (alias) {
-		unsigned long temp = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
-		__flush_invalidate_dcache_page_alias(temp, phys);
+		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);
+		__flush_invalidate_dcache_page_alias(t, phys);
 	}
 	memcpy(dst, src, len);
@@ -254,4 +254,3 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 	die("Oops", regs, sig);
 	do_exit(sig);
 }
-
@@ -75,16 +75,16 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 			sysmem.nr_banks++;
 		}
 		sysmem.bank[i].end = start;
-	} else {
-		if (end < sysmem.bank[i].end)
+	} else if (end < sysmem.bank[i].end) {
 		sysmem.bank[i].start = end;
-		else {
+	} else {
 		/* remove entry */
 		sysmem.nr_banks--;
 		sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
 		sysmem.bank[i].end = sysmem.bank[sysmem.nr_banks].end;
 	}
-	}
 	return -1;
 }
@@ -140,4 +140,3 @@ void flush_tlb_page (struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
-