Commit ffa818b4 authored by Chen Liqin

score: update inconsistent declare after .c was changed

parent d8aa899b
@@ -20,4 +20,6 @@
#define IRQ_TIMER (7) /* Timer IRQ number of SPCT6600 */
extern void interrupt_exception_vector(void);
#endif /* _ASM_SCORE_IRQ_H */
#ifndef _ASM_SCORE_IRQ_REGS_H
#define _ASM_SCORE_IRQ_REGS_H
#include <asm-generic/irq_regs.h>
#include <linux/thread_info.h>
static inline struct pt_regs *get_irq_regs(void)
{
return current_thread_info()->regs;
}
#endif /* _ASM_SCORE_IRQ_REGS_H */
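The new get_irq_regs() simply hands back the register frame that the interrupt entry code is presumed to stash in thread_info. A minimal sketch of its usual consumer, a periodic timer handler (the handler itself and its IRQ wiring are assumptions for illustration, not part of this commit):

```c
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

/* Hypothetical timer ISR: user_mode() + update_process_times() are the
 * classic per-tick users of get_irq_regs(). */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();

	update_process_times(user_mode(regs));
	return IRQ_HANDLED;
}
```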
#ifndef _ASM_SCORE_LINKAGE_H
#define _ASM_SCORE_LINKAGE_H
#define __ALIGN .align 2
#define __ALIGN_STR ".align 2"
#endif /* _ASM_SCORE_LINKAGE_H */
@@ -57,8 +57,8 @@ extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;
extern unsigned long max_pfn;
#define __pa(vaddr) ((unsigned long) (vaddr))
#define __va(paddr) ((void *) (paddr))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define phys_to_pfn(phys) (PFN_DOWN(phys))
#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
......
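The old identity-mapped __pa()/__va() were only consistent with a zero PAGE_OFFSET; the replacements translate between the kernel's linear mapping and physical addresses, while phys_to_pfn()/pfn_to_phys() shift by PAGE_SHIFT via PFN_DOWN()/PFN_PHYS(). A small round-trip sanity check, just as a sketch (PAGE_OFFSET's value comes from page.h and is not shown in this hunk):

```c
#include <linux/kernel.h>
#include <linux/pfn.h>
#include <asm/page.h>

static void __pa_va_roundtrip_check(void *kvaddr)
{
	unsigned long phys = __pa(kvaddr);		/* vaddr - PAGE_OFFSET */
	unsigned long pfn  = phys_to_pfn(phys);		/* phys >> PAGE_SHIFT  */

	BUG_ON(__va(phys) != kvaddr);			/* phys + PAGE_OFFSET  */
	BUG_ON(pfn_to_phys(pfn) != (phys & PAGE_MASK));	/* pfn << PAGE_SHIFT   */
}
```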
@@ -72,7 +72,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_pages(pte, PTE_ORDER);
}
#define __pte_free_tlb(tlb, pte) \
#define __pte_free_tlb(tlb, pte, buf) \
do { \
pgtable_page_dtor(pte); \
tlb_remove_page((tlb), pte); \
......
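The hunk is cut off after tlb_remove_page(); the complete helper after this change presumably reads as below — the body is unchanged, only the extra buf argument (the user virtual address now passed down by the generic mm code) is new, and it is simply ignored:

```c
#define __pte_free_tlb(tlb, pte, buf)		\
do {						\
	pgtable_page_dtor(pte);			\
	tlb_remove_page((tlb), pte);		\
} while (0)	/* closing line assumed; it falls outside the visible hunk */
```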
@@ -106,7 +106,8 @@ static inline void pmd_clear(pmd_t *pmdp)
((swp_entry_t) { pte_val(pte)})
#define __swp_entry_to_pte(x) ((pte_t) {(x).val})
#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))
#define pmd_phys(pmd) __pa((void *)pmd_val(pmd))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
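The old pmd_page() treated pmd_val() as a physical address, while the page-table setup code apparently stores the kernel virtual address of the pte page (which is what the commit title's "inconsistent declare" appears to be about); the new pmd_phys()/pmd_page() pair makes the header match that. A consistency-check sketch under that assumption:

```c
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

static void check_pmd_helpers(pmd_t pmd)
{
	void *pte_vaddr = (void *)pmd_val(pmd);	/* assumed: virtual address of the pte page */

	BUG_ON(pmd_phys(pmd) != __pa(pte_vaddr));
	BUG_ON(pmd_page(pmd) != virt_to_page(pte_vaddr));
}
```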
@@ -129,14 +130,11 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define pgd_clear(pgdp) do { } while (0)
#define kern_addr_valid(addr) (1)
#define pmd_offset(a, b) ((void *) 0)
#define pmd_page_vaddr(pmd) pmd_val(pmd)
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
#define pud_offset(pgd, address) ((pud_t *) pgd)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_CACHE)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_CACHE)
@@ -165,15 +163,27 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
#define pgprot_noncached(x) (x)
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
unsigned long prot = pgprot_val(_prot);
prot = (prot & ~_CACHE_MASK);
return __pgprot(prot);
}
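Defining pgprot_noncached to its own name is the usual marker that the architecture supplies a real implementation, so any generic "#ifndef pgprot_noncached" fallback stays out of the way; the inline then clears the cache bits from the protection value. A typical use, sketched as a hypothetical driver mmap handler:

```c
#include <linux/fs.h>
#include <linux/mm.h>

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Map the device pages uncached; only the pgprot_noncached() call
	 * relates to this hunk, the rest is boilerplate. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
```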
#define __swp_type(x) (0)
#define __swp_offset(x) (0)
#define __swp_entry(typ, off) ((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ((x).val >> 11)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | ((offset) << 11)})
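The old swap macros discarded both the type and the offset; the new encoding keeps the swap type in the low 5 bits and the offset from bit 11 upward (the skipped bits presumably leave room for the present/protection bits defined elsewhere). A quick round-trip sketch:

```c
#include <linux/kernel.h>
#include <linux/swap.h>

static void swp_roundtrip_check(void)
{
	swp_entry_t ent = __swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */

	BUG_ON(__swp_type(ent) != 3);			/* ent.val & 0x1f */
	BUG_ON(__swp_offset(ent) != 0x1234);		/* ent.val >> 11  */
	BUG_ON(pte_val(__swp_entry_to_pte(ent)) != ent.val);
}
```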
#define ZERO_PAGE(vaddr) ({ BUG(); NULL; })
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;
#define swapper_pg_dir ((pgd_t *) NULL)
#define ZERO_PAGE(vaddr) \
(virt_to_page((void *)(empty_zero_page + \
(((unsigned long)(vaddr)) & zero_page_mask))))
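ZERO_PAGE() used to BUG(); it now resolves to one of the pre-zeroed pages, with zero_page_mask selecting a cache colour from the faulting address (the MIPS-style scheme for aliasing caches). empty_zero_page and zero_page_mask are expected to be set up by the arch's mm init code, which is not part of this diff. A read-fault sketch:

```c
#include <linux/mm.h>
#include <asm/pgtable.h>

/* Sketch: back a read-only anonymous fault with the shared zero page for
 * the faulting address' cache colour. */
static struct page *zero_page_for(unsigned long address)
{
	return ZERO_PAGE(address);	/* virt_to_page(empty_zero_page + colour) */
}
```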
#define pgtable_cache_init() do {} while (0)
@@ -248,6 +258,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
extern unsigned long pgd_current;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
......
@@ -51,9 +51,11 @@
* system call/exception. As usual the registers k0/k1 aren't being saved.
*/
struct pt_regs {
unsigned long pad0[6];
unsigned long pad0[6]; /* stack arguments */
unsigned long orig_r4;
unsigned long orig_r7;
long is_syscall;
unsigned long regs[32];
unsigned long cel;
@@ -68,12 +70,12 @@ struct pt_regs {
unsigned long cp0_psr;
unsigned long cp0_ecr;
unsigned long cp0_condition;
long is_syscall;
};
#ifdef __KERNEL__
struct task_struct;
/*
* Does the process account for user or for system time?
*/
......
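In pt_regs, is_syscall moves next to the new orig_r4/orig_r7 fields, grouping the syscall bookkeeping that entry code records so an interrupted system call can later be restarted. A heavily simplified sketch of that consumer; the register conventions assumed here (return value in r4, syscall number in r7) and the restart steps are illustrative guesses, not taken from this patch:

```c
#include <linux/errno.h>
#include <asm/ptrace.h>

/* Hypothetical restart logic, modelled on what signal-delivery code on
 * other architectures does with these fields. */
static void restart_syscall_if_needed(struct pt_regs *regs)
{
	if (!regs->is_syscall)
		return;

	switch (regs->regs[4]) {		/* assumed: r4 holds the return value */
	case -ERESTARTNOHAND:
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
		regs->regs[4] = regs->orig_r4;	/* restore the clobbered argument */
		regs->regs[7] = regs->orig_r7;	/* restore the syscall number     */
		/* ...and rewind cp0_epc so the syscall instruction re-executes. */
		break;
	}
}
```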
#ifndef _ASM_SCORE_UACCESS_H
#define _ASM_SCORE_UACCESS_H
#ifndef __SCORE_UACCESS_H
#define __SCORE_UACCESS_H
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
#define get_ds() (KERNEL_DS)
#define get_fs() (current_thread_info()->addr_limit)
#define segment_eq(a, b) ((a).seg == (b).seg)
/*
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
struct pt_regs;
extern int fixup_exception(struct pt_regs *regs);
#define __ua_size(size) \
((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
/*
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
* %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
* to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
* Context: User context only. This function may sleep.
*
* Checks if a pointer to a block of memory in user space is valid.
*
* Returns true (nonzero) if the memory block may be valid, false (zero)
* if it is definitely invalid.
*
* Note that, depending on architecture, this function probably just
* checks that the pointer is in the user space range - after calling
* this function, memory access functions may still return -EFAULT.
*/
#define __access_ok(addr, size) \
(((long)((get_fs().seg) & \
((addr) | ((addr) + (size)) | \
__ua_size(size)))) == 0)
#define access_ok(type, addr, size) \
likely(__access_ok((unsigned long)(addr), (size)))
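access_ok() works by AND-ing the segment limit with everything that must stay below it: with KERNEL_DS the seg value is presumably zero so every access passes, while USER_DS sets the high bit(s), so any address, size, or end-of-range reaching into kernel space leaves a non-zero result (the actual seg values live in segment.h, outside this diff). The usual calling pattern, as a sketch:

```c
/* Hypothetical helper: validate once, then use the unchecked copy. */
static long give_to_user(void __user *ubuf, const void *kbuf, unsigned long len)
{
	if (!access_ok(VERIFY_WRITE, ubuf, len))
		return -EFAULT;
	return __copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
}
```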
/*
* put_user: - Write a simple value into user space.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Returns zero on success, or -EFAULT on error.
*/
#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
/*
* get_user: - Get a simple variable from user space.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
/*
* __put_user: - Write a simple value into user space, with less checking.
* @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
* to the result of dereferencing @ptr.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
*/
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
/*
* __get_user: - Get a simple variable from user space, with less checking.
* @x: Variable to store result.
* @ptr: Source address, in user space.
*
* Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and the result of
* dereferencing @ptr must be assignable to @x without a cast.
*
* Caller must check the pointer with access_ok() before calling this
* function.
*
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
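The four accessors split along two axes: get vs. put, and checked (get_user()/put_user(), which run access_ok() themselves) vs. unchecked (__get_user()/__put_user(), for callers that have already validated the pointer). A typical driver-style use, with the ioctl handler and its argument layout invented for illustration:

```c
#include <linux/fs.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl: read an int from userspace, double it, write it back. */
static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int __user *uptr = (int __user *)arg;
	int val;

	if (get_user(val, uptr))	/* checked read: 0 on success, -EFAULT on fault */
		return -EFAULT;
	if (put_user(val * 2, uptr))	/* checked write-back */
		return -EFAULT;
	return 0;
}
```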
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
extern void __get_user_unknown(void);
#define __get_user_common(val, size, ptr) \
do { \
switch (size) { \
case 1: \
__get_user_asm(val, "lb", ptr); \
break; \
case 2: \
__get_user_asm(val, "lh", ptr); \
break; \
case 4: \
__get_user_asm(val, "lw", ptr); \
break; \
case 8: \
if ((copy_from_user((void *)&val, ptr, 8)) == 0) \
__gu_err = 0; \
else \
__gu_err = -EFAULT; \
break; \
default: \
__get_user_unknown(); \
break; \
} \
} while (0)
#define __get_user_nocheck(x, ptr, size) \
({ \
long __gu_err = 0; \
__get_user_common((x), size, ptr); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
\
if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
__get_user_common((x), size, __gu_ptr); \
\
__gu_err; \
})
#define __get_user_asm(val, insn, addr) \
{ \
long __gu_tmp; \
\
__asm__ __volatile__( \
"1:" insn " %1, %3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".word 1b, 3b\n" \
".previous\n" \
: "=r" (__gu_err), "=r" (__gu_tmp) \
: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
\
(val) = (__typeof__(*(addr))) __gu_tmp; \
}
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
#define __put_user_nocheck(val, ptr, size) \
({ \
__typeof__(*(ptr)) __pu_val; \
long __pu_err = 0; \
\
__pu_val = (val); \
switch (size) { \
case 1: \
__put_user_asm("sb", ptr); \
break; \
case 2: \
__put_user_asm("sh", ptr); \
break; \
case 4: \
__put_user_asm("sw", ptr); \
break; \
case 8: \
if ((__copy_to_user((void *)ptr, &__pu_val, 8)) == 0) \
__pu_err = 0; \
else \
__pu_err = -EFAULT; \
break; \
default: \
__put_user_unknown(); \
break; \
} \
__pu_err; \
})
#define __put_user_check(val, ptr, size) \
({ \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
__typeof__(*(ptr)) __pu_val = (val); \
long __pu_err = -EFAULT; \
\
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
switch (size) { \
case 1: \
__put_user_asm("sb", __pu_addr); \
break; \
case 2: \
__put_user_asm("sh", __pu_addr); \
break; \
case 4: \
__put_user_asm("sw", __pu_addr); \
break; \
case 8: \
if ((__copy_to_user((void *)__pu_addr, &__pu_val, 8)) == 0)\
__pu_err = 0; \
else \
__pu_err = -EFAULT; \
break; \
default: \
__put_user_unknown(); \
break; \
} \
} \
__pu_err; \
})
#define __put_user_asm(insn, ptr) \
__asm__ __volatile__( \
"1:" insn " %2, %3\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3:li %0, %4\n" \
"j 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".word 1b, 3b\n" \
".previous\n" \
: "=r" (__pu_err) \
: "0" (0), "r" (__pu_val), "o" (__m(ptr)), \
"i" (-EFAULT));
extern void __put_user_unknown(void);
extern int __copy_tofrom_user(void *to, const void *from, unsigned long len);
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long len)
{
unsigned long over;
if (access_ok(VERIFY_READ, from, len))
return __copy_tofrom_user(to, from, len);
if ((unsigned long)from < TASK_SIZE) {
over = (unsigned long)from + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len;
}
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long len)
{
unsigned long over;
if (access_ok(VERIFY_WRITE, to, len))
return __copy_tofrom_user(to, from, len);
if ((unsigned long)to < TASK_SIZE) {
over = (unsigned long)to + len - TASK_SIZE;
return __copy_tofrom_user(to, from, len - over) + over;
}
return len;
}
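Both copy routines return the number of bytes that could not be copied (0 on full success); the "over" arithmetic additionally permits a partial copy when only the tail of the range crosses TASK_SIZE. Callers therefore treat any non-zero return as a fault, as in this sketch (the structure and names are invented):

```c
#include <linux/types.h>
#include <linux/errno.h>

struct example_req { u32 flags; u32 len; };

/* Hypothetical: pull a fixed-size request structure in from userspace. */
static long fetch_req(struct example_req *kreq, const void __user *ubuf)
{
	if (copy_from_user(kreq, ubuf, sizeof(*kreq)))
		return -EFAULT;		/* some bytes were left uncopied */
	return 0;
}
```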
#ifndef __ASSEMBLY__
#define __copy_from_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
#define __range_ok(addr, size) \
((((unsigned long __force)(addr) >= 0x80000000) \
|| ((unsigned long)(size) > 0x80000000) \
|| (((unsigned long __force)(addr) + (unsigned long)(size)) > 0x80000000)))
#define __copy_to_user(to, from, len) \
__copy_tofrom_user((to), (from), (len))
#define __access_ok(addr, size) \
(__range_ok((addr), (size)) == 0)
static inline unsigned long
__copy_to_user_inatomic(void *to, const void *from, unsigned long len)
{
return __copy_to_user(to, from, len);
}
#include <asm-generic/uaccess.h>
static inline unsigned long
__copy_from_user_inatomic(void *to, const void *from, unsigned long len)
{
return __copy_from_user(to, from, len);
}
#define __copy_in_user(to, from, len) __copy_from_user(to, from, len)
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long len)
{
	if (access_ok(VERIFY_READ, from, len) &&
	    access_ok(VERIFY_WRITE, to, len))
		return copy_from_user(to, from, len);
	return len;
}
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
* Zero a block of memory in user space. Caller must check
* the specified block with access_ok() before calling this function.
*
* Returns number of bytes that could not be cleared.
* On success, this will be zero.
*/
extern unsigned long __clear_user(void __user *src, unsigned long size);
static inline unsigned long clear_user(char *src, unsigned long size)
{
if (access_ok(VERIFY_WRITE, src, size))
return __clear_user(src, size);
return -EFAULT;
}
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
* least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
* Copies a NUL-terminated string from userspace to kernel space.
* Caller must check the specified block with access_ok() before calling
* this function.
*
* On success, returns the length of the string (not including the trailing
* NUL).
*
* If access to userspace fails, returns -EFAULT (some data may have been
* copied).
*
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
extern int __strncpy_from_user(char *dst, const char *src, long len);
static inline int strncpy_from_user(char *dst, const char *src, long len)
{
if (access_ok(VERIFY_READ, src, 1))
return __strncpy_from_user(dst, src, len);
return -EFAULT;
}
extern int __strlen_user(const char *src);
static inline long strlen_user(const char __user *src)
{
return __strlen_user(src);
}
extern int __strnlen_user(const char *str, long len);
static inline long strnlen_user(const char __user *str, long len)
{
if (!access_ok(VERIFY_READ, str, 0))
return 0;
else
return __strnlen_user(str, len);
}
struct exception_table_entry {
unsigned long insn;
unsigned long fixup;
};
extern int fixup_exception(struct pt_regs *regs);
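fixup_exception() is the consumer of the __ex_table entries emitted by __get_user_asm()/__put_user_asm() above: each ".word 1b, 3b" pair becomes one exception_table_entry, and when a user access faults, the fault handler calls it to redirect execution to the fixup stub (label 3:), which loads -EFAULT and resumes at label 2:. A sketch of how such a routine is typically written, modelled on other architectures (the real arch/score implementation may differ in detail):

```c
#include <linux/module.h>	/* search_exception_tables() */
#include <asm/ptrace.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->cp0_epc);
	if (fixup) {
		regs->cp0_epc = fixup->fixup;	/* resume at the fixup stub */
		return 1;
	}
	return 0;	/* no fixup entry: the fault is fatal in kernel mode */
}
```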
#endif /* __ASSEMBLY__ */
#endif /* __SCORE_UACCESS_H */
#endif /* _ASM_SCORE_UACCESS_H */
#ifndef _ASM_SCORE_USER_H
#define _ASM_SCORE_USER_H
struct user_regs_struct {
unsigned long regs[32];
unsigned long cel;
unsigned long ceh;
unsigned long sr0; /* cnt */
unsigned long sr1; /* lcr */
unsigned long sr2; /* scr */
unsigned long cp0_epc;
unsigned long cp0_ema;
unsigned long cp0_psr;
unsigned long cp0_ecr;
unsigned long cp0_condition;
};
#endif /* _ASM_SCORE_USER_H */
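user_regs_struct mirrors the saved register frame in the layout that ptrace register requests and ELF core dumps expose to user space. A sketch of the kernel-side conversion such a layout typically backs; the helper name is invented and only the fields visible in this diff are copied explicitly:

```c
#include <asm/ptrace.h>
#include <asm/user.h>

/* Hypothetical helper: flatten a live pt_regs into the user-visible layout. */
static void pt_regs_to_user_regs(const struct pt_regs *regs,
				 struct user_regs_struct *uregs)
{
	unsigned int i;

	for (i = 0; i < 32; i++)
		uregs->regs[i] = regs->regs[i];
	uregs->cel           = regs->cel;
	uregs->cp0_psr       = regs->cp0_psr;
	uregs->cp0_ecr       = regs->cp0_ecr;
	uregs->cp0_condition = regs->cp0_condition;
	/* ...remaining control registers would be copied the same way. */
}
```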