Commit c8d8170f authored by Linus Torvalds

Merge git://git.linux-xtensa.org/kernel/xtensa-feed

* git://git.linux-xtensa.org/kernel/xtensa-feed:
  Xtensa: use asm-generic/fcntl.h
  [XTENSA] Remove non-rt signal handling
  [XTENSA] Move common sections into bss sections
  [XTENSA] clean-up header files
  [XTENSA] Use generic 64-bit division
  [XTENSA] Remove multi-exported symbols from xtensa_ksyms.c
  [XTENSA] fix sources using deprecated assembler directive
  [XTENSA] Spelling fixes in arch/xtensa
  [XTENSA] fix bit operations in bitops.h
parents 34750bb1 df5e3870
......@@ -39,6 +39,7 @@ int main(void)
DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
......
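An aside on the mechanism this hunk extends: asm_offsets.c is only ever compiled to assembly, and each DEFINE() emits a marker that the build turns into a #define usable from entry.S (the new PT_ICOUNTLEVEL pairs with the icountlevel field added to struct pt_regs later in this series). A minimal stand-alone sketch of that trick, using the common "->" marker convention; the exact macro and marker format in arch/xtensa may differ, and the struct below is a stand-in, not the kernel one:

/* Build with "gcc -S" and look for "->" lines in the generated .s file;
 * a sed script normally rewrites them into #define lines in a generated
 * header that assembly code can include. */
#include <stddef.h>

struct pt_regs_sketch {                 /* stand-in, not the kernel struct */
	unsigned long pc;
	unsigned long icountlevel;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(PT_ICOUNTLEVEL_SKETCH,
	       offsetof(struct pt_regs_sketch, icountlevel));
	return 0;
}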
......@@ -125,8 +125,9 @@ _user_exception:
movi a2, 0
rsr a3, SAR
wsr a2, ICOUNTLEVEL
xsr a2, ICOUNTLEVEL
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
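For context on the wsr-to-xsr change above: xsr exchanges an address register with a special register, so with a2 preloaded to zero this single instruction both captures the previous ICOUNTLEVEL (written out to PT_ICOUNTLEVEL just after it) and clears the register, which stops ICOUNT-based single stepping while the kernel runs. _kernel_exception below receives the identical change.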
......@@ -276,8 +277,9 @@ _kernel_exception:
movi a2, 0
rsr a3, SAR
wsr a2, ICOUNTLEVEL
xsr a2, ICOUNTLEVEL
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
......@@ -330,14 +332,16 @@ _kernel_exception:
common_exception:
/* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */
/* Save some registers, disable loops and clear the syscall flag. */
rsr a2, DEBUGCAUSE
rsr a3, EPC_1
s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC
movi a2, -1
rsr a3, EXCVADDR
s32i a2, a1, PT_SYSCALL
movi a2, 0
s32i a3, a1, PT_EXCVADDR
xsr a2, LCOUNT
......@@ -450,27 +454,8 @@ common_exception_return:
/* Restore the state of the task and return from the exception. */
/* If we are returning from a user exception, and the process
* to run next has PT_SINGLESTEP set, we want to setup
* ICOUNT and ICOUNTLEVEL to step one instruction.
* PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
*/
4: /* a2 holds GET_CURRENT(a2,a1) */
l32i a3, a2, TI_TASK
l32i a3, a3, TASK_PTRACE
bbci.l a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set
movi a3, -2 # PT_SINGLESTEP flag is set,
movi a4, 1 # icountlevel of 1 means it won't
wsr a3, ICOUNT # start counting until after rfe
wsr a4, ICOUNTLEVEL # so setup icount & icountlevel.
isync
1:
#if XCHAL_EXTRA_SA_SIZE
/* For user exceptions, restore the extra state from the user's TCB. */
......@@ -665,6 +650,13 @@ common_exception_exit:
wsr a3, LEND
wsr a2, LCOUNT
/* We control single stepping through the ICOUNTLEVEL register. */
l32i a2, a1, PT_ICOUNTLEVEL
movi a3, -2
wsr a2, ICOUNTLEVEL
wsr a3, ICOUNT
/* Check if it was double exception. */
l32i a0, a1, PT_DEPC
......
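The net effect, compared with the block deleted above: the single-step setting now travels with the task in pt_regs->icountlevel (captured at exception entry by the xsr added earlier) instead of being recomputed from the TASK_PTRACE/PT_SINGLESTEP flag on every return, and ICOUNT is still preset to -2 before the return, exactly as the deleted code did.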
......@@ -19,6 +19,8 @@
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <linux/linkage.h>
/*
* This module contains the entry code for kernel images. It performs the
* minimal setup needed to call the generic C routines.
......@@ -227,13 +229,14 @@ _startup:
should_never_return:
j should_never_return
/* Define some common data structures here. We define them
* here in this assembly file due to their unusual alignment
* requirements.
*/
.comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
.comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
.comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
.comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
/*
* BSS section
*/
.section ".bss.page_aligned", "w"
ENTRY(swapper_pg_dir)
.fill PAGE_SIZE, 1, 0
ENTRY(empty_zero_page)
.fill PAGE_SIZE, 1, 0
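The .comm definitions above become real, page-aligned BSS objects. The same section can also be targeted from C; a hedged illustration follows, assuming a 4 KiB page size and using a made-up symbol name (the kernel itself keeps these objects in head.S):

/* Zero-initialized data in a ".bss."-prefixed section stays in BSS, and the
 * linker script change later in this patch collects ".bss.page_aligned"
 * input sections into the .bss output section. */
#define SKETCH_PAGE_SIZE 4096

unsigned char sketch_pgdir[SKETCH_PAGE_SIZE]
	__attribute__((section(".bss.page_aligned"), aligned(SKETCH_PAGE_SIZE)));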
......@@ -401,7 +401,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
* Also, think for a moment about likes of floppy.c that
* include architecture specific parts. They may want to redefine ins/outs.
*
* We do not use horroble macroses here because we want to
* We do not use horrible macros here because we want to
* advance pointer by sizeof(size).
*/
void outsb(unsigned long addr, const void *src, unsigned long count) {
......
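The comment's point is that each out*s variant advances its source pointer by its own element size rather than going through one shared macro. A hedged sketch of that shape for a 16-bit variant (the real bodies are elided in this hunk; write_port16 and the names here are stand-ins, not the kernel's I/O accessors):

#include <stdint.h>

static void write_port16(unsigned long addr, uint16_t value)
{
	(void)addr; (void)value;        /* stand-in for the real port access */
}

static void outsw_sketch(unsigned long addr, const void *src, unsigned long count)
{
	const uint16_t *p = src;        /* pointer advances by sizeof(uint16_t) */

	while (count--)
		write_port16(addr, *p++);
}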
......@@ -41,6 +41,7 @@
#include <asm/platform.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/param.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
......
......@@ -84,9 +84,7 @@ SECTIONS
{
/* The .head.text section must be the first section! */
*(.head.text)
*(.literal)
TEXT_TEXT
*(.srom.text)
*(.literal .text)
VMLINUX_SYMBOL(__sched_text_start) = .;
*(.sched.literal .sched.text)
VMLINUX_SYMBOL(__sched_text_end) = .;
......@@ -96,6 +94,7 @@ SECTIONS
}
_etext = .;
PROVIDE (etext = .);
. = ALIGN(16);
......@@ -103,32 +102,6 @@ SECTIONS
/* Relocation table */
. = ALIGN(16);
__boot_reloc_table_start = ABSOLUTE(.);
__relocate : {
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
#if 0
RELOCATE_ENTRY(_KernelExceptionVector_literal,
.KernelExceptionVector.literal);
#endif
RELOCATE_ENTRY(_KernelExceptionVector_text,
.KernelExceptionVector.text);
#if 0
RELOCATE_ENTRY(_UserExceptionVector_literal,
.UserExceptionVector.literal);
#endif
RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text);
RELOCATE_ENTRY(_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text);
}
__boot_reloc_table_end = ABSOLUTE(.) ;
.fixup : { *(.fixup) }
. = ALIGN(16);
......@@ -145,8 +118,7 @@ SECTIONS
_fdata = .;
.data :
{
DATA_DATA
CONSTRUCTORS
*(.data) CONSTRUCTORS
. = ALIGN(XCHAL_ICACHE_LINESIZE);
*(.data.cacheline_aligned)
}
......@@ -174,6 +146,22 @@ SECTIONS
__tagtable_begin = .;
*(.taglist)
__tagtable_end = .;
. = ALIGN(16);
__boot_reloc_table_start = ABSOLUTE(.);
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
RELOCATE_ENTRY(_KernelExceptionVector_text,
.KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text);
RELOCATE_ENTRY(_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text);
__boot_reloc_table_end = ABSOLUTE(.) ;
}
. = ALIGN(XCHAL_ICACHE_LINESIZE);
......@@ -194,16 +182,6 @@ SECTIONS
SECURITY_INIT
. = ALIGN(4);
__start___ftr_fixup = .;
__ftr_fixup : { *(__ftr_fixup) }
__stop___ftr_fixup = .;
. = ALIGN(4096);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
......@@ -212,6 +190,12 @@ SECTIONS
__initramfs_end = .;
#endif
. = ALIGN(4096);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
/* We need this dummy segment here */
. = ALIGN(4);
......@@ -273,9 +257,9 @@ SECTIONS
/* BSS section */
_bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss : { *(COMMON) *(.bss) }
.bss : { *(.bss.page_aligned) *(.bss) }
_bss_end = .;
_end = .;
/* only used by the boot loader */
......@@ -293,16 +277,16 @@ SECTIONS
*(.ResetVector.text)
}
/* Sections to be discarded */
/DISCARD/ :
{
*(.text.exit)
*(.text.exit.literal)
*(.data.exit)
*(.exit.literal .exit.text)
*(.exit.data)
*(.exitcall.exit)
}
.xt.lit : { *(.xt.lit) }
.xt.prop : { *(.xt.prop) }
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
......
......@@ -38,21 +38,10 @@
/*
* String functions
*/
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/*
......
......@@ -25,18 +25,18 @@
/*
* char *__strncpy_user(char *dst, const char *src, size_t len)
*/
.text
.begin literal
.align 4
.Lmask0:
.byte 0xff, 0x00, 0x00, 0x00
.Lmask1:
.byte 0x00, 0xff, 0x00, 0x00
.Lmask2:
.byte 0x00, 0x00, 0xff, 0x00
.Lmask3:
.byte 0x00, 0x00, 0x00, 0xff
.end literal
#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif
# Register use
# a0/ return address
......@@ -53,6 +53,7 @@
# a11/ dst
# a12/ tmp
.text
.align 4
.global __strncpy_user
.type __strncpy_user,@function
......@@ -61,10 +62,10 @@ __strncpy_user:
# a2/ dst, a3/ src, a4/ len
mov a11, a2 # leave dst in return value register
beqz a4, .Lret # if len is zero
l32r a5, .Lmask0 # mask for byte 0
l32r a6, .Lmask1 # mask for byte 1
l32r a7, .Lmask2 # mask for byte 2
l32r a8, .Lmask3 # mask for byte 3
movi a5, MASK0 # mask for byte 0
movi a6, MASK1 # mask for byte 1
movi a7, MASK2 # mask for byte 2
movi a8, MASK3 # mask for byte 3
bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned
bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned
.Lsrcaligned: # return here when src is word-aligned
......
......@@ -24,18 +24,18 @@
/*
* size_t __strnlen_user(const char *s, size_t len)
*/
.text
.begin literal
.align 4
.Lmask0:
.byte 0xff, 0x00, 0x00, 0x00
.Lmask1:
.byte 0x00, 0xff, 0x00, 0x00
.Lmask2:
.byte 0x00, 0x00, 0xff, 0x00
.Lmask3:
.byte 0x00, 0x00, 0x00, 0xff
.end literal
#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif
# Register use:
# a2/ src
......@@ -48,6 +48,7 @@
# a9/ tmp
# a10/ tmp
.text
.align 4
.global __strnlen_user
.type __strnlen_user,@function
......@@ -56,10 +57,10 @@ __strnlen_user:
# a2/ s, a3/ len
addi a4, a2, -4 # because we overincrement at the end;
# we compensate with load offsets of 4
l32r a5, .Lmask0 # mask for byte 0
l32r a6, .Lmask1 # mask for byte 1
l32r a7, .Lmask2 # mask for byte 2
l32r a8, .Lmask3 # mask for byte 3
movi a5, MASK0 # mask for byte 0
movi a6, MASK1 # mask for byte 1
movi a7, MASK2 # mask for byte 2
movi a8, MASK3 # mask for byte 3
bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned
bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned
......
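A hedged C model of the per-byte masks the two routines above now build with movi instead of an l32r literal pool: each MASKn selects memory byte n of a 32-bit word after it has been loaded, so testing (word & MASKn) == 0 asks whether byte n is a NUL terminator, which is what the bnone branches do one aligned word at a time. The mask values mirror the #ifdef __XTENSA_EB__ block in the diff; the test harness around them is illustrative and uses the host's endianness.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define MASK0 0xff000000u
# define MASK1 0x00ff0000u
# define MASK2 0x0000ff00u
# define MASK3 0x000000ffu
#else
# define MASK0 0x000000ffu
# define MASK1 0x0000ff00u
# define MASK2 0x00ff0000u
# define MASK3 0xff000000u
#endif

int main(void)
{
	const char buf[4] = { 'a', 'b', '\0', 'd' };
	uint32_t word;

	memcpy(&word, buf, sizeof(word));       /* one aligned 32-bit load */
	assert((word & MASK0) != 0);            /* byte 0 = 'a' */
	assert((word & MASK1) != 0);            /* byte 1 = 'b' */
	assert((word & MASK2) == 0);            /* byte 2 = NUL: string ends here */
	return 0;
}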
......@@ -205,7 +205,7 @@ void __init init_mmu (void)
/* Writing zeros to the <t>TLBCFG special registers ensure
* that valid values exist in the register. For existing
* PGSZID<w> fields, zero selects the first element of the
* page-size array. For nonexistant PGSZID<w> fields, zero is
* page-size array. For nonexistent PGSZID<w> fields, zero is
* the best value to write. Also, when changing PGSZID<w>
* fields, the corresponding TLB must be flushed.
*/
......
......@@ -473,7 +473,7 @@ static int iss_net_open(struct net_device *dev)
netif_start_queue(dev);
/* clear buffer - it can happen that the host side of the interface
* is full when we gethere. In this case, new data is never queued,
* is full when we get here. In this case, new data is never queued,
* SIGIOs never arrive, and the net never works.
*/
while ((err = iss_net_rx(dev)) > 0)
......
......@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_BITOPS_H
......@@ -31,53 +31,30 @@
#if XCHAL_HAVE_NSA
static __inline__ int __cntlz (unsigned long x)
static inline unsigned long __cntlz (unsigned long x)
{
int lz;
asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
return 31 - lz;
return lz;
}
#else
static __inline__ int __cntlz (unsigned long x)
{
unsigned long sum, x1, x2, x4, x8, x16;
x1 = x & 0xAAAAAAAA;
x2 = x & 0xCCCCCCCC;
x4 = x & 0xF0F0F0F0;
x8 = x & 0xFF00FF00;
x16 = x & 0xFFFF0000;
sum = x2 ? 2 : 0;
sum += (x16 != 0) * 16;
sum += (x8 != 0) * 8;
sum += (x4 != 0) * 4;
sum += (x1 != 0);
return sum;
}
#endif
/*
* ffz: Find first zero in word. Undefined if no zero exists.
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
static __inline__ int ffz(unsigned long x)
static inline int ffz(unsigned long x)
{
if ((x = ~x) == 0)
return 32;
return __cntlz(x & -x);
return 31 - __cntlz(~x & -~x);
}
/*
* __ffs: Find first bit set in word. Return 0 for bit 0
*/
static __inline__ int __ffs(unsigned long x)
static inline int __ffs(unsigned long x)
{
return __cntlz(x & -x);
return 31 - __cntlz(x & -x);
}
/*
......@@ -86,9 +63,9 @@ static __inline__ int __ffs(unsigned long x)
* differs in spirit from the above ffz (man ffs).
*/
static __inline__ int ffs(unsigned long x)
static inline int ffs(unsigned long x)
{
return __cntlz(x & -x) + 1;
return 32 - __cntlz(x & -x);
}
/*
......@@ -96,20 +73,36 @@ static __inline__ int ffs(unsigned long x)
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __inline__ int fls (unsigned int x)
static inline int fls (unsigned int x)
{
return __cntlz(x);
return 32 - __cntlz(x);
}
#else
/* Use the generic implementation if we don't have the nsa/nsau instructions. */
# include <asm-generic/bitops/ffs.h>
# include <asm-generic/bitops/__ffs.h>
# include <asm-generic/bitops/ffz.h>
# include <asm-generic/bitops/fls.h>
#endif
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#ifdef __XTENSA_EL__
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
# define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr), (unsigned long*)(addr))
# define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr), (unsigned long*)(addr))
#elif defined(__XTENSA_EB__)
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
# define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr) ^ 0x18, (unsigned long*)(addr))
# define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr) ^ 0x18, (unsigned long*)(addr))
#else
# error processor byte order undefined!
#endif
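A worked instance of the big-endian mapping above: for nr = 0 (the LSB of memory byte 0), nr ^ 0x18 = 24, and when a big-endian CPU loads the containing 32-bit word, byte 0 becomes register bits 31..24, so its least significant bit is word bit 24. The XOR therefore converts ext2's little-endian, byte-based bit numbering into the word-based numbering that test_and_set_bit() and test_and_clear_bit() operate on.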
......
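To see that the new return expressions preserve the documented semantics (fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32, plus the ffz/__ffs/ffs family above), here is a small host-side check. __builtin_clz() stands in for nsau, with the x == 0 case handled explicitly because nsau returns 32 there while __builtin_clz(0) is undefined; the function names and test values are illustrative only.

#include <assert.h>
#include <stdint.h>

static unsigned int cntlz32(uint32_t x)
{
	return x ? (unsigned int)__builtin_clz(x) : 32;
}

static int test_ffs(uint32_t x)  { return 32 - cntlz32(x & -x); }  /* ffs(): 1-based */
static int test__ffs(uint32_t x) { return 31 - cntlz32(x & -x); }  /* __ffs(): 0-based */
static int test_fls(uint32_t x)  { return 32 - cntlz32(x); }
static int test_ffz(uint32_t x)  { return 31 - cntlz32(~x & -~x); }

int main(void)
{
	assert(test_fls(0) == 0 && test_fls(1) == 1 && test_fls(0x80000000u) == 32);
	assert(test_ffs(0) == 0 && test_ffs(1) == 1 && test_ffs(0x80000000u) == 32);
	assert(test__ffs(8) == 3);
	assert(test_ffz(0x0000000fu) == 4);     /* first zero bit is bit 4 */
	return 0;
}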
......@@ -12,6 +12,7 @@
#define _XTENSA_BYTEORDER_H
#include <asm/types.h>
#include <linux/compiler.h>
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
......@@ -78,4 +79,4 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
# error processor byte order undefined!
#endif
#endif /* __ASM_XTENSA_BYTEORDER_H */
#endif /* _XTENSA_BYTEORDER_H */
......@@ -64,6 +64,7 @@ typedef struct {
# define COPROCESSOR_INFO_SIZE 8
# endif
#endif
#endif /* XCHAL_HAVE_CP */
#ifndef __ASSEMBLY__
......@@ -74,8 +75,11 @@ extern void save_coprocessor_registers(void*, int);
# else
# define release_coprocessors(task)
# endif
#endif
#endif
typedef unsigned char cp_state_t[XTENSA_CP_EXTRA_SIZE]
__attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_COPROCESSOR_H */
......@@ -5,21 +5,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_DIV64_H
#define _XTENSA_DIV64_H
#include <linux/types.h>
#include <asm-generic/div64.h>
#define do_div(n,base) ({ \
int __res = n % ((unsigned int) base); \
n /= (unsigned int) base; \
__res; })
static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
return dividend / divisor;
}
#endif
#endif /* _XTENSA_DIV64_H */
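For reference, the contract picked up from asm-generic/div64.h is the usual kernel one: do_div(n, base) divides the 64-bit variable n in place by a 32-bit base and evaluates to the remainder. A minimal host-side sketch of that contract follows; DO_DIV_DEMO and the values are illustrative, not the kernel implementation, and the macro uses the same GNU statement-expression form the kernel macro relies on.

#include <assert.h>
#include <stdint.h>

#define DO_DIV_DEMO(n, base) ({                     \
	uint32_t __rem = (uint32_t)((n) % (base));  \
	(n) /= (base);                              \
	__rem;                                      \
})

int main(void)
{
	uint64_t ns = 1234567891ULL;
	uint32_t rem = DO_DIV_DEMO(ns, 1000000u);   /* ns becomes the quotient */

	assert(ns == 1234 && rem == 567891u);
	return 0;
}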
......@@ -13,7 +13,6 @@
#ifndef _XTENSA_ELF_H
#define _XTENSA_ELF_H
#include <asm/variant/core.h>
#include <asm/ptrace.h>
/* Xtensa processor ELF architecture-magic number */
......@@ -49,7 +48,7 @@ typedef struct {
elf_greg_t lcount;
elf_greg_t sar;
elf_greg_t syscall;
elf_greg_t ar[XCHAL_NUM_AREGS];
elf_greg_t ar[64];
} xtensa_gregset_t;
#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
......
/*
* include/asm-xtensa/fcntl.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_FCNTL_H
#define _XTENSA_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
located on an ext2 file system */
#define O_ACCMODE 0003
#define O_RDONLY 00
#define O_WRONLY 01
#define O_RDWR 02
#define O_CREAT 0100 /* not fcntl */
#define O_EXCL 0200 /* not fcntl */
#define O_NOCTTY 0400 /* not fcntl */
#define O_TRUNC 01000 /* not fcntl */
#define O_APPEND 02000
#define O_NONBLOCK 04000
#define O_NDELAY O_NONBLOCK
#define O_SYNC 010000
#define FASYNC 020000 /* fcntl, for BSD compatibility */
#define O_DIRECT 040000 /* direct disk access hint */
#define O_LARGEFILE 0100000
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define F_SETFD 2 /* set/clear close_on_exec */
#define F_GETFL 3 /* get file->f_flags */
#define F_SETFL 4 /* set file->f_flags */
#define F_GETLK 5
#define F_SETLK 6
#define F_SETLKW 7
#define F_SETOWN 8 /* for sockets. */
#define F_GETOWN 9 /* for sockets. */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
/* for posix fcntl() and lockf() */
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
/* for old implementation of bsd flock () */
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
/* for leases */
#define F_INPROGRESS 16
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock */
#define LOCK_READ 64 /* ... Which allows concurrent read operations */
#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
struct flock {
short l_type;
short l_whence;
off_t l_start;
off_t l_len;
pid_t l_pid;
};
struct flock64 {
short l_type;
short l_whence;
loff_t l_start;
loff_t l_len;
pid_t l_pid;
};
#define F_LINUX_SPECIFIC_BASE 1024
#endif /* _XTENSA_FCNTL_H */
#include <asm-generic/fcntl.h>
......@@ -14,6 +14,7 @@
#define _XTENSA_MMU_CONTEXT_H
#include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
......
......@@ -131,6 +131,6 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */
......@@ -11,15 +11,13 @@
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <asm/variant/core.h>
#ifdef __KERNEL__
# define HZ 100 /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequnzy at which times() counts */
#endif
#define EXEC_PAGESIZE (1 << XCHAL_MMU_MIN_PTE_PAGE_SIZE)
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
......
......@@ -11,8 +11,6 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
#include <asm/variant/core.h>
/*
* Kernel stack
*
......@@ -101,7 +99,8 @@ struct pt_regs {
unsigned long windowbase; /* 48 */
unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */
int reserved[2]; /* 64 */
unsigned long icountlevel; /* 60 */
int reserved[1]; /* 64 */
/* Make sure the areg field is 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16)));
......@@ -113,6 +112,9 @@ struct pt_regs {
};
#ifdef __KERNEL__
#include <asm/variant/core.h>
# define task_pt_regs(tsk) ((struct pt_regs*) \
(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
......
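As a worked example of the task_pt_regs() layout, using the assumed, configuration-dependent values KERNEL_STACK_SIZE = 8192 and XCHAL_NUM_AREGS = 64: the macro reserves (64 - 16) * 4 = 192 bytes at the top of the kernel stack for the remaining address registers and places struct pt_regs immediately below that, at task_stack_page(tsk) + 8192 - 192 - sizeof(struct pt_regs). The new icountlevel member takes one of the two words previously covered by reserved[2], so the structure size and this layout are unchanged.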
......@@ -9,8 +9,6 @@
#ifndef _XTENSA_SHMPARAM_H
#define _XTENSA_SHMPARAM_H
#include <asm/processor.h>
/*
* Xtensa can have variable size caches, and if
* the size of single way is larger than the page size,
......
......@@ -5,21 +5,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2003 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_SIGCONTEXT_H
#define _XTENSA_SIGCONTEXT_H
#define _ASMLANGUAGE
#include <asm/processor.h>
#include <asm/coprocessor.h>
struct _cpstate {
unsigned char _cpstate[XTENSA_CP_EXTRA_SIZE];
} __attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
struct sigcontext {
unsigned long oldmask;
......@@ -27,18 +18,13 @@ struct sigcontext {
/* CPU registers */
unsigned long sc_pc;
unsigned long sc_ps;
unsigned long sc_wmask;
unsigned long sc_windowbase;
unsigned long sc_windowstart;
unsigned long sc_lbeg;
unsigned long sc_lend;
unsigned long sc_lcount;
unsigned long sc_sar;
unsigned long sc_depc;
unsigned long sc_dareg0;
unsigned long sc_treg[4];
unsigned long sc_areg[XCHAL_NUM_AREGS];
struct _cpstate *sc_cpstate;
unsigned long sc_acclo;
unsigned long sc_acchi;
unsigned long sc_a[16];
};
#endif /* __ASM_XTENSA_SIGCONTEXT_H */
#endif /* _XTENSA_SIGCONTEXT_H */
......@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_IRET 5 /* return with iret */
#define TIF_MEMDIE 6
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
......@@ -125,6 +126,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_IRET (1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
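A quick check of the new flag against the masks above: TIF_RESTORE_SIGMASK is bit 7, so _TIF_RESTORE_SIGMASK = 1 << 7 = 0x80, which already lies inside both _TIF_WORK_MASK (0x0000FFFE) and _TIF_ALLWORK_MASK (0x0000FFFF), so the work checks on return to user space pick it up without either mask needing to change.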
......
......@@ -485,8 +485,8 @@ __SYSCALL(217, sys_sched_get_priority_min, 1)
__SYSCALL(218, sys_sched_rr_get_interval, 2)
#define __NR_sched_yield 219
__SYSCALL(219, sys_sched_yield, 0)
#define __NR_sigreturn 222
__SYSCALL(222, xtensa_sigreturn, 0)
#define __NR_available222 222
__SYSCALL(222, sys_ni_syscall, 0)
/* Signal Handling */
......