Commit c8d8170f authored by Linus Torvalds

Merge git://git.linux-xtensa.org/kernel/xtensa-feed

* git://git.linux-xtensa.org/kernel/xtensa-feed:
  Xtensa: use asm-generic/fcntl.h
  [XTENSA] Remove non-rt signal handling
  [XTENSA] Move common sections into bss sections
  [XTENSA] clean-up header files
  [XTENSA] Use generic 64-bit division
  [XTENSA] Remove multi-exported symbols from xtensa_ksyms.c
  [XTENSA] fix sources using deprecated assembler directive
  [XTENSA] Spelling fixes in arch/xtensa
  [XTENSA] fix bit operations in bitops.h
parents 34750bb1 df5e3870
......@@ -39,6 +39,7 @@ int main(void)
DEFINE(PT_LEND, offsetof (struct pt_regs, lend));
DEFINE(PT_LCOUNT, offsetof (struct pt_regs, lcount));
DEFINE(PT_SAR, offsetof (struct pt_regs, sar));
DEFINE(PT_ICOUNTLEVEL, offsetof (struct pt_regs, icountlevel));
DEFINE(PT_SYSCALL, offsetof (struct pt_regs, syscall));
DEFINE(PT_AREG, offsetof (struct pt_regs, areg[0]));
DEFINE(PT_AREG0, offsetof (struct pt_regs, areg[0]));
......
......@@ -125,8 +125,9 @@ _user_exception:
movi a2, 0
rsr a3, SAR
wsr a2, ICOUNTLEVEL
xsr a2, ICOUNTLEVEL
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
......@@ -276,8 +277,9 @@ _kernel_exception:
movi a2, 0
rsr a3, SAR
wsr a2, ICOUNTLEVEL
xsr a2, ICOUNTLEVEL
s32i a3, a1, PT_SAR
s32i a2, a1, PT_ICOUNTLEVEL
/* Rotate ws so that the current windowbase is at bit0. */
/* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
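The rotation in the two comments above is easier to see in C. A minimal sketch, assuming a hypothetical helper name and that only the low XCHAL_NUM_AREGS/4 bits matter (the assembly gets the same effect with ssr/src on the live WINDOWBASE/WINDOWSTART registers, and the C side of this patch uses the same expression in flush_window_regs_user()):

/* Rotate WINDOWSTART right by WINDOWBASE so that the bit belonging to
 * the current register window lands at bit 0.  'nframes' is the number
 * of 4-register window groups, i.e. XCHAL_NUM_AREGS / 4.  The final
 * mask is only needed in this standalone sketch. */
static unsigned long rotate_windowstart(unsigned long ws, unsigned long wb,
					unsigned long nframes)
{
	return ((ws >> wb) | (ws << (nframes - wb))) & ((1UL << nframes) - 1);
}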
......@@ -330,14 +332,16 @@ _kernel_exception:
common_exception:
/* Save EXCVADDR, DEBUGCAUSE, and PC, and clear LCOUNT */
/* Save some registers, disable loops and clear the syscall flag. */
rsr a2, DEBUGCAUSE
rsr a3, EPC_1
s32i a2, a1, PT_DEBUGCAUSE
s32i a3, a1, PT_PC
movi a2, -1
rsr a3, EXCVADDR
s32i a2, a1, PT_SYSCALL
movi a2, 0
s32i a3, a1, PT_EXCVADDR
xsr a2, LCOUNT
......@@ -450,27 +454,8 @@ common_exception_return:
/* Restore the state of the task and return from the exception. */
/* If we are returning from a user exception, and the process
* to run next has PT_SINGLESTEP set, we want to setup
* ICOUNT and ICOUNTLEVEL to step one instruction.
* PT_SINGLESTEP is set by sys_ptrace (ptrace.c)
*/
4: /* a2 holds GET_CURRENT(a2,a1) */
l32i a3, a2, TI_TASK
l32i a3, a3, TASK_PTRACE
bbci.l a3, PT_SINGLESTEP_BIT, 1f # jump if single-step flag is not set
movi a3, -2 # PT_SINGLESTEP flag is set,
movi a4, 1 # icountlevel of 1 means it won't
wsr a3, ICOUNT # start counting until after rfe
wsr a4, ICOUNTLEVEL # so setup icount & icountlevel.
isync
1:
#if XCHAL_EXTRA_SA_SIZE
/* For user exceptions, restore the extra state from the user's TCB. */
......@@ -665,6 +650,13 @@ common_exception_exit:
wsr a3, LEND
wsr a2, LCOUNT
/* We control single stepping through the ICOUNTLEVEL register. */
l32i a2, a1, PT_ICOUNTLEVEL
movi a3, -2
wsr a2, ICOUNTLEVEL
wsr a3, ICOUNT
/* Check if it was double exception. */
l32i a0, a1, PT_DEPC
......
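To connect the entry.S changes above with the C side: the per-task value now lives in pt_regs->icountlevel, is saved on every exception entry, and the exit path loads ICOUNTLEVEL from it while re-arming ICOUNT with -2, so single-stepping is driven entirely by the saved field. A hedged fragment mirroring the do_signal() hunks further down (kernel context assumed; the wrapper name is invented for illustration):

static void signal_singlestep_window(void)
{
	/* Stop single-stepping while the kernel builds the signal frame. */
	task_pt_regs(current)->icountlevel = 0;

	/* ... deliver the signal ... */

	/* Re-arm single-stepping if the tracer asked for it. */
	if (current->ptrace & PT_SINGLESTEP)
		task_pt_regs(current)->icountlevel = 1;
}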
......@@ -19,6 +19,8 @@
#include <asm/page.h>
#include <asm/cacheasm.h>
#include <linux/linkage.h>
/*
* This module contains the entry code for kernel images. It performs the
* minimal setup needed to call the generic C routines.
......@@ -227,13 +229,14 @@ _startup:
should_never_return:
j should_never_return
/* Define some common data structures here. We define them
* here in this assembly file due to their unusual alignment
* requirements.
*/
.comm swapper_pg_dir,PAGE_SIZE,PAGE_SIZE
.comm empty_bad_page_table,PAGE_SIZE,PAGE_SIZE
.comm empty_bad_page,PAGE_SIZE,PAGE_SIZE
.comm empty_zero_page,PAGE_SIZE,PAGE_SIZE
/*
* BSS section
*/
.section ".bss.page_aligned", "w"
ENTRY(swapper_pg_dir)
.fill PAGE_SIZE, 1, 0
ENTRY(empty_zero_page)
.fill PAGE_SIZE, 1, 0
......@@ -401,7 +401,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
* Also, think for a moment about likes of floppy.c that
* include architecture specific parts. They may want to redefine ins/outs.
*
* We do not use horroble macroses here because we want to
* We do not use horrible macros here because we want to
* advance pointer by sizeof(size).
*/
void outsb(unsigned long addr, const void *src, unsigned long count) {
......
......@@ -41,6 +41,7 @@
#include <asm/platform.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/param.h>
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = { 0, 24, 0, 0, 0, 80, 0, 0, 0, 24, 1, 16};
......
// TODO coprocessor stuff
/*
* linux/arch/xtensa/kernel/signal.c
* arch/xtensa/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*
* Joe Taylor <joe@tensilica.com>
* Chris Zankel <chris@zankel.net>
* Default platform functions.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005, 2006 Tensilica Inc.
* Copyright (C) 1991, 1992 Linus Torvalds
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
*
* Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com>
*/
#include <asm/variant/core.h>
#include <asm/coprocessor.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/personality.h>
#include <linux/freezer.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/coprocessor.h>
#include <asm/unistd.h>
#define DEBUG_SIG 0
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options,
struct rusage * ru);
asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
extern struct task_struct *coproc_owners[];
extern void release_all_cp (struct task_struct *);
/*
* Atomically swap in the new signal mask, and wait for a signal.
struct rt_sigframe
{
struct siginfo info;
struct ucontext uc;
cp_state_t cpstate;
unsigned char retcode[6];
unsigned int window[4];
};
/*
* Flush register windows stored in pt_regs to stack.
* Returns 1 for errors.
*
* Note that windowbase, windowstart, and wmask are not updated!
*/
int xtensa_sigsuspend(struct pt_regs *regs)
int
flush_window_regs_user(struct pt_regs *regs)
{
old_sigset_t mask = (old_sigset_t) regs->areg[3];
sigset_t saveset;
const unsigned long ws = regs->windowstart;
const unsigned long wb = regs->windowbase;
unsigned long sp = 0;
unsigned long wm;
int err = 1;
int base;
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
/* Return if no other frames. */
regs->areg[2] = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(regs, &saveset))
return -EINTR;
}
}
if (regs->wmask == 1)
return 0;
asmlinkage int
xtensa_rt_sigsuspend(struct pt_regs *regs)
{
sigset_t *unewset = (sigset_t *) regs->areg[4];
size_t sigsetsize = (size_t) regs->areg[3];
sigset_t saveset, newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
/* Rotate windowmask and skip empty frames. */
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
wm = (ws >> wb) | (ws << (XCHAL_NUM_AREGS / 4 - wb));
base = (XCHAL_NUM_AREGS / 4) - (regs->wmask >> 4);
/* For call8 or call12 frames, we need the previous stack pointer. */
regs->areg[2] = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(regs, &saveset))
return -EINTR;
}
}
if ((regs->wmask & 2) == 0)
if (__get_user(sp, (int*)(regs->areg[base * 4 + 1] - 12)))
goto errout;
asmlinkage int
xtensa_sigaction(int sig, const struct old_sigaction *act,
struct old_sigaction *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
/* Spill frames to stack. */
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
return -EFAULT;
__get_user(new_ka.sa.sa_flags, &act->sa_flags);
__get_user(mask, &act->sa_mask);
siginitset(&new_ka.sa.sa_mask, mask);
}
while (base < XCHAL_NUM_AREGS / 4) {
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
int m = (wm >> base);
int inc = 0;
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
return -EFAULT;
__put_user(old_ka.sa.sa_flags, &oact->sa_flags);
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
/* Save registers a4..a7 (call8) or a4...a11 (call12) */
return ret;
}
if (m & 2) { /* call4 */
inc = 1;
asmlinkage int
xtensa_sigaltstack(struct pt_regs *regs)
{
const stack_t *uss = (stack_t *) regs->areg[4];
stack_t *uoss = (stack_t *) regs->areg[3];
} else if (m & 4) { /* call8 */
if (copy_to_user((void*)(sp - 32),
&regs->areg[(base + 1) * 4], 16))
goto errout;
inc = 2;
if (regs->depc > 64)
panic ("Double exception sys_sigreturn\n");
} else if (m & 8) { /* call12 */
if (copy_to_user((void*)(sp - 48),
&regs->areg[(base + 1) * 4], 32))
goto errout;
inc = 3;
}
/* Save current frame a0..a3 under next SP */
return do_sigaltstack(uss, uoss, regs->areg[1]);
}
sp = regs->areg[((base + inc) * 4 + 1) % XCHAL_NUM_AREGS];
if (copy_to_user((void*)(sp - 16), &regs->areg[base * 4], 16))
goto errout;
/* Get current stack pointer for next loop iteration. */
sp = regs->areg[base * 4 + 1];
base += inc;
}
return 0;
errout:
return err;
}
/*
* Do a signal return; undo the signal stack.
* Note: We don't copy double exception 'regs', we have to finish double exc.
* first before we return to signal handler! This dbl.exc.handler might cause
* another double exception, but I think we are fine as the situation is the
* same as if we had returned to the signal handler and got an interrupt
* immediately...
*/
struct sigframe
{
struct sigcontext sc;
struct _cpstate cpstate;
unsigned long extramask[_NSIG_WORDS-1];
unsigned char retcode[6];
unsigned int reserved[4]; /* Reserved area for chaining */
unsigned int window[4]; /* Window of 4 registers for initial context */
};
struct rt_sigframe
static int
setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
struct pt_regs *regs, unsigned long mask)
{
struct siginfo info;
struct ucontext uc;
struct _cpstate cpstate;
unsigned char retcode[6];
unsigned int reserved[4]; /* Reserved area for chaining */
unsigned int window[4]; /* Window of 4 registers for initial context */
};
int err = 0;
extern void release_all_cp (struct task_struct *);
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(pc);
COPY(ps);
COPY(lbeg);
COPY(lend);
COPY(lcount);
COPY(sar);
#undef COPY
err |= flush_window_regs_user(regs);
err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
// FIXME restore_cpextra
static inline int
restore_cpextra (struct _cpstate *buf)
{
#if 0
/* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy
* context-switching mechanisms of CP exception handling.
* Also, we essentially discard any coprocessor state that the
* signal handler created. */
// err |= __copy_to_user (sc->sc_a, regs->areg, XCHAL_NUM_AREGS * 4)
struct task_struct *tsk = current;
release_all_cp(tsk);
return __copy_from_user(tsk->thread.cpextra, buf, XTENSA_CP_EXTRA_SIZE);
#if XCHAL_HAVE_CP
# error Coprocessors unsupported
err |= save_cpextra(cpstate);
err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
#endif
return 0;
}
/* Note: We don't copy double exception 'tregs', we have to finish double exc. first before we return to signal handler! This dbl.exc.handler might cause another double exception, but I think we are fine as the situation is the same as if we had returned to the signal handler and got an interrupt immediately...
*/
/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
return err;
}
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc)
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
struct thread_struct *thread;
unsigned int err = 0;
unsigned long ps;
struct _cpstate *buf;
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(pc);
COPY(depc);
COPY(wmask);
COPY(lbeg);
COPY(lend);
COPY(lcount);
COPY(sar);
COPY(windowbase);
COPY(windowstart);
#undef COPY
/* All registers were flushed to stack. Start with a pristine frame. */
regs->wmask = 1;
regs->windowbase = 0;
regs->windowstart = 1;
/* For PS, restore only PS.CALLINC.
* Assume that all other bits are either the same as for the signal
* handler, or the user mode value doesn't matter (e.g. PS.OWB).
*/
err |= __get_user(ps, &sc->sc_ps);
regs->ps = (regs->ps & ~PS_CALLINC_MASK)
| (ps & PS_CALLINC_MASK);
regs->ps = (regs->ps & ~PS_CALLINC_MASK) | (ps & PS_CALLINC_MASK);
/* Additional corruption checks */
if ((regs->windowbase >= (XCHAL_NUM_AREGS/4))
|| ((regs->windowstart & ~((1<<(XCHAL_NUM_AREGS/4)) - 1)) != 0) )
err = 1;
if ((regs->lcount > 0)
&& ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
&& ((regs->lbeg > TASK_SIZE) || (regs->lend > TASK_SIZE)) )
err = 1;
/* Restore extended register state.
* See struct thread_struct in processor.h.
*/
thread = &current->thread;
err |= __copy_from_user (regs->areg, sc->sc_areg, XCHAL_NUM_AREGS*4);
err |= __get_user(buf, &sc->sc_cpstate);
if (buf) {
if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
goto badframe;
err |= restore_cpextra(buf);
}
regs->syscall = -1; /* disable syscall checks */
return err;
badframe:
return 1;
}
static inline void
flush_my_cpstate(struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
#if 0 // FIXME
for (i = 0; i < XCHAL_CP_NUM; i++) {
if (tsk == coproc_owners[i]) {
xthal_validate_cp(i);
xthal_save_cpregs(tsk->thread.cpregs_ptr[i], i);
err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);
/* Invalidate and "disown" the cp to allow
* callers the chance to reset cp state in the
* task_struct. */
#if XCHAL_HAVE_CP
# error Coprocessors unsupported
/* The signal handler may have used coprocessors in which
* case they are still enabled. We disable them to force a
* reloading of the original task's CP state by the lazy
* context-switching mechanisms of CP exception handling.
* Also, we essentially discard any coprocessor state that the
* signal handler created. */
xthal_invalidate_cp(i);
coproc_owners[i] = 0;
}
if (!err) {
struct task_struct *tsk = current;
release_all_cp(tsk);
err |= __copy_from_user(tsk->thread.cpextra, sc->sc_cpstate,
XTENSA_CP_EXTRA_SIZE);
}
#endif
local_irq_restore(flags);
}
/* Return codes:
0: nothing saved
1: stuff to save, successful
-1: stuff to save, error happened
*/
static int
save_cpextra (struct _cpstate *buf)
{
#if XCHAL_CP_NUM == 0
return 0;
#else
/* FIXME: If a task has never used a coprocessor, there is
* no need to save and restore anything. Tracking this
* information would allow us to optimize this section.
* Perhaps we can use current->used_math or (current->flags &
* PF_USEDFPU) or define a new field in the thread
* structure. */
/* We flush any live, task-owned cp state to the task_struct,
* then copy it all to the sigframe. Then we clear all
* cp/extra state in the task_struct, effectively
* clearing/resetting all cp/extra state for the signal
* handler (cp-exception handling will load these new values
* into the cp/extra registers.) This step is important for
* things like a floating-point cp, where the OS must reset
* the FCR to the default rounding mode. */
int err = 0;
struct task_struct *tsk = current;
flush_my_cpstate(tsk);
/* Note that we just copy everything: 'extra' and 'cp' state together.*/
err |= __copy_to_user(buf, tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
memset(tsk->thread.cp_save, 0, XTENSA_CP_EXTRA_SIZE);
#if (XTENSA_CP_EXTRA_SIZE == 0)
#error Sanity check on memset above, cpextra_size should not be zero.
#endif
return err ? -1 : 1;
#endif
}
static int
setup_sigcontext(struct sigcontext *sc, struct _cpstate *cpstate,
struct pt_regs *regs, unsigned long mask)
{
struct thread_struct *thread;
int err = 0;
//printk("setup_sigcontext\n");
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(pc);
COPY(ps);
COPY(depc);
COPY(wmask);
COPY(lbeg);
COPY(lend);
COPY(lcount);
COPY(sar);
COPY(windowbase);
COPY(windowstart);
#undef COPY
/* Save extended register state.
* See struct thread_struct in processor.h.
*/
thread = &current->thread;
err |= __copy_to_user (sc->sc_areg, regs->areg, XCHAL_NUM_AREGS * 4);
err |= save_cpextra(cpstate);
err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
/* non-iBCS2 extensions.. */
err |= __put_user(mask, &sc->oldmask);
regs->syscall = -1; /* disable syscall checks */
return err;
}
asmlinkage int xtensa_sigreturn(struct pt_regs *regs)
{
struct sigframe *frame = (struct sigframe *)regs->areg[1];
sigset_t set;
if (regs->depc > 64)
panic ("Double exception sys_sigreturn\n");
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (restore_sigcontext(regs, &frame->sc))
goto badframe;
return regs->areg[2];
badframe:
force_sig(SIGSEGV, current);
return 0;
}
/*
* Do a signal return; undo the signal stack.
*/
asmlinkage int xtensa_rt_sigreturn(struct pt_regs *regs)
asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
long a4, long a5, struct pt_regs *regs)
{
struct rt_sigframe *frame = (struct rt_sigframe *)regs->areg[1];
struct rt_sigframe __user *frame;
sigset_t set;
stack_t st;
int ret;
if (regs->depc > 64)
{
printk("!!!!!!! DEPC !!!!!!!\n");
return 0;
}
panic("rt_sigreturn in double exception!\n");
frame = (struct rt_sigframe __user *) regs->areg[1];
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
......@@ -407,13 +249,11 @@ asmlinkage int xtensa_rt_sigreturn(struct pt_regs *regs)
if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
goto badframe;
ret = regs->areg[2];
if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
if (do_sigaltstack(&frame->uc.uc_stack, NULL, regs->areg[1]) == -EFAULT)
goto badframe;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
do_sigaltstack(&st, NULL, regs->areg[1]);
return ret;
......@@ -422,77 +262,50 @@ asmlinkage int xtensa_rt_sigreturn(struct pt_regs *regs)
return 0;
}
/*
* Set up a signal frame.
*/
/*
* Determine which stack to use..
* Set up a signal frame.
*/
static inline void *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void *)((sp - frame_size) & -16ul);
}
#define USE_SIGRETURN 0
#define USE_RT_SIGRETURN 1
static int
gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
gen_return_code(unsigned char *codemem)
{
unsigned int retcall;
int err = 0;
#if 0
/* Ignoring SA_RESTORER for now; it's supposed to be obsolete,
* and the xtensa glibc doesn't use it.
/*
* The 12-bit immediate is really split up within the 24-bit MOVI
* instruction. As long as the above system call numbers fit within
* 8-bits, the following code works fine. See the Xtensa ISA for
* details.
*/
if (ka->sa.sa_flags & SA_RESTORER) {
regs->pr = (unsigned long) ka->sa.sa_restorer;
} else
#endif /* 0 */
{
#if (__NR_sigreturn > 255) || (__NR_rt_sigreturn > 255)
/* The 12-bit immediate is really split up within the 24-bit MOVI
* instruction. As long as the above system call numbers fit within
* 8-bits, the following code works fine. See the Xtensa ISA for
* details.
*/
#error Generating the MOVI instruction below breaks!
#if __NR_rt_sigreturn > 255
# error Generating the MOVI instruction below breaks!
#endif
retcall = use_rt_sigreturn ? __NR_rt_sigreturn : __NR_sigreturn;
#ifdef __XTENSA_EB__ /* Big Endian version */
/* Generate instruction: MOVI a2, retcall */
err |= __put_user(0x22, &codemem[0]);
err |= __put_user(0x0a, &codemem[1]);
err |= __put_user(retcall, &codemem[2]);
/* Generate instruction: SYSCALL */
err |= __put_user(0x00, &codemem[3]);
err |= __put_user(0x05, &codemem[4]);
err |= __put_user(0x00, &codemem[5]);
/* Generate instruction: MOVI a2, __NR_rt_sigreturn */
err |= __put_user(0x22, &codemem[0]);
err |= __put_user(0x0a, &codemem[1]);
err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
/* Generate instruction: SYSCALL */
err |= __put_user(0x00, &codemem[3]);
err |= __put_user(0x05, &codemem[4]);
err |= __put_user(0x00, &codemem[5]);
#elif defined __XTENSA_EL__ /* Little Endian version */
/* Generate instruction: MOVI a2, retcall */
err |= __put_user(0x22, &codemem[0]);
err |= __put_user(0xa0, &codemem[1]);
err |= __put_user(retcall, &codemem[2]);
/* Generate instruction: SYSCALL */
err |= __put_user(0x00, &codemem[3]);
err |= __put_user(0x50, &codemem[4]);
err |= __put_user(0x00, &codemem[5]);
/* Generate instruction: MOVI a2, __NR_rt_sigreturn */
err |= __put_user(0x22, &codemem[0]);
err |= __put_user(0xa0, &codemem[1]);
err |= __put_user(__NR_rt_sigreturn, &codemem[2]);
/* Generate instruction: SYSCALL */
err |= __put_user(0x00, &codemem[3]);
err |= __put_user(0x50, &codemem[4]);
err |= __put_user(0x00, &codemem[5]);
#else
#error Must use compiler for Xtensa processors.
# error Must use compiler for Xtensa processors.
#endif
}
/* Flush generated code out of the data cache */
......@@ -504,97 +317,29 @@ gen_return_code(unsigned char *codemem, unsigned int use_rt_sigreturn)
return err;
}
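A hedged sketch of what the "split 12-bit immediate" comment refers to, for the little-endian case only. The field layout (op0 and t in the first byte, s and r in the second, imm8 in the third, with imm12[11:8] carried in the s field) is an assumption based on the Xtensa ISA's RRI8 format rather than something stated in the patch; the code above can hard-code 0xa0 because the syscall number fits in eight bits, so s stays zero:

/* Illustrative encoder for "movi a2, imm12; syscall" on a little-endian
 * core.  'codemem' and 'imm12' are hypothetical names. */
static void encode_movi_a2_syscall(unsigned char *codemem, unsigned int imm12)
{
	codemem[0] = 0x22;				/* op0 = 2, t = 2 (a2)    */
	codemem[1] = 0xa0 | ((imm12 >> 8) & 0xf);	/* r = 0xa, s = imm[11:8] */
	codemem[2] = imm12 & 0xff;			/* imm8 = imm[7:0]        */
	codemem[3] = 0x00;				/* SYSCALL, same bytes    */
	codemem[4] = 0x50;				/*  as generated above    */
	codemem[5] = 0x00;
}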
static void
set_thread_state(struct pt_regs *regs, void *stack, unsigned char *retaddr,
void *handler, unsigned long arg1, void *arg2, void *arg3)
{
/* Set up registers for signal handler */
start_thread(regs, (unsigned long) handler, (unsigned long) stack);
/* Set up a stack frame for a call4
* Note: PS.CALLINC is set to one by start_thread
*/
regs->areg[4] = (((unsigned long) retaddr) & 0x3fffffff) | 0x40000000;
regs->areg[6] = arg1;
regs->areg[7] = (unsigned long) arg2;
regs->areg[8] = (unsigned long) arg3;
}
static void setup_frame(int sig, struct k_sigaction *ka,
static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe *frame;
struct rt_sigframe *frame;
int err = 0;
int signal;
unsigned long sp, ra;
frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
if (regs->depc > 64)
{
printk("!!!!!!! DEPC !!!!!!!\n");
return;
}
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
goto give_sigsegv;
signal = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= setup_sigcontext(&frame->sc, &frame->cpstate, regs, set->sig[0]);
sp = regs->areg[1];
if (_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! on_sig_stack(sp)) {
sp = current->sas_ss_sp + current->sas_ss_size;
}
/* Create sys_sigreturn syscall in stack frame */
err |= gen_return_code(frame->retcode, USE_SIGRETURN);
if (err)
goto give_sigsegv;
/* Create signal handler execution context.
* Return context not modified until this point.
*/
set_thread_state(regs, frame, frame->retcode,
ka->sa.sa_handler, signal, &frame->sc, NULL);
/* Set access mode to USER_DS. Nomenclature is outdated, but
* functionality is used in uaccess.h
*/
set_fs(USER_DS);
#if DEBUG_SIG
printk("SIG deliver (%s:%d): signal=%d sp=%p pc=%08x\n",
current->comm, current->pid, signal, frame, regs->pc);
#endif
return;
give_sigsegv:
if (sig == SIGSEGV)
ka->sa.sa_handler = SIG_DFL;
force_sig(SIGSEGV, current);
}
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe *frame;
int err = 0;
int signal;
frame = (void *)((sp - sizeof(*frame)) & -16ul);
frame = get_sigframe(ka, regs->areg[1], sizeof(*frame));
if (regs->depc > 64)
panic ("Double exception sys_sigreturn\n");
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
goto give_sigsegv;
}
signal = current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
......@@ -602,9 +347,12 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig;
err |= copy_siginfo_to_user(&frame->info, info);
if (ka->sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, info);
}
/* Create the user context. */
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user((void *)current->sas_ss_sp,
......@@ -617,16 +365,31 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Create sys_rt_sigreturn syscall in stack frame */
err |= gen_return_code(frame->retcode, USE_RT_SIGRETURN);
if (err)
err |= gen_return_code(frame->retcode);
if (err) {
goto give_sigsegv;
}
/* Create signal handler execution context.
/*
* Create signal handler execution context.
* Return context not modified until this point.
*/
set_thread_state(regs, frame, frame->retcode,
ka->sa.sa_handler, signal, &frame->info, &frame->uc);
/* Set up registers for signal handler */
start_thread(regs, (unsigned long) ka->sa.sa_handler,
(unsigned long) frame);
/* Set up a stack frame for a call4
* Note: PS.CALLINC is set to one by start_thread
*/
ra = (unsigned long) frame->retcode;
regs->areg[4] = (((unsigned long) ra) & 0x3fffffff) | 0x40000000;
regs->areg[6] = (unsigned long) signal;
regs->areg[7] = (unsigned long) &frame->info;
regs->areg[8] = (unsigned long) &frame->uc;
/* Set access mode to USER_DS. Nomenclature is outdated, but
* functionality is used in uaccess.h
......@@ -646,6 +409,48 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
force_sig(SIGSEGV, current);
}
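The "areg[4] = (ra & 0x3fffffff) | 0x40000000" step above relies on the windowed-ABI convention that the top two bits of a return address carry the caller's window increment (1 for call4, 2 for call8, 3 for call12); retw combines them with the current PC's upper bits to rebuild the full return PC. A minimal sketch of that substitution (the helper name is made up here):

/* Turn an arbitrary code address into a call4-style return address:
 * keep the low 30 bits, force the window-increment field to 1. */
static unsigned long make_call4_return_address(unsigned long retaddr)
{
	return (retaddr & 0x3fffffff) | 0x40000000;
}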
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage long xtensa_rt_sigsuspend(sigset_t __user *unewset,
size_t sigsetsize,
long a2, long a3, long a4, long a5,
struct pt_regs *regs)
{
sigset_t saveset, newset;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(sigset_t))
return -EINVAL;
if (copy_from_user(&newset, unewset, sizeof(newset)))
return -EFAULT;
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
saveset = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
regs->areg[2] = -EINTR;
while (1) {
current->state = TASK_INTERRUPTIBLE;
schedule();
if (do_signal(regs, &saveset))
return -EINTR;
}
}
asmlinkage long xtensa_sigaltstack(const stack_t __user *uss,
stack_t __user *uoss,
long a2, long a3, long a4, long a5,
struct pt_regs *regs)
{
return do_sigaltstack(uss, uoss, regs->areg[1]);
}
/*
......@@ -663,51 +468,89 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
int signr;
struct k_sigaction ka;
if (!user_mode(regs))
return 0;
if (try_to_freeze())
goto no_signal;
if (!oldset)
oldset = &current->blocked;
task_pt_regs(current)->icountlevel = 0;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
/* Are we from a system call? */
if (regs->syscall >= 0) {
/* If so, check system call restarting.. */
switch (regs->areg[2]) {
case ERESTARTNOHAND:
case ERESTART_RESTARTBLOCK:
regs->areg[2] = -EINTR;
break;
if (signr > 0) {
/* Are we from a system call? */
if ((signed)regs->syscall >= 0) {
case ERESTARTSYS:
if (!(ka.sa.sa_flags & SA_RESTART)) {
/* If so, check system call restarting.. */
switch (regs->areg[2]) {
case -ERESTARTNOHAND:
case -ERESTART_RESTARTBLOCK:
regs->areg[2] = -EINTR;
break;
}
/* fallthrough */
case ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
case -ERESTARTSYS:
if (!(ka.sa.sa_flags & SA_RESTART)) {
regs->areg[2] = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
break;
default:
/* nothing to do */
if (regs->areg[2] != 0)
break;
}
}
}
if (signr == 0)
return 0; /* no signals delivered */
/* Whee! Actually deliver the signal. */
/* Set up the stack frame */
setup_frame(signr, &ka, &info, oldset, regs);
/* Whee! Actually deliver the signal. */
if (ka.sa.sa_flags & SA_ONESHOT)
ka.sa.sa_handler = SIG_DFL;
/* Set up the stack frame */
if (ka.sa.sa_flags & SA_SIGINFO)
setup_rt_frame(signr, &ka, &info, oldset, regs);
else
setup_frame(signr, &ka, oldset, regs);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
if (!(ka.sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
if (current->ptrace & PT_SINGLESTEP)
task_pt_regs(current)->icountlevel = 1;
if (ka.sa.sa_flags & SA_ONESHOT)
ka.sa.sa_handler = SIG_DFL;
return 1;
}
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka.sa.sa_mask);
if (!(ka.sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return 1;
no_signal:
/* Did we come from a system call? */
if ((signed) regs->syscall >= 0) {
/* Restart the system call - no handlers present */
switch (regs->areg[2]) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->areg[2] = regs->syscall;
regs->pc -= 3;
break;
case -ERESTART_RESTARTBLOCK:
regs->areg[2] = __NR_restart_syscall;
regs->pc -= 3;
break;
}
}
if (current->ptrace & PT_SINGLESTEP)
task_pt_regs(current)->icountlevel = 1;
return 0;
}
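For context on the restart cases above: on this port the syscall number is passed in a2 (which the return value then overwrites) and the SYSCALL instruction is three bytes long, so restarting means putting the number back and rewinding the PC. A small sketch of that step with a hypothetical helper name:

/* Re-execute the interrupted system call on the way back to user space. */
static void xtensa_restart_syscall(struct pt_regs *regs)
{
	regs->areg[2] = regs->syscall;	/* restore the syscall number in a2 */
	regs->pc -= 3;			/* rewind over the 3-byte SYSCALL   */
}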
......@@ -84,9 +84,7 @@ SECTIONS
{
/* The .head.text section must be the first section! */
*(.head.text)
*(.literal)
TEXT_TEXT
*(.srom.text)
*(.literal .text)
VMLINUX_SYMBOL(__sched_text_start) = .;
*(.sched.literal .sched.text)
VMLINUX_SYMBOL(__sched_text_end) = .;
......@@ -96,6 +94,7 @@ SECTIONS
}
_etext = .;
PROVIDE (etext = .);
. = ALIGN(16);
......@@ -103,32 +102,6 @@ SECTIONS
/* Relocation table */
. = ALIGN(16);
__boot_reloc_table_start = ABSOLUTE(.);
__relocate : {
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
#if 0
RELOCATE_ENTRY(_KernelExceptionVector_literal,
.KernelExceptionVector.literal);
#endif
RELOCATE_ENTRY(_KernelExceptionVector_text,
.KernelExceptionVector.text);
#if 0
RELOCATE_ENTRY(_UserExceptionVector_literal,
.UserExceptionVector.literal);
#endif
RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text);
RELOCATE_ENTRY(_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text);
}
__boot_reloc_table_end = ABSOLUTE(.) ;
.fixup : { *(.fixup) }
. = ALIGN(16);
......@@ -145,8 +118,7 @@ SECTIONS
_fdata = .;
.data :
{
DATA_DATA
CONSTRUCTORS
*(.data) CONSTRUCTORS
. = ALIGN(XCHAL_ICACHE_LINESIZE);
*(.data.cacheline_aligned)
}
......@@ -174,6 +146,22 @@ SECTIONS
__tagtable_begin = .;
*(.taglist)
__tagtable_end = .;
. = ALIGN(16);
__boot_reloc_table_start = ABSOLUTE(.);
RELOCATE_ENTRY(_WindowVectors_text,
.WindowVectors.text);
RELOCATE_ENTRY(_KernelExceptionVector_text,
.KernelExceptionVector.text);
RELOCATE_ENTRY(_UserExceptionVector_text,
.UserExceptionVector.text);
RELOCATE_ENTRY(_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal);
RELOCATE_ENTRY(_DoubleExceptionVector_text,
.DoubleExceptionVector.text);
__boot_reloc_table_end = ABSOLUTE(.) ;
}
. = ALIGN(XCHAL_ICACHE_LINESIZE);
......@@ -194,16 +182,6 @@ SECTIONS
SECURITY_INIT
. = ALIGN(4);
__start___ftr_fixup = .;
__ftr_fixup : { *(__ftr_fixup) }
__stop___ftr_fixup = .;
. = ALIGN(4096);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(4096);
......@@ -212,6 +190,12 @@ SECTIONS
__initramfs_end = .;
#endif
. = ALIGN(4096);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
__per_cpu_end = .;
/* We need this dummy segment here */
. = ALIGN(4);
......@@ -273,9 +257,9 @@ SECTIONS
/* BSS section */
_bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss : { *(COMMON) *(.bss) }
.bss : { *(.bss.page_aligned) *(.bss) }
_bss_end = .;
_end = .;
/* only used by the boot loader */
......@@ -293,16 +277,16 @@ SECTIONS
*(.ResetVector.text)
}
/* Sections to be discarded */
/DISCARD/ :
{
*(.text.exit)
*(.text.exit.literal)
*(.data.exit)
*(.exit.literal .exit.text)
*(.exit.data)
*(.exitcall.exit)
}
.xt.lit : { *(.xt.lit) }
.xt.prop : { *(.xt.prop) }
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
......
......@@ -38,21 +38,10 @@
/*
* String functions
*/
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(kernel_thread);
/*
......
......@@ -25,18 +25,18 @@
/*
* char *__strncpy_user(char *dst, const char *src, size_t len)
*/
.text
.begin literal
.align 4
.Lmask0:
.byte 0xff, 0x00, 0x00, 0x00
.Lmask1:
.byte 0x00, 0xff, 0x00, 0x00
.Lmask2:
.byte 0x00, 0x00, 0xff, 0x00
.Lmask3:
.byte 0x00, 0x00, 0x00, 0xff
.end literal
#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif
# Register use
# a0/ return address
......@@ -53,6 +53,7 @@
# a11/ dst
# a12/ tmp
.text
.align 4
.global __strncpy_user
.type __strncpy_user,@function
......@@ -61,10 +62,10 @@ __strncpy_user:
# a2/ dst, a3/ src, a4/ len
mov a11, a2 # leave dst in return value register
beqz a4, .Lret # if len is zero
l32r a5, .Lmask0 # mask for byte 0
l32r a6, .Lmask1 # mask for byte 1
l32r a7, .Lmask2 # mask for byte 2
l32r a8, .Lmask3 # mask for byte 3
movi a5, MASK0 # mask for byte 0
movi a6, MASK1 # mask for byte 1
movi a7, MASK2 # mask for byte 2
movi a8, MASK3 # mask for byte 3
bbsi.l a3, 0, .Lsrc1mod2 # if only 8-bit aligned
bbsi.l a3, 1, .Lsrc2mod4 # if only 16-bit aligned
.Lsrcaligned: # return here when src is word-aligned
......
......@@ -24,18 +24,18 @@
/*
* size_t __strnlen_user(const char *s, size_t len)
*/
.text
.begin literal
.align 4
.Lmask0:
.byte 0xff, 0x00, 0x00, 0x00
.Lmask1:
.byte 0x00, 0xff, 0x00, 0x00
.Lmask2:
.byte 0x00, 0x00, 0xff, 0x00
.Lmask3:
.byte 0x00, 0x00, 0x00, 0xff
.end literal
#ifdef __XTENSA_EB__
# define MASK0 0xff000000
# define MASK1 0x00ff0000
# define MASK2 0x0000ff00
# define MASK3 0x000000ff
#else
# define MASK0 0x000000ff
# define MASK1 0x0000ff00
# define MASK2 0x00ff0000
# define MASK3 0xff000000
#endif
# Register use:
# a2/ src
......@@ -48,6 +48,7 @@
# a9/ tmp
# a10/ tmp
.text
.align 4
.global __strnlen_user
.type __strnlen_user,@function
......@@ -56,10 +57,10 @@ __strnlen_user:
# a2/ s, a3/ len
addi a4, a2, -4 # because we overincrement at the end;
# we compensate with load offsets of 4
l32r a5, .Lmask0 # mask for byte 0
l32r a6, .Lmask1 # mask for byte 1
l32r a7, .Lmask2 # mask for byte 2
l32r a8, .Lmask3 # mask for byte 3
movi a5, MASK0 # mask for byte 0
movi a6, MASK1 # mask for byte 1
movi a7, MASK2 # mask for byte 2
movi a8, MASK3 # mask for byte 3
bbsi.l a2, 0, .L1mod2 # if only 8-bit aligned
bbsi.l a2, 1, .L2mod4 # if only 16-bit aligned
......
......@@ -205,7 +205,7 @@ void __init init_mmu (void)
/* Writing zeros to the <t>TLBCFG special registers ensures
* that valid values exist in the register. For existing
* PGSZID<w> fields, zero selects the first element of the
* page-size array. For nonexistant PGSZID<w> fields, zero is
* page-size array. For nonexistent PGSZID<w> fields, zero is
* the best value to write. Also, when changing PGSZID<w>
* fields, the corresponding TLB must be flushed.
*/
......
......@@ -473,7 +473,7 @@ static int iss_net_open(struct net_device *dev)
netif_start_queue(dev);
/* clear buffer - it can happen that the host side of the interface
* is full when we gethere. In this case, new data is never queued,
* is full when we get here. In this case, new data is never queued,
* SIGIOs never arrive, and the net never works.
*/
while ((err = iss_net_rx(dev)) > 0)
......
......@@ -7,7 +7,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_BITOPS_H
......@@ -31,53 +31,30 @@
#if XCHAL_HAVE_NSA
static __inline__ int __cntlz (unsigned long x)
static inline unsigned long __cntlz (unsigned long x)
{
int lz;
asm ("nsau %0, %1" : "=r" (lz) : "r" (x));
return 31 - lz;
return lz;
}
#else
static __inline__ int __cntlz (unsigned long x)
{
unsigned long sum, x1, x2, x4, x8, x16;
x1 = x & 0xAAAAAAAA;
x2 = x & 0xCCCCCCCC;
x4 = x & 0xF0F0F0F0;
x8 = x & 0xFF00FF00;
x16 = x & 0xFFFF0000;
sum = x2 ? 2 : 0;
sum += (x16 != 0) * 16;
sum += (x8 != 0) * 8;
sum += (x4 != 0) * 4;
sum += (x1 != 0);
return sum;
}
#endif
/*
* ffz: Find first zero in word. Undefined if no zero exists.
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/
static __inline__ int ffz(unsigned long x)
static inline int ffz(unsigned long x)
{
if ((x = ~x) == 0)
return 32;
return __cntlz(x & -x);
return 31 - __cntlz(~x & -~x);
}
/*
* __ffs: Find first bit set in word. Return 0 for bit 0
*/
static __inline__ int __ffs(unsigned long x)
static inline int __ffs(unsigned long x)
{
return __cntlz(x & -x);
return 31 - __cntlz(x & -x);
}
/*
......@@ -86,9 +63,9 @@ static __inline__ int __ffs(unsigned long x)
* differs in spirit from the above ffz (man ffs).
*/
static __inline__ int ffs(unsigned long x)
static inline int ffs(unsigned long x)
{
return __cntlz(x & -x) + 1;
return 32 - __cntlz(x & -x);
}
/*
......@@ -96,20 +73,36 @@ static __inline__ int ffs(unsigned long x)
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static __inline__ int fls (unsigned int x)
static inline int fls (unsigned int x)
{
return __cntlz(x);
return 32 - __cntlz(x);
}
#else
/* Use the generic implementation if we don't have the nsa/nsau instructions. */
# include <asm-generic/bitops/ffs.h>
# include <asm-generic/bitops/__ffs.h>
# include <asm-generic/bitops/ffz.h>
# include <asm-generic/bitops/fls.h>
#endif
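The point of the bitops fix is visible above: nsau (modelled here by __cntlz) returns the count of leading zeros itself, so the helpers must subtract it from 31 or 32 instead of returning it directly. A quick user-space sketch of the corrected relationships, using GCC's __builtin_clz as a stand-in for nsau (an assumption made for illustration only):

#include <assert.h>

static unsigned int cntlz(unsigned int x)
{
	return x ? (unsigned int)__builtin_clz(x) : 32;	/* nsau-like: 32 for 0 */
}

int main(void)
{
	assert(31 - cntlz(8u & -8u) == 3);	/* __ffs(8)        == 3  */
	assert(32 - cntlz(1u & -1u) == 1);	/* ffs(1)          == 1  */
	assert(32 - cntlz(0x80000000u) == 32);	/* fls(0x80000000) == 32 */
	assert(32 - cntlz(0u) == 0);		/* fls(0)          == 0  */
	assert(31 - cntlz(~6u & -~6u) == 0);	/* ffz(6): bit 0 is clear */
	return 0;
}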
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#ifdef __XTENSA_EL__
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr))
# define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr), (unsigned long*)(addr))
# define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr), (unsigned long*)(addr))
#elif defined(__XTENSA_EB__)
# define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr))
# define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr))
# define ext2_set_bit_atomic(lock,nr,addr) \
test_and_set_bit((nr) ^ 0x18, (unsigned long*)(addr))
# define ext2_clear_bit_atomic(lock,nr,addr) \
test_and_clear_bit((nr) ^ 0x18, (unsigned long*)(addr))
#else
# error processor byte order undefined!
#endif
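The "^ 0x18" in the big-endian variants above is the usual remapping for little-endian ext2 bitmaps; the helper below is purely illustrative (the name is invented) and not part of the patch. XOR-ing the bit number with 24 flips bits 3 and 4, swapping the byte position within the 32-bit word while leaving the bit-within-byte offset alone, so the on-disk little-endian byte order is preserved even though test_and_set_bit() operates on native big-endian words:

/* Map a bit number in a little-endian bitmap onto the equivalent bit
 * number within a big-endian 32-bit word. */
static unsigned int ext2_bitnr_to_be_word_bit(unsigned int nr)
{
	return nr ^ 0x18;	/* byte 0 <-> byte 3, byte 1 <-> byte 2 */
}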
......
......@@ -12,6 +12,7 @@
#define _XTENSA_BYTEORDER_H
#include <asm/types.h>
#include <linux/compiler.h>
static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
......@@ -78,4 +79,4 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x)
# error processor byte order undefined!
#endif
#endif /* __ASM_XTENSA_BYTEORDER_H */
#endif /* _XTENSA_BYTEORDER_H */
......@@ -64,6 +64,7 @@ typedef struct {
# define COPROCESSOR_INFO_SIZE 8
# endif
#endif
#endif /* XCHAL_HAVE_CP */
#ifndef __ASSEMBLY__
......@@ -74,8 +75,11 @@ extern void save_coprocessor_registers(void*, int);
# else
# define release_coprocessors(task)
# endif
#endif
#endif
typedef unsigned char cp_state_t[XTENSA_CP_EXTRA_SIZE]
__attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
#endif /* !__ASSEMBLY__ */
#endif /* _XTENSA_COPROCESSOR_H */
......@@ -5,21 +5,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_DIV64_H
#define _XTENSA_DIV64_H
#include <linux/types.h>
#include <asm-generic/div64.h>
#define do_div(n,base) ({ \
int __res = n % ((unsigned int) base); \
n /= (unsigned int) base; \
__res; })
static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
return dividend / divisor;
}
#endif
#endif /* _XTENSA_DIV64_H */
......@@ -13,7 +13,6 @@
#ifndef _XTENSA_ELF_H
#define _XTENSA_ELF_H
#include <asm/variant/core.h>
#include <asm/ptrace.h>
/* Xtensa processor ELF architecture-magic number */
......@@ -49,7 +48,7 @@ typedef struct {
elf_greg_t lcount;
elf_greg_t sar;
elf_greg_t syscall;
elf_greg_t ar[XCHAL_NUM_AREGS];
elf_greg_t ar[64];
} xtensa_gregset_t;
#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
......
/*
* include/asm-xtensa/fcntl.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_FCNTL_H
#define _XTENSA_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on block devices and on files
located on an ext2 file system */
#define O_ACCMODE 0003
#define O_RDONLY 00
#define O_WRONLY 01
#define O_RDWR 02
#define O_CREAT 0100 /* not fcntl */
#define O_EXCL 0200 /* not fcntl */
#define O_NOCTTY 0400 /* not fcntl */
#define O_TRUNC 01000 /* not fcntl */
#define O_APPEND 02000
#define O_NONBLOCK 04000
#define O_NDELAY O_NONBLOCK
#define O_SYNC 010000
#define FASYNC 020000 /* fcntl, for BSD compatibility */
#define O_DIRECT 040000 /* direct disk access hint */
#define O_LARGEFILE 0100000
#define O_DIRECTORY 0200000 /* must be a directory */
#define O_NOFOLLOW 0400000 /* don't follow links */
#define O_NOATIME 01000000
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define F_SETFD 2 /* set/clear close_on_exec */
#define F_GETFL 3 /* get file->f_flags */
#define F_SETFL 4 /* set file->f_flags */
#define F_GETLK 5
#define F_SETLK 6
#define F_SETLKW 7
#define F_SETOWN 8 /* for sockets. */
#define F_GETOWN 9 /* for sockets. */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
/* for posix fcntl() and lockf() */
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
/* for old implementation of bsd flock () */
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
/* for leases */
#define F_INPROGRESS 16
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock */
#define LOCK_READ 64 /* ... Which allows concurrent read operations */
#define LOCK_WRITE 128 /* ... Which allows concurrent write operations */
#define LOCK_RW 192 /* ... Which allows concurrent read & write ops */
struct flock {
short l_type;
short l_whence;
off_t l_start;
off_t l_len;
pid_t l_pid;
};
struct flock64 {
short l_type;
short l_whence;
loff_t l_start;
loff_t l_len;
pid_t l_pid;
};
#define F_LINUX_SPECIFIC_BASE 1024
#endif /* _XTENSA_FCNTL_H */
#include <asm-generic/fcntl.h>
......@@ -14,6 +14,7 @@
#define _XTENSA_MMU_CONTEXT_H
#include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
......
......@@ -131,6 +131,6 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */
......@@ -11,15 +11,13 @@
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <asm/variant/core.h>
#ifdef __KERNEL__
# define HZ 100 /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
#endif
#define EXEC_PAGESIZE (1 << XCHAL_MMU_MIN_PTE_PAGE_SIZE)
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
......
......@@ -11,8 +11,6 @@
#ifndef _XTENSA_PTRACE_H
#define _XTENSA_PTRACE_H
#include <asm/variant/core.h>
/*
* Kernel stack
*
......@@ -101,7 +99,8 @@ struct pt_regs {
unsigned long windowbase; /* 48 */
unsigned long windowstart; /* 52 */
unsigned long syscall; /* 56 */
int reserved[2]; /* 64 */
unsigned long icountlevel; /* 60 */
int reserved[1]; /* 64 */
/* Make sure the areg field is 16 bytes aligned. */
int align[0] __attribute__ ((aligned(16)));
......@@ -113,6 +112,9 @@ struct pt_regs {
};
#ifdef __KERNEL__
#include <asm/variant/core.h>
# define task_pt_regs(tsk) ((struct pt_regs*) \
(task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
......
......@@ -9,8 +9,6 @@
#ifndef _XTENSA_SHMPARAM_H
#define _XTENSA_SHMPARAM_H
#include <asm/processor.h>
/*
* Xtensa can have variable size caches, and if
* the size of single way is larger than the page size,
......
......@@ -5,21 +5,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2003 Tensilica Inc.
* Copyright (C) 2001 - 2007 Tensilica Inc.
*/
#ifndef _XTENSA_SIGCONTEXT_H
#define _XTENSA_SIGCONTEXT_H
#define _ASMLANGUAGE
#include <asm/processor.h>
#include <asm/coprocessor.h>
struct _cpstate {
unsigned char _cpstate[XTENSA_CP_EXTRA_SIZE];
} __attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));
struct sigcontext {
unsigned long oldmask;
......@@ -27,18 +18,13 @@ struct sigcontext {
/* CPU registers */
unsigned long sc_pc;
unsigned long sc_ps;
unsigned long sc_wmask;
unsigned long sc_windowbase;
unsigned long sc_windowstart;
unsigned long sc_lbeg;
unsigned long sc_lend;
unsigned long sc_lcount;
unsigned long sc_sar;
unsigned long sc_depc;
unsigned long sc_dareg0;
unsigned long sc_treg[4];
unsigned long sc_areg[XCHAL_NUM_AREGS];
struct _cpstate *sc_cpstate;
unsigned long sc_acclo;
unsigned long sc_acchi;
unsigned long sc_a[16];
};
#endif /* __ASM_XTENSA_SIGCONTEXT_H */
#endif /* _XTENSA_SIGCONTEXT_H */
......@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
#define TIF_IRET 5 /* return with iret */
#define TIF_MEMDIE 6
#define TIF_RESTORE_SIGMASK 7 /* restore signal mask in do_signal() */
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
......@@ -125,6 +126,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
#define _TIF_IRET (1<<TIF_IRET)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */
#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */
......
......@@ -485,8 +485,8 @@ __SYSCALL(217, sys_sched_get_priority_min, 1)
__SYSCALL(218, sys_sched_rr_get_interval, 2)
#define __NR_sched_yield 219
__SYSCALL(219, sys_sched_yield, 0)
#define __NR_sigreturn 222
__SYSCALL(222, xtensa_sigreturn, 0)
#define __NR_available222 222
__SYSCALL(222, sys_ni_syscall, 0)
/* Signal Handling */
......