Commit 9a8fd558 authored by Chris Zankel, committed by Linus Torvalds

[PATCH] xtensa: Architecture support for Tensilica Xtensa Part 6

The attached patch provides part 6 of an architecture implementation for the
Tensilica Xtensa CPU series.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3f65ce4d
/*
* include/asm-xtensa/a.out.h
*
* Dummy a.out file. Xtensa does not support the a.out format, but the kernel
* seems to depend on it.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_A_OUT_H
#define _XTENSA_A_OUT_H
/* Note: the kernel needs the a.out definitions, even if only ELF is used. */
#define STACK_TOP TASK_SIZE
struct exec
{
unsigned long a_info;
unsigned a_text;
unsigned a_data;
unsigned a_bss;
unsigned a_syms;
unsigned a_entry;
unsigned a_trsize;
unsigned a_drsize;
};
#endif /* _XTENSA_A_OUT_H */
/*
* include/asm-xtensa/atomic.h
*
* Atomic operations that C can't guarantee us. Useful for resource counting..
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H
#include <linux/config.h>
#include <linux/stringify.h>
typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/system.h>
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
/*
* This Xtensa implementation assumes that the right mechanism
* for exclusion is for locking interrupts to level 1.
*
* Locking interrupts looks like this:
*
* rsil a15, 1
* <code>
* wsr a15, PS
* rsync
*
* Note that a15 is used here because the register allocation
* done by the compiler is not guaranteed and a window overflow
* must not occur between the rsil and wsr instructions. By using
* a15 in the rsil, the machine is guaranteed to be in a state
* where no register reference will cause an overflow.
*/
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v,i) ((v)->counter = (i))
/**
* atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
extern __inline__ void atomic_add(int i, atomic_t * v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
}
/**
* atomic_sub - subtract the atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
extern __inline__ void atomic_sub(int i, atomic_t *v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15, "__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
}
/*
* We use atomic_{add|sub}_return to define other functions.
*/
extern __inline__ int atomic_add_return(int i, atomic_t * v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"add %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
}
extern __inline__ int atomic_sub_return(int i, atomic_t * v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"sub %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (i), "a" (v)
: "a15", "memory"
);
return vval;
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v and returns
* true if the result is zero, or false for all
* other cases.
*/
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
#define atomic_inc(v) atomic_add(1,(v))
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
#define atomic_inc_return(v) atomic_add_return(1,(v))
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
#define atomic_dec(v) atomic_sub(1,(v))
/**
* atomic_dec_return - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
#define atomic_dec_return(v) atomic_sub_return(1,(v))
/**
* atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)
/**
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)
/**
* atomic_add_negative - add and test if negative
* @v: pointer of type atomic_t
* @i: integer value to add
*
* Atomically adds @i to @v and returns true
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)
extern __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned int all_f = -1;
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"xor %1, %4, %3 \n\t"
"and %0, %0, %4 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"rsync \n"
: "=&a" (vval), "=a" (mask)
: "a" (v), "a" (all_f), "1" (mask)
: "a15", "memory"
);
}
extern __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned int vval;
__asm__ __volatile__(
"rsil a15,"__stringify(LOCKLEVEL)"\n\t"
"l32i %0, %2, 0 \n\t"
"or %0, %0, %1 \n\t"
"s32i %0, %2, 0 \n\t"
"wsr a15, "__stringify(PS)" \n\t"
"rsync \n"
: "=&a" (vval)
: "a" (mask), "a" (v)
: "a15", "memory"
);
}
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */
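As a usage sketch (hypothetical code, not part of this patch), the primitives above combine in the usual Linux reference-counting pattern:

/* Hypothetical usage sketch, not part of this patch: a reference count
 * built on the primitives above. */
static atomic_t refcount = ATOMIC_INIT(1);

static void get_ref(void)
{
	atomic_inc(&refcount);
}

static void put_ref(void)
{
	/* atomic_dec_and_test() returns true when the count reaches zero. */
	if (atomic_dec_and_test(&refcount))
		release_object();	/* hypothetical cleanup hook */
}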
/*
* include/asm-xtensa/bootparam.h
*
* Definition of the Linux/Xtensa boot parameter structure
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* (Concept borrowed from the 68K port)
*/
#ifndef _XTENSA_BOOTPARAM_H
#define _XTENSA_BOOTPARAM_H
#define BP_VERSION 0x0001
#define BP_TAG_COMMAND_LINE 0x1001 /* command line (0-terminated string)*/
#define BP_TAG_INITRD 0x1002 /* ramdisk addr and size (bp_meminfo) */
#define BP_TAG_MEMORY 0x1003 /* memory addr and size (bp_meminfo) */
#define BP_TAG_SERIAL_BAUSRATE 0x1004 /* baud rate of current console. */
#define BP_TAG_SERIAL_PORT 0x1005 /* serial device of current console */
#define BP_TAG_FIRST 0x7B0B /* first tag with a version number */
#define BP_TAG_LAST 0x7E0B /* last tag */
#ifndef __ASSEMBLY__
/* All records are aligned to 4 bytes */
typedef struct bp_tag {
unsigned short id; /* tag id */
unsigned short size; /* size of this record excluding the structure*/
unsigned long data[0]; /* data */
} bp_tag_t;
typedef struct meminfo {
unsigned long type;
unsigned long start;
unsigned long end;
} meminfo_t;
#define SYSMEM_BANKS_MAX 5
#define MEMORY_TYPE_CONVENTIONAL 0x1000
#define MEMORY_TYPE_NONE 0x2000
typedef struct sysmem_info {
int nr_banks;
meminfo_t bank[SYSMEM_BANKS_MAX];
} sysmem_info_t;
extern sysmem_info_t sysmem;
#endif
#endif
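A minimal sketch (assumed, not from this patch) of how a kernel might walk the tag chain defined above; it relies on the stated 4-byte record alignment and on 'size' excluding the header:

/* Illustrative sketch, not part of this patch: walk a boot-parameter
 * chain from BP_TAG_FIRST to BP_TAG_LAST.  Relies on 4-byte alignment
 * of records and on 'size' excluding the bp_tag header itself. */
static void parse_bootparams(bp_tag_t *tag)
{
	if (tag->id != BP_TAG_FIRST)
		return;				/* no valid chain */

	while (tag->id != BP_TAG_LAST) {
		if (tag->id == BP_TAG_COMMAND_LINE)
			handle_command_line((char *)tag->data); /* hypothetical */

		/* next record: header plus payload, rounded up to 4 bytes */
		tag = (bp_tag_t *)((unsigned long)(tag + 1)
				   + ((tag->size + 3) & ~3));
	}
}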
/*
* include/asm-xtensa/bug.h
*
* Macros to cause a 'bug' message.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_BUG_H
#define _XTENSA_BUG_H
#include <linux/stringify.h>
#define ILL __asm__ __volatile__ (".byte 0,0,0\n")
#ifdef CONFIG_KALLSYMS
# define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
ILL; \
} while (0)
#else
# define BUG() do { \
printk("kernel BUG!\n"); \
ILL; \
} while (0)
#endif
#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
#define PAGE_BUG(page) do { BUG(); } while (0)
#define WARN_ON(condition) do { \
if (unlikely((condition)!=0)) { \
printk ("Warning in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \
dump_stack(); \
} \
} while (0)
#endif /* _XTENSA_BUG_H */
/*
* include/asm-xtensa/bugs.h
*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Xtensa processors don't have any bugs. :)
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
#ifndef _XTENSA_BUGS_H
#define _XTENSA_BUGS_H
#include <asm/processor.h>
static void __init check_bugs(void)
{
}
#endif /* _XTENSA_BUGS_H */
/*
* include/asm-xtensa/byteorder.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_BYTEORDER_H
#define _XTENSA_BYTEORDER_H
#include <asm/processor.h>
#include <asm/types.h>
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
__u32 res;
/* instruction sequence from Xtensa ISA release 2/2000 */
__asm__("ssai 8 \n\t"
"srli %0, %1, 16 \n\t"
"src %0, %0, %1 \n\t"
"src %0, %0, %0 \n\t"
"src %0, %1, %0 \n"
: "=&a" (res)
: "a" (x)
);
return res;
}
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
/* Given that 'short' values are signed (i.e., can be negative),
* we cannot assume that the upper 16-bits of the register are
* zero. We are careful to mask values after shifting.
*/
/* There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
* inserts an extui instruction after putting this function inline
* to ensure that it uses only the least-significant 16 bits of
* the result. xt-xcc doesn't use an extui, but assumes the
* __asm__ macro follows convention that the upper 16 bits of an
* 'unsigned short' result are still zero. This macro doesn't
* follow convention; indeed, it leaves garbage in the upper 16
* bits of the register.
* Declaring the temporary variables 'res' and 'tmp' to be 32-bit
* types while the return type of the function is a 16-bit type
* forces both compilers to insert exactly one extui instruction
* (or equivalent) to mask off the upper 16 bits. */
__u32 res;
__u32 tmp;
__asm__("extui %1, %2, 8, 8\n\t"
"slli %0, %2, 8 \n\t"
"or %0, %0, %1 \n"
: "=&a" (res), "=&a" (tmp)
: "a" (x)
);
return res;
}
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#ifdef __XTENSA_EL__
# include <linux/byteorder/little_endian.h>
#elif defined(__XTENSA_EB__)
# include <linux/byteorder/big_endian.h>
#else
# error processor byte order undefined!
#endif
#endif /* _XTENSA_BYTEORDER_H */
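For reference, the two assembly sequences above must produce the same results as the following plain-C byte swaps (a documentation sketch, not a proposed replacement):

/* Plain-C reference for the assembly above (sketch only): both
 * implementations must agree for all inputs. */
static __inline__ __u32 __swab32_ref(__u32 x)
{
	return ((x & 0x000000ff) << 24) | ((x & 0x0000ff00) << 8) |
	       ((x & 0x00ff0000) >> 8)  | ((x & 0xff000000) >> 24);
}

static __inline__ __u16 __swab16_ref(__u16 x)
{
	return (__u16)((x << 8) | (x >> 8));	/* 16-bit return masks it */
}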
/*
* include/asm-xtensa/cache.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CACHE_H
#define _XTENSA_CACHE_H
#include <xtensa/config/core.h>
#if XCHAL_ICACHE_SIZE > 0
# if (XCHAL_ICACHE_SIZE % (XCHAL_ICACHE_LINESIZE*XCHAL_ICACHE_WAYS*4)) != 0
# error cache configuration outside expected/supported range!
# endif
#endif
#if XCHAL_DCACHE_SIZE > 0
# if (XCHAL_DCACHE_SIZE % (XCHAL_DCACHE_LINESIZE*XCHAL_DCACHE_WAYS*4)) != 0
# error cache configuration outside expected/supported range!
# endif
#endif
#define L1_CACHE_SHIFT XCHAL_CACHE_LINEWIDTH_MAX
#define L1_CACHE_BYTES XCHAL_CACHE_LINESIZE_MAX
#endif /* _XTENSA_CACHE_H */
/*
* include/asm-xtensa/cacheflush.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CACHEFLUSH_H
#define _XTENSA_CACHEFLUSH_H
#ifdef __KERNEL__
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* flush and invalidate data cache, invalidate instruction cache:
*
* __flush_invalidate_cache_all()
* __flush_invalidate_cache_range(from,size)
*
* invalidate data or instruction cache:
*
* __invalidate_icache_all()
* __invalidate_icache_page(adr)
* __invalidate_dcache_page(adr)
* __invalidate_icache_range(from,size)
* __invalidate_dcache_range(from,size)
*
* flush data cache:
*
* __flush_dcache_page(adr)
*
* flush and invalidate data cache:
*
* __flush_invalidate_dcache_all()
* __flush_invalidate_dcache_page(adr)
* __flush_invalidate_dcache_range(from,size)
*/
extern void __flush_invalidate_cache_all(void);
extern void __flush_invalidate_cache_range(unsigned long, unsigned long);
extern void __flush_invalidate_dcache_all(void);
extern void __invalidate_icache_all(void);
extern void __invalidate_dcache_page(unsigned long);
extern void __invalidate_icache_page(unsigned long);
extern void __invalidate_icache_range(unsigned long, unsigned long);
extern void __invalidate_dcache_range(unsigned long, unsigned long);
#if XCHAL_DCACHE_IS_WRITEBACK
extern void __flush_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_page(unsigned long);
extern void __flush_invalidate_dcache_range(unsigned long, unsigned long);
#else
# define __flush_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_page(p) do { } while(0)
# define __flush_invalidate_dcache_range(p,s) do { } while(0)
#endif
/*
* We have physically tagged caches - nothing to do here -
* unless we have cache aliasing.
*
* Pages can get remapped. Because this might change the 'color' of that page,
* we have to flush the cache before the PTE is changed.
* (see also Documentation/cachetlb.txt)
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
#define flush_cache_all() __flush_invalidate_cache_all();
#define flush_cache_mm(mm) __flush_invalidate_cache_all();
#define flush_cache_vmap(start,end) __flush_invalidate_cache_all();
#define flush_cache_vunmap(start,end) __flush_invalidate_cache_all();
extern void flush_dcache_page(struct page*);
extern void flush_cache_range(struct vm_area_struct*, ulong, ulong);
extern void flush_cache_page(struct vm_area_struct*, unsigned long, unsigned long);
#else
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_vmap(start,end) do { } while (0)
#define flush_cache_vunmap(start,end) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_cache_page(vma,addr,pfn) do { } while (0)
#define flush_cache_range(vma,start,end) do { } while (0)
#endif
#define flush_icache_range(start,end) \
__invalidate_icache_range(start,(end)-(start))
/* This is not required, see Documentation/cachetlb.txt */
#define flush_icache_page(vma,page) do { } while(0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
#endif /* __KERNEL__ */
#endif /* _XTENSA_CACHEFLUSH_H */
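A usage sketch (hypothetical, under the API above): code that writes instructions to memory must write the D-cache back and invalidate the I-cache for that range before the new instructions execute:

/* Hypothetical sketch, not part of this patch. */
static void install_code(unsigned long dst, const void *src,
			 unsigned long len)
{
	memcpy((void *)dst, src, len);
	__flush_invalidate_dcache_range(dst, len);	/* push data to memory */
	flush_icache_range(dst, dst + len);		/* drop stale I-lines */
}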
/*
* include/asm-xtensa/checksum.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CHECKSUM_H
#define _XTENSA_CHECKSUM_H
#include <linux/config.h>
#include <linux/in6.h>
#include <xtensa/config/core.h>
/*
* computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit)
*
* returns a 32-bit number suitable for feeding into itself
* or csum_tcpudp_magic
*
* this function must be called with even lengths, except
* for the last fragment, which may be odd
*
* it's best to have buff aligned on a 32-bit boundary
*/
asmlinkage unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum);
/*
* the same as csum_partial, but copies from src while it
* checksums, and handles user-space pointer exceptions correctly, when needed.
*
* it is even more important here to align src and dst on a 32-bit (or,
* even better, a 64-bit) boundary
*/
asmlinkage unsigned int csum_partial_copy_generic( const char *src, char *dst, int len, int sum,
int *src_err_ptr, int *dst_err_ptr);
/*
* Note: when you get a NULL pointer exception here this means someone
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
* verify_area().
*/
extern __inline__
unsigned int csum_partial_copy_nocheck ( const char *src, char *dst,
int len, int sum)
{
return csum_partial_copy_generic ( src, dst, len, sum, NULL, NULL);
}
extern __inline__
unsigned int csum_partial_copy_from_user ( const char *src, char *dst,
int len, int sum, int *err_ptr)
{
return csum_partial_copy_generic ( src, dst, len, sum, err_ptr, NULL);
}
/*
* These are the old (and unsafe) way of doing checksums; a warning message
* will be printed if they are used and an exception occurs.
*
* These functions should go away after some time.
*/
#define csum_partial_copy_fromuser csum_partial_copy
unsigned int csum_partial_copy( const char *src, char *dst, int len, int sum);
/*
* Fold a partial checksum
*/
static __inline__ unsigned int csum_fold(unsigned int sum)
{
unsigned int __dummy;
__asm__("extui %1, %0, 16, 16\n\t"
"extui %0 ,%0, 0, 16\n\t"
"add %0, %0, %1\n\t"
"slli %1, %0, 16\n\t"
"add %0, %0, %1\n\t"
"extui %0, %0, 16, 16\n\t"
"neg %0, %0\n\t"
"addi %0, %0, -1\n\t"
"extui %0, %0, 0, 16\n\t"
: "=r" (sum), "=&r" (__dummy)
: "0" (sum));
return sum;
}
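In plain C, the fold above computes the following (a reference sketch for the assembly; both versions must agree for all inputs):

/* Plain-C reference for csum_fold() above (sketch only): fold with
 * end-around carry, then take the one's complement. */
static __inline__ unsigned int csum_fold_ref(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* fold upper half in */
	sum += sum >> 16;			/* absorb the carry, if any */
	return ~sum & 0xffff;			/* one's complement, 16 bits */
}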
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
static __inline__ unsigned short ip_fast_csum(unsigned char * iph, unsigned int ihl)
{
unsigned int sum, tmp, endaddr;
__asm__ __volatile__(
"sub %0, %0, %0\n\t"
#if XCHAL_HAVE_LOOPS
"loopgtz %2, 2f\n\t"
#else
"beqz %2, 2f\n\t"
"slli %4, %2, 2\n\t"
"add %4, %4, %1\n\t"
"0:\t"
#endif
"l32i %3, %1, 0\n\t"
"add %0, %0, %3\n\t"
"bgeu %0, %3, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"addi %1, %1, 4\n\t"
#if !XCHAL_HAVE_LOOPS
"blt %1, %4, 0b\n\t"
#endif
"2:\t"
/* Since the input registers which are loaded with iph and ihl
are modified, we must also specify them as outputs, or gcc
will assume they contain their original values. */
: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp), "=&r" (endaddr)
: "1" (iph), "2" (ihl));
return csum_fold(sum);
}
static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum)
{
#ifdef __XTENSA_EL__
unsigned long len_proto = (ntohs(len)<<16)+proto*256;
#elif defined(__XTENSA_EB__)
unsigned long len_proto = (proto<<16)+len;
#else
# error processor byte order undefined!
#endif
__asm__("add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %2\n\t"
"bgeu %0, %2, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %3\n\t"
"bgeu %0, %3, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
: "=r" (sum), "=r" (len_proto)
: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
return sum;
}
/*
* computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented
*/
static __inline__ unsigned short int csum_tcpudp_magic(unsigned long saddr,
unsigned long daddr,
unsigned short len,
unsigned short proto,
unsigned int sum)
{
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
}
/*
* this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c
*/
static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
{
return csum_fold (csum_partial(buff, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
struct in6_addr *daddr,
__u32 len,
unsigned short proto,
unsigned int sum)
{
unsigned int __dummy;
__asm__("l32i %1, %2, 0\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %2, 4\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %2, 8\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %2, 12\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 0\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 4\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 8\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"l32i %1, %3, 12\n\t"
"add %0, %0, %1\n\t"
"bgeu %0, %1, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %4\n\t"
"bgeu %0, %4, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
"add %0, %0, %5\n\t"
"bgeu %0, %5, 1f\n\t"
"addi %0, %0, 1\n\t"
"1:\t"
: "=r" (sum), "=&r" (__dummy)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
return csum_fold(sum);
}
/*
* Copy and checksum to user
*/
#define HAVE_CSUM_COPY_USER
static __inline__ unsigned int csum_and_copy_to_user (const char *src, char *dst,
int len, int sum, int *err_ptr)
{
if (access_ok(VERIFY_WRITE, dst, len))
return csum_partial_copy_generic(src, dst, len, sum, NULL, err_ptr);
if (len)
*err_ptr = -EFAULT;
return -1; /* invalid checksum */
}
#endif
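As a usage sketch (hypothetical; IPPROTO_UDP from the standard IP headers is assumed), a UDP checksum combines csum_partial() over the datagram with the pseudo-header fold:

/* Hypothetical usage sketch, not part of this patch. */
static unsigned short udp_checksum(unsigned long saddr, unsigned long daddr,
				   unsigned char *udp, unsigned short len)
{
	unsigned int sum = csum_partial(udp, len, 0);
	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, sum);
}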
/*
* include/asm-xtensa/coprocessor.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H
#include <xtensa/config/core.h>
#define XTOFS(last_start,last_size,align) \
((last_start+last_size+align-1) & -align)
#define XTENSA_CP_EXTRA_OFFSET 0
#define XTENSA_CP_EXTRA_ALIGN XCHAL_EXTRA_SA_ALIGN
#define XTENSA_CPE_CP0_OFFSET \
XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN)
#define XTENSA_CPE_CP1_OFFSET \
XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN)
#define XTENSA_CPE_CP2_OFFSET \
XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN)
#define XTENSA_CPE_CP3_OFFSET \
XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN)
#define XTENSA_CPE_CP4_OFFSET \
XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN)
#define XTENSA_CPE_CP5_OFFSET \
XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN)
#define XTENSA_CPE_CP6_OFFSET \
XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN)
#define XTENSA_CPE_CP7_OFFSET \
XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN)
#define XTENSA_CP_EXTRA_SIZE \
XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16)
#if XCHAL_CP_NUM > 0
# ifndef __ASSEMBLY__
/*
* Tasks that own the contents of each coprocessor (i.e., its last user).
* Entries are 0 for not-owned or non-existent coprocessors.
* Note: The size of this structure is fixed to 8 bytes in entry.S
*/
typedef struct {
struct task_struct *owner; /* owner */
int offset; /* offset in cpextra space. */
} coprocessor_info_t;
# else
# define COPROCESSOR_INFO_OWNER 0
# define COPROCESSOR_INFO_OFFSET 4
# define COPROCESSOR_INFO_SIZE 8
# endif
#endif
#ifndef __ASSEMBLY__
# if XCHAL_CP_NUM > 0
struct task_struct;
extern void release_coprocessors (struct task_struct*);
extern void save_coprocessor_registers(void*, int);
# else
# define release_coprocessors(task)
# endif
#endif
#endif /* _XTENSA_COPROCESSOR_H */
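A worked example of the XTOFS() arithmetic above, with illustrative numbers:

/* Illustrative only: XTOFS(start, size, align) rounds start+size up to
 * the next multiple of align, e.g.
 *
 *	XTOFS(0, 36, 16) == (0 + 36 + 15) & -16 == 48
 *
 * so if the "extra" save area occupied bytes 0..35, a CP0 area with
 * 16-byte alignment would begin at offset 48. */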
/*
* include/asm-xtensa/cpumask.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CPUMASK_H
#define _XTENSA_CPUMASK_H
#include <asm-generic/cpumask.h>
#endif /* _XTENSA_CPUMASK_H */
#ifndef _XTENSA_CPUTIME_H
#define _XTENSA_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* _XTENSA_CPUTIME_H */
/*
* include/asm-xtensa/current.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_CURRENT_H
#define _XTENSA_CURRENT_H
#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
struct task_struct;
static inline struct task_struct *get_current(void)
{
return current_thread_info()->task;
}
#define current get_current()
#else
#define CURRENT_SHIFT 13
#define GET_CURRENT(reg,sp) \
GET_THREAD_INFO(reg,sp); \
l32i reg, reg, TI_TASK \
#endif
#endif /* _XTENSA_CURRENT_H */
/*
* include/asm-xtensa/delay.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
*/
#ifndef _XTENSA_DELAY_H
#define _XTENSA_DELAY_H
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/param.h>
extern unsigned long loops_per_jiffy;
extern __inline__ void __delay(unsigned long loops)
{
/* 2 cycles per loop. */
__asm__ __volatile__ ("1: addi %0, %0, -2; bgeui %0, 1, 1b"
: "=r" (loops) : "0" (loops));
}
static __inline__ u32 xtensa_get_ccount(void)
{
u32 ccount;
asm volatile ("rsr %0, 234; # CCOUNT\n" : "=r" (ccount));
return ccount;
}
/* For SMP/NUMA systems, change boot_cpu_data to something like
* local_cpu_data->... where local_cpu_data points to the current
* cpu. */
static __inline__ void udelay (unsigned long usecs)
{
unsigned long start = xtensa_get_ccount();
unsigned long cycles = usecs * (loops_per_jiffy / (1000000UL / HZ));
/* Note: all variables are unsigned (can wrap around)! */
while (((unsigned long)xtensa_get_ccount()) - start < cycles)
;
}
#endif
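A worked example of the cycle computation in udelay(), with illustrative numbers:

/* Illustrative only: with HZ == 100 and loops_per_jiffy == 500000,
 * one jiffy is 10000 us, so
 *
 *	cycles = usecs * (500000 / (1000000 / 100)) = usecs * 50
 *
 * and udelay(20) spins until CCOUNT has advanced by 1000 cycles. */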
/*
* include/asm-xtensa/div64.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_DIV64_H
#define _XTENSA_DIV64_H
#define do_div(n,base) ({ \
int __res = n % ((unsigned int) base); \
n /= (unsigned int) base; \
__res; })
#endif
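A usage sketch (hypothetical): do_div() divides its first argument in place and evaluates to the remainder, so callers must not write n = do_div(n, base):

/* Hypothetical usage sketch, not part of this patch. */
static inline unsigned int avg_per_event(unsigned long long total,
					 unsigned int nevents)
{
	unsigned int rem = do_div(total, nevents);	/* total := quotient */

	(void)rem;			/* remainder unused in this sketch */
	return (unsigned int)total;
}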
/*
* include/asm-xtensa/dma_mapping.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_DMA_MAPPING_H
#define _XTENSA_DMA_MAPPING_H
#include <asm/scatterlist.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/mm.h>
/*
* DMA-consistent mapping functions.
*/
extern void *consistent_alloc(int, size_t, dma_addr_t, unsigned long);
extern void consistent_free(void*, size_t, dma_addr_t);
extern void consistent_sync(void*, size_t, int);
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
consistent_sync(ptr, size, direction);
return virt_to_phys(ptr);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction direction)
{
int i;
BUG_ON(direction == DMA_NONE);
for (i = 0; i < nents; i++, sg++ ) {
BUG_ON(!sg->page);
sg->dma_address = page_to_phys(sg->page) + sg->offset;
consistent_sync(page_address(sg->page) + sg->offset,
sg->length, direction);
}
return nents;
}
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle), size, direction);
}
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
consistent_sync((void *)bus_to_virt(dma_handle)+offset,size,direction);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
consistent_sync(page_address(sg->page) + sg->offset,
sg->length, dir);
}
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++, sg++)
consistent_sync(page_address(sg->page) + sg->offset,
sg->length, dir);
}
static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
return 1;
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
if(!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline int
dma_get_cache_alignment(void)
{
return L1_CACHE_BYTES;
}
#define dma_is_consistent(d) (1)
static inline void
dma_cache_sync(void *vaddr, size_t size,
enum dma_data_direction direction)
{
consistent_sync(vaddr, size, direction);
}
#endif /* _XTENSA_DMA_MAPPING_H */
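A usage sketch (hypothetical driver fragment, under the API above): map a buffer for a device-bound transfer, hand the bus address to the hardware, and unmap on completion:

/* Hypothetical driver fragment, not part of this patch. */
static void start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	program_tx_hardware(bus, len);		/* hypothetical device hook */
	/* ... once the device signals completion: */
	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
}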
/*
* include/asm-xtensa/dma.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_DMA_H
#define _XTENSA_DMA_H
#include <linux/config.h>
#include <asm/io.h> /* need byte IO */
#include <xtensa/config/core.h>
/*
* This is only to be defined if we have PC-like DMA.
* By default this is not true on an Xtensa processor,
* however on boards with a PCI bus, such functionality
* might be emulated externally.
*
* NOTE: there still exists driver code that assumes
* this is defined, eg. drivers/sound/soundcard.c (as of 2.4).
*/
#define MAX_DMA_CHANNELS 8
/*
* The maximum virtual address to which DMA transfers
* can be performed on this platform.
*
* NOTE: This is board (platform) specific, not processor-specific!
*
* NOTE: This assumes DMA transfers can only be performed on
* the section of physical memory contiguously mapped in virtual
* space for the kernel. For the Xtensa architecture, this
* means the maximum possible size of this DMA area is
* the size of the statically mapped kernel segment
* (XCHAL_KSEG_{CACHED,BYPASS}_SIZE), ie. 128 MB.
*
* NOTE: When the entire KSEG area is DMA capable, we subtract
* one from the max address so that the virt_to_phys() macro
* works correctly on the address (otherwise the address
* enters another area, and virt_to_phys() may not return
* the value desired).
*/
#define MAX_DMA_ADDRESS (PAGE_OFFSET + XCHAL_KSEG_CACHED_SIZE - 1)
/* Reserve and release a DMA channel */
extern int request_dma(unsigned int dmanr, const char * device_id);
extern void free_dma(unsigned int dmanr);
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif
/*
* include/asm-xtensa/elf.h
*
* ELF register definitions
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ELF_H
#define _XTENSA_ELF_H
#include <asm/ptrace.h>
#include <asm/coprocessor.h>
#include <xtensa/config/core.h>
/* Xtensa processor ELF architecture-magic number */
#define EM_XTENSA 94
#define EM_XTENSA_OLD 0xABC7
/* ELF register definitions. This is needed for core dump support. */
/*
* elf_gregset_t contains the application-level state in the following order:
* Processor info: config_version, cpux, cpuy
* Processor state: pc, ps, exccause, excvaddr, wb, ws,
* lbeg, lend, lcount, sar
* GP regs: ar0 - arXX
*/
typedef unsigned long elf_greg_t;
typedef struct {
elf_greg_t xchal_config_id0;
elf_greg_t xchal_config_id1;
elf_greg_t cpux;
elf_greg_t cpuy;
elf_greg_t pc;
elf_greg_t ps;
elf_greg_t exccause;
elf_greg_t excvaddr;
elf_greg_t windowbase;
elf_greg_t windowstart;
elf_greg_t lbeg;
elf_greg_t lend;
elf_greg_t lcount;
elf_greg_t sar;
elf_greg_t syscall;
elf_greg_t ar[XCHAL_NUM_AREGS];
} xtensa_gregset_t;
#define ELF_NGREG (sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
/*
* Compute the size of the coprocessor and extra state layout (register info)
* table (in bytes).
* This is actually the maximum size of the table, as opposed to the size,
* which is available from the _xtensa_reginfo_table_size global variable.
*
* (See also arch/xtensa/kernel/coprocessor.S)
*
*/
#ifndef XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM
# define XTENSA_CPE_LTABLE_SIZE 0
#else
# define XTENSA_CPE_SEGMENT(num) (num ? (1+num) : 0)
# define XTENSA_CPE_LTABLE_ENTRIES \
( XTENSA_CPE_SEGMENT(XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP0_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP1_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP2_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP3_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP4_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP5_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP6_SA_CONTENTS_LIBDB_NUM) \
+ XTENSA_CPE_SEGMENT(XCHAL_CP7_SA_CONTENTS_LIBDB_NUM) \
+ 1 /* final entry */ \
)
# define XTENSA_CPE_LTABLE_SIZE (XTENSA_CPE_LTABLE_ENTRIES * 8)
#endif
/*
* Instantiations of the elf_fpregset_t type contain, in most
* architectures, the floating point (FPU) register set.
* For Xtensa, this type is extended to contain all custom state,
* ie. coprocessor and "extra" (non-coprocessor) state (including,
* for example, TIE-defined states and register files; as well
* as other optional processor state).
* This includes FPU state if a floating-point coprocessor happens
* to have been configured within the Xtensa processor.
*
* TOTAL_FPREGS_SIZE is the required size (without rounding)
* of elf_fpregset_t. It provides space for the following:
*
* a) 32-bit mask of active coprocessors for this task (similar
* to CPENABLE in single-threaded Xtensa processor systems)
*
* b) table describing the layout of custom states (ie. of
* individual registers, etc) within the save areas
*
* c) save areas for each coprocessor and for non-coprocessor
* ("extra") state
*
* Note that save areas may require up to 16-byte alignment when
* accessed by save/restore sequences. We do not need to ensure
* such alignment in an elf_fpregset_t structure because custom
* state is not directly loaded/stored into it; rather, save area
* contents are copied to elf_fpregset_t from the active save areas
* (see 'struct task_struct' definition in processor.h for that)
* using memcpy(). But we do allow space for such alignment,
* to allow optimizations of layout and copying.
*/
#define TOTAL_FPREGS_SIZE \
(4 + XTENSA_CPE_LTABLE_SIZE + XTENSA_CP_EXTRA_SIZE)
#define ELF_NFPREG \
((TOTAL_FPREGS_SIZE + sizeof(elf_fpreg_t) - 1) / sizeof(elf_fpreg_t))
typedef unsigned int elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_CORE_COPY_REGS(_eregs, _pregs) \
xtensa_elf_core_copy_regs (&_eregs, _pregs);
extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ( ( (x)->e_machine == EM_XTENSA ) || \
( (x)->e_machine == EM_XTENSA_OLD ) )
/*
* These are used to set parameters in the core dumps.
*/
#ifdef __XTENSA_EL__
# define ELF_DATA ELFDATA2LSB
#elif defined(__XTENSA_EB__)
# define ELF_DATA ELFDATA2MSB
#else
# error processor byte order undefined!
#endif
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_XTENSA
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
* This is the location that an ET_DYN program is loaded if exec'ed. Typical
* use of this is to invoke "./ld.so someprog" to test out a new version of
* the loader. We need to make sure that it is out of the way of the program
* that it will "exec", and that there is sufficient room for the brk.
*/
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
/*
* This yields a mask that user programs can use to figure out what
* instruction set this CPU supports. This could be done in user space,
* but it's not easy, and we've already done it here.
*/
#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
* specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
* For the moment, we have only optimizations for the Intel generations,
* but that could change...
*/
#define ELF_PLATFORM (NULL)
/*
* The Xtensa processor ABI says that when the program starts, a2
* contains a pointer to a function which might be registered using
* `atexit'. This provides a means for the dynamic linker to call
* DT_FINI functions for shared libraries that have been loaded before
* the code runs.
*
* A value of 0 indicates that we have no such handler.
*
* We might as well make sure everything else is cleared too (except
* for the stack pointer in a1), just to make things more
* deterministic. Also, clearing a0 terminates debugger backtraces.
*/
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->areg[0]=0; /*_r->areg[1]=0;*/ _r->areg[2]=0; _r->areg[3]=0; \
_r->areg[4]=0; _r->areg[5]=0; _r->areg[6]=0; _r->areg[7]=0; \
_r->areg[8]=0; _r->areg[9]=0; _r->areg[10]=0; _r->areg[11]=0; \
_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
} while (0)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
struct task_struct*);
extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
struct task_struct*);
#endif /* __KERNEL__ */
#endif /* _XTENSA_ELF_H */
/*
* include/asm-xtensa/errno.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_ERRNO_H
#define _XTENSA_ERRNO_H
#define EPERM 1 /* Operation not permitted */
#define ENOENT 2 /* No such file or directory */
#define ESRCH 3 /* No such process */
#define EINTR 4 /* Interrupted system call */
#define EIO 5 /* I/O error */
#define ENXIO 6 /* No such device or address */
#define E2BIG 7 /* Arg list too long */
#define ENOEXEC 8 /* Exec format error */
#define EBADF 9 /* Bad file number */
#define ECHILD 10 /* No child processes */
#define EAGAIN 11 /* Try again */
#define ENOMEM 12 /* Out of memory */
#define EACCES 13 /* Permission denied */
#define EFAULT 14 /* Bad address */
#define ENOTBLK 15 /* Block device required */
#define EBUSY 16 /* Device or resource busy */
#define EEXIST 17 /* File exists */
#define EXDEV 18 /* Cross-device link */
#define ENODEV 19 /* No such device */
#define ENOTDIR 20 /* Not a directory */
#define EISDIR 21 /* Is a directory */
#define EINVAL 22 /* Invalid argument */
#define ENFILE 23 /* File table overflow */
#define EMFILE 24 /* Too many open files */
#define ENOTTY 25 /* Not a typewriter */
#define ETXTBSY 26 /* Text file busy */
#define EFBIG 27 /* File too large */
#define ENOSPC 28 /* No space left on device */
#define ESPIPE 29 /* Illegal seek */
#define EROFS 30 /* Read-only file system */
#define EMLINK 31 /* Too many links */
#define EPIPE 32 /* Broken pipe */
#define EDOM 33 /* Math argument out of domain of func */
#define ERANGE 34 /* Math result not representable */
#define EDEADLK 35 /* Resource deadlock would occur */
#define ENAMETOOLONG 36 /* File name too long */
#define ENOLCK 37 /* No record locks available */
#define ENOSYS 38 /* Function not implemented */
#define ENOTEMPTY 39 /* Directory not empty */
#define ELOOP 40 /* Too many symbolic links encountered */
#define EWOULDBLOCK EAGAIN /* Operation would block */
#define ENOMSG 42 /* No message of desired type */
#define EIDRM 43 /* Identifier removed */
#define ECHRNG 44 /* Channel number out of range */
#define EL2NSYNC 45 /* Level 2 not synchronized */
#define EL3HLT 46 /* Level 3 halted */
#define EL3RST 47 /* Level 3 reset */
#define ELNRNG 48 /* Link number out of range */
#define EUNATCH 49 /* Protocol driver not attached */
#define ENOCSI 50 /* No CSI structure available */
#define EL2HLT 51 /* Level 2 halted */
#define EBADE 52 /* Invalid exchange */
#define EBADR 53 /* Invalid request descriptor */
#define EXFULL 54 /* Exchange full */
#define ENOANO 55 /* No anode */
#define EBADRQC 56 /* Invalid request code */
#define EBADSLT 57 /* Invalid slot */
#define EDEADLOCK EDEADLK
#define EBFONT 59 /* Bad font file format */
#define ENOSTR 60 /* Device not a stream */
#define ENODATA 61 /* No data available */
#define ETIME 62 /* Timer expired */
#define ENOSR 63 /* Out of streams resources */
#define ENONET 64 /* Machine is not on the network */
#define ENOPKG 65 /* Package not installed */
#define EREMOTE 66 /* Object is remote */
#define ENOLINK 67 /* Link has been severed */
#define EADV 68 /* Advertise error */
#define ESRMNT 69 /* Srmount error */
#define ECOMM 70 /* Communication error on send */
#define EPROTO 71 /* Protocol error */
#define EMULTIHOP 72 /* Multihop attempted */
#define EDOTDOT 73 /* RFS specific error */
#define EBADMSG 74 /* Not a data message */
#define EOVERFLOW 75 /* Value too large for defined data type */
#define ENOTUNIQ 76 /* Name not unique on network */
#define EBADFD 77 /* File descriptor in bad state */
#define EREMCHG 78 /* Remote address changed */
#define ELIBACC 79 /* Can not access a needed shared library */
#define ELIBBAD 80 /* Accessing a corrupted shared library */
#define ELIBSCN 81 /* .lib section in a.out corrupted */
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
#define EILSEQ 84 /* Illegal byte sequence */
#define ERESTART 85 /* Interrupted system call should be restarted */
#define ESTRPIPE 86 /* Streams pipe error */
#define EUSERS 87 /* Too many users */
#define ENOTSOCK 88 /* Socket operation on non-socket */
#define EDESTADDRREQ 89 /* Destination address required */
#define EMSGSIZE 90 /* Message too long */
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
#define ENOPROTOOPT 92 /* Protocol not available */
#define EPROTONOSUPPORT 93 /* Protocol not supported */
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
#define EPFNOSUPPORT 96 /* Protocol family not supported */
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
#define EADDRINUSE 98 /* Address already in use */
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
#define ENETDOWN 100 /* Network is down */
#define ENETUNREACH 101 /* Network is unreachable */
#define ENETRESET 102 /* Network dropped connection because of reset */
#define ECONNABORTED 103 /* Software caused connection abort */
#define ECONNRESET 104 /* Connection reset by peer */
#define ENOBUFS 105 /* No buffer space available */
#define EISCONN 106 /* Transport endpoint is already connected */
#define ENOTCONN 107 /* Transport endpoint is not connected */
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
#define ETIMEDOUT 110 /* Connection timed out */
#define ECONNREFUSED 111 /* Connection refused */
#define EHOSTDOWN 112 /* Host is down */
#define EHOSTUNREACH 113 /* No route to host */
#define EALREADY 114 /* Operation already in progress */
#define EINPROGRESS 115 /* Operation now in progress */
#define ESTALE 116 /* Stale NFS file handle */
#define EUCLEAN 117 /* Structure needs cleaning */
#define ENOTNAM 118 /* Not a XENIX named type file */
#define ENAVAIL 119 /* No XENIX semaphores available */
#define EISNAM 120 /* Is a named type file */
#define EREMOTEIO 121 /* Remote I/O error */
#define EDQUOT 122 /* Quota exceeded */
#define ENOMEDIUM 123 /* No medium found */
#define EMEDIUMTYPE 124 /* Wrong medium type */
#endif /* _XTENSA_ERRNO_H */
/*
* include/asm-xtensa/fcntl.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995, 1996, 1997, 1998 by Ralf Baechle
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_FCNTL_H
#define _XTENSA_FCNTL_H
/* open/fcntl - O_SYNC is only implemented on block devices and on files
located on an ext2 file system */
#define O_ACCMODE 0x0003
#define O_RDONLY 0x0000
#define O_WRONLY 0x0001
#define O_RDWR 0x0002
#define O_APPEND 0x0008
#define O_SYNC 0x0010
#define O_NONBLOCK 0x0080
#define O_CREAT 0x0100 /* not fcntl */
#define O_TRUNC 0x0200 /* not fcntl */
#define O_EXCL 0x0400 /* not fcntl */
#define O_NOCTTY 0x0800 /* not fcntl */
#define FASYNC 0x1000 /* fcntl, for BSD compatibility */
#define O_LARGEFILE 0x2000 /* allow large file opens - currently ignored */
#define O_DIRECT 0x8000 /* direct disk access hint - currently ignored*/
#define O_DIRECTORY 0x10000 /* must be a directory */
#define O_NOFOLLOW 0x20000 /* don't follow links */
#define O_NOATIME 0x100000
#define O_NDELAY O_NONBLOCK
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
#define F_SETFD 2 /* set/clear close_on_exec */
#define F_GETFL 3 /* get file->f_flags */
#define F_SETFL 4 /* set file->f_flags */
#define F_GETLK 14
#define F_GETLK64 15
#define F_SETLK 6
#define F_SETLKW 7
#define F_SETLK64 16
#define F_SETLKW64 17
#define F_SETOWN 24 /* for sockets. */
#define F_GETOWN 23 /* for sockets. */
#define F_SETSIG 10 /* for sockets. */
#define F_GETSIG 11 /* for sockets. */
/* for F_[GET|SET]FL */
#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
/* for posix fcntl() and lockf() */
#define F_RDLCK 0
#define F_WRLCK 1
#define F_UNLCK 2
/* for old implementation of bsd flock () */
#define F_EXLCK 4 /* or 3 */
#define F_SHLCK 8 /* or 4 */
/* for leases */
#define F_INPROGRESS 16
/* operations for bsd flock(), also used by the kernel implementation */
#define LOCK_SH 1 /* shared lock */
#define LOCK_EX 2 /* exclusive lock */
#define LOCK_NB 4 /* or'd with one of the above to prevent
blocking */
#define LOCK_UN 8 /* remove lock */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
#define LOCK_RW 192 /* which allows concurrent read & write ops */
typedef struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
long l_sysid;
__kernel_pid_t l_pid;
long pad[4];
} flock_t;
struct flock64 {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
pid_t l_pid;
};
#define F_LINUX_SPECIFIC_BASE 1024
#endif /* _XTENSA_FCNTL_H */
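A usage sketch (hypothetical, userspace view of the constants above): take a whole-file write lock with F_SETLKW:

/* Hypothetical usage sketch: blocks until the write lock is granted. */
static int lock_whole_file(int fd)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;	/* exclusive (write) lock */
	fl.l_whence = 0;	/* l_start relative to start of file */
	fl.l_start = 0;
	fl.l_len = 0;		/* 0 means "to end of file" */
	return fcntl(fd, F_SETLKW, &fl);
}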
/*
* include/asm-xtensa/fixmap.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_FIXMAP_H
#define _XTENSA_FIXMAP_H
#include <asm/processor.h>
#ifdef CONFIG_MMU
/*
* Here we define all the compile-time virtual addresses.
*/
#if XCHAL_SEG_MAPPABLE_VADDR != 0
# error "Current port requires virtual user space starting at 0"
#endif
#if XCHAL_SEG_MAPPABLE_SIZE < 0x80000000
# error "Current port requires at least 0x8000000 bytes for user space"
#endif
/* Verify instruction/data ram/rom and xlmi don't overlay vmalloc space. */
#define __IN_VMALLOC(addr) \
(((addr) >= VMALLOC_START) && ((addr) < VMALLOC_END))
#define __SPAN_VMALLOC(start,end) \
(((start) < VMALLOC_START) && ((end) >= VMALLOC_END))
#define INSIDE_VMALLOC(start,end) \
(__IN_VMALLOC((start)) || __IN_VMALLOC(end) || __SPAN_VMALLOC((start),(end)))
#if XCHAL_NUM_INSTROM
# if XCHAL_NUM_INSTROM == 1
# if INSIDE_VMALLOC(XCHAL_INSTROM0_VADDR,XCHAL_INSTROM0_VADDR+XCHAL_INSTROM0_SIZE)
# error vmalloc range conflicts with instrom0
# endif
# endif
# if XCHAL_NUM_INSTROM == 2
# if INSIDE_VMALLOC(XCHAL_INSTROM1_VADDR,XCHAL_INSTROM1_VADDR+XCHAL_INSTROM1_SIZE)
# error vmalloc range conflicts with instrom1
# endif
# endif
#endif
#if XCHAL_NUM_INSTRAM
# if XCHAL_NUM_INSTRAM == 1
# if INSIDE_VMALLOC(XCHAL_INSTRAM0_VADDR,XCHAL_INSTRAM0_VADDR+XCHAL_INSTRAM0_SIZE)
# error vmalloc range conflicts with instram0
# endif
# endif
# if XCHAL_NUM_INSTRAM == 2
# if INSIDE_VMALLOC(XCHAL_INSTRAM1_VADDR,XCHAL_INSTRAM1_VADDR+XCHAL_INSTRAM1_SIZE)
# error vmalloc range conflicts with instram1
# endif
# endif
#endif
#if XCHAL_NUM_DATAROM
# if XCHAL_NUM_DATAROM == 1
# if INSIDE_VMALLOC(XCHAL_DATAROM0_VADDR,XCHAL_DATAROM0_VADDR+XCHAL_DATAROM0_SIZE)
# error vmalloc range conflicts with datarom0
# endif
# endif
# if XCHAL_NUM_DATAROM == 2
# if INSIDE_VMALLOC(XCHAL_DATAROM1_VADDR,XCHAL_DATAROM1_VADDR+XCHAL_DATAROM1_SIZE)
# error vmalloc range conflicts with datarom1
# endif
# endif
#endif
#if XCHAL_NUM_DATARAM
# if XCHAL_NUM_DATARAM == 1
# if INSIDE_VMALLOC(XCHAL_DATARAM0_VADDR,XCHAL_DATARAM0_VADDR+XCHAL_DATARAM0_SIZE)
# error vmalloc range conflicts with dataram0
# endif
# endif
# if XCHAL_NUM_DATARAM == 2
# if INSIDE_VMALLOC(XCHAL_DATARAM1_VADDR,XCHAL_DATARAM1_VADDR+XCHAL_DATARAM1_SIZE)
# error vmalloc range conflicts with dataram1
# endif
# endif
#endif
#if XCHAL_NUM_XLMI
# if XCHAL_NUM_XLMI == 1
# if INSIDE_VMALLOC(XCHAL_XLMI0_VADDR,XCHAL_XLMI0_VADDR+XCHAL_XLMI0_SIZE)
# error vmalloc range conflicts with xlmi0
# endif
# endif
# if XCHAL_NUM_XLMI == 2
# if INSIDE_VMALLOC(XCHAL_XLMI1_VADDR,XCHAL_XLMI1_VADDR+XCHAL_XLMI1_SIZE)
# error vmalloc range conflicts with xlmi1
# endif
# endif
#endif
#if (XCHAL_NUM_INSTROM > 2) || \
(XCHAL_NUM_INSTRAM > 2) || \
(XCHAL_NUM_DATARAM > 2) || \
(XCHAL_NUM_DATAROM > 2) || \
(XCHAL_NUM_XLMI > 2)
# error Insufficient checks on vmalloc above for more than 2 devices
#endif
/*
* USER_VM_SIZE does not necessarily equal TASK_SIZE. We bumped
* TASK_SIZE down to 0x40000000 to simplify the handling of windowed
* call instructions (currently limited to a range of 1 GByte). User
* tasks may very well reclaim the VM space from 0x40000000 to
* 0x7fffffff in the future, so we do not want the kernel becoming
* accustomed to having any of its stuff (e.g., page tables) in this
* region. This VM region is no-man's land for now.
*/
#define USER_VM_START XCHAL_SEG_MAPPABLE_VADDR
#define USER_VM_SIZE 0x80000000
/* Size of page table: */
#define PGTABLE_SIZE_BITS (32 - XCHAL_MMU_MIN_PTE_PAGE_SIZE + 2)
#define PGTABLE_SIZE (1L << PGTABLE_SIZE_BITS)
/* All kernel-mappable space: */
#define KERNEL_ALLMAP_START (USER_VM_START + USER_VM_SIZE)
#define KERNEL_ALLMAP_SIZE (XCHAL_SEG_MAPPABLE_SIZE - KERNEL_ALLMAP_START)
/* Carve out page table at start of kernel-mappable area: */
#if KERNEL_ALLMAP_SIZE < PGTABLE_SIZE
#error "Gimme some space for page table!"
#endif
#define PGTABLE_START KERNEL_ALLMAP_START
/* Remaining kernel-mappable space: */
#define KERNEL_MAPPED_START (KERNEL_ALLMAP_START + PGTABLE_SIZE)
#define KERNEL_MAPPED_SIZE (KERNEL_ALLMAP_SIZE - PGTABLE_SIZE)
#if KERNEL_MAPPED_SIZE < 0x01000000 /* 16 MB is arbitrary for now */
# error "Shouldn't the kernel have at least *some* mappable space?"
#endif
#define MAX_LOW_MEMORY XCHAL_KSEG_CACHED_SIZE
#endif
/*
* Some constants used elsewhere, but perhaps only in Xtensa header
* files, so maybe we can get rid of some and access compile-time HAL
* directly...
*
* Note: We assume that system RAM is located at the very start of the
* kernel segments !!
*/
#define KERNEL_VM_LOW XCHAL_KSEG_CACHED_VADDR
#define KERNEL_VM_HIGH XCHAL_KSEG_BYPASS_VADDR
#define KERNEL_SPACE XCHAL_KSEG_CACHED_VADDR
/*
* Returns the physical/virtual addresses of the kernel space
* (works with the cached kernel segment only, which is the
* one normally used for kernel operation).
*/
/* PHYSICAL BYPASS CACHED
*
* bypass vaddr bypass paddr * cached vaddr
* cached vaddr cached paddr bypass vaddr *
* bypass paddr * bypass vaddr cached vaddr
* cached paddr * bypass vaddr cached vaddr
* other * * *
*/
#define PHYSADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR + XCHAL_KSEG_BYPASS_PADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_CACHED_PADDR : \
(unsigned)(a))
#define BYPASS_ADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_CACHED_SIZE)? \
(unsigned)(a) - XCHAL_KSEG_CACHED_VADDR+XCHAL_KSEG_BYPASS_VADDR: \
(unsigned)(a))
#define CACHED_ADDR(a) \
(((unsigned)(a) >= XCHAL_KSEG_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_PADDR + XCHAL_KSEG_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_CACHED_PADDR + XCHAL_KSEG_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KSEG_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KSEG_BYPASS_VADDR+XCHAL_KSEG_CACHED_VADDR : \
(unsigned)(a))
#define PHYSADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_PADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_PADDR : \
(unsigned)(a))
#define BYPASS_ADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_BYPASS_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_VADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_VADDR + XCHAL_KIO_BYPASS_VADDR : \
(unsigned)(a))
#define CACHED_ADDR_IO(a) \
(((unsigned)(a) >= XCHAL_KIO_BYPASS_PADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_PADDR + XCHAL_KIO_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KIO_CACHED_PADDR \
&& (unsigned)(a) < XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_CACHED_PADDR + XCHAL_KIO_CACHED_VADDR : \
((unsigned)(a) >= XCHAL_KIO_BYPASS_VADDR \
&& (unsigned)(a) < XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_BYPASS_SIZE) ? \
(unsigned)(a) - XCHAL_KIO_BYPASS_VADDR + XCHAL_KIO_CACHED_VADDR : \
(unsigned)(a))
#endif /* _XTENSA_ADDRSPACE_H */
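As an illustration of how these translations compose, the following
host-side sketch plugs made-up segment constants into a simplified
PHYSADDR (cached segment case only) and translates one address; the
real XCHAL_KSEG_* values are configuration-dependent:

#include <stdio.h>

#define KSEG_CACHED_VADDR	0xd0000000u	/* assumed */
#define KSEG_CACHED_PADDR	0x00000000u	/* assumed */
#define KSEG_CACHED_SIZE	0x08000000u	/* assumed: 128 MB */

#define DEMO_PHYSADDR(a) \
	(((unsigned)(a) >= KSEG_CACHED_VADDR \
	  && (unsigned)(a) < KSEG_CACHED_VADDR + KSEG_CACHED_SIZE) ? \
	 (unsigned)(a) - KSEG_CACHED_VADDR + KSEG_CACHED_PADDR : \
	 (unsigned)(a))

int main(void)
{
	unsigned vaddr = 0xd0001000u;		/* a cached kernel vaddr */
	printf("paddr = 0x%08x\n", DEMO_PHYSADDR(vaddr));	/* 0x00001000 */
	return 0;
}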
/*
* include/asm-xtensa/hardirq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HARDIRQ_H
#define _XTENSA_HARDIRQ_H
#include <linux/config.h>
#include <linux/cache.h>
#include <asm/irq.h>
/* headers.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#endif /* _XTENSA_HARDIRQ_H */
/*
* include/asm-xtensa/hdreg.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
* Copyright (C) 1994-1996 Linus Torvalds & authors
*/
#ifndef _XTENSA_HDREG_H
#define _XTENSA_HDREG_H
typedef unsigned int ide_ioreg_t;
#endif
/*
* include/asm-xtensa/highmem.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HIGHMEM_H
#define _XTENSA_HIGHMEM_H
extern void flush_cache_kmaps(void);
#endif
/*
* include/asm-xtensa/hw_irq.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2002 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_HW_IRQ_H
#define _XTENSA_HW_IRQ_H
static inline void hw_resend_irq(struct hw_interrupt_type *h, unsigned int i)
{
}
#endif
/*
* include/asm-xtensa/ide.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1996 Linus Torvalds & authors
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IDE_H
#define _XTENSA_IDE_H
#ifdef __KERNEL__
#include <linux/config.h>
#ifndef MAX_HWIFS
# define MAX_HWIFS 1
#endif
static __inline__ int ide_default_irq(unsigned long base)
{
/* Unsupported! */
return 0;
}
static __inline__ unsigned long ide_default_io_base(int index)
{
/* Unsupported! */
return 0;
}
#endif /* __KERNEL__ */
#endif /* _XTENSA_IDE_H */
/*
* linux/include/asm-xtensa/io.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IO_H
#define _XTENSA_IO_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#include <asm/fixmap.h>
#define _IO_BASE 0
/*
* swap functions to change byte order from little-endian to big-endian and
* vice versa.
*/
static inline unsigned short _swapw (unsigned short v)
{
return (v << 8) | (v >> 8);
}
static inline unsigned int _swapl (unsigned int v)
{
return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
}
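A quick host-side sanity check of the two swap helpers above (plain C,
no kernel dependencies; the function bodies are copied verbatim):

#include <assert.h>

static unsigned short _swapw(unsigned short v)
{
	return (v << 8) | (v >> 8);
}

static unsigned int _swapl(unsigned int v)
{
	return (v << 24) | ((v & 0xff00) << 8) | ((v >> 8) & 0xff00) | (v >> 24);
}

int main(void)
{
	assert(_swapw(0x1234) == 0x3412);
	assert(_swapl(0x12345678) == 0x78563412);
	return 0;
}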
/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are trivial on the 1:1 Linux/Xtensa mapping.
 */
extern inline unsigned long virt_to_phys(volatile void * address)
{
return PHYSADDR((unsigned long)address);
}
extern inline void * phys_to_virt(unsigned long address)
{
return (void*) CACHED_ADDR(address);
}
/*
* IO bus memory addresses are also 1:1 with the physical address
*/
extern inline unsigned long virt_to_bus(volatile void * address)
{
return PHYSADDR((unsigned long)address);
}
extern inline void * bus_to_virt (unsigned long address)
{
return (void *) CACHED_ADDR(address);
}
/*
 * Map I/O memory. The KIO region is statically mapped, so these simply
 * return the cached or bypass virtual address for the given offset.
 */
extern inline void *ioremap(unsigned long offset, unsigned long size)
{
return (void *) CACHED_ADDR_IO(offset);
}
extern inline void *ioremap_nocache(unsigned long offset, unsigned long size)
{
return (void *) BYPASS_ADDR_IO(offset);
}
extern inline void iounmap(void *addr)
{
}
/*
* Generic I/O
*/
#define readb(addr) \
({ unsigned char __v = (*(volatile unsigned char *)(addr)); __v; })
#define readw(addr) \
({ unsigned short __v = (*(volatile unsigned short *)(addr)); __v; })
#define readl(addr) \
({ unsigned int __v = (*(volatile unsigned int *)(addr)); __v; })
#define writeb(b, addr) (void)((*(volatile unsigned char *)(addr)) = (b))
#define writew(b, addr) (void)((*(volatile unsigned short *)(addr)) = (b))
#define writel(b, addr) (void)((*(volatile unsigned int *)(addr)) = (b))
static inline __u8 __raw_readb(const volatile void __iomem *addr)
{
return *(__force volatile __u8 *)(addr);
}
static inline __u16 __raw_readw(const volatile void __iomem *addr)
{
return *(__force volatile __u16 *)(addr);
}
static inline __u32 __raw_readl(const volatile void __iomem *addr)
{
return *(__force volatile __u32 *)(addr);
}
static inline void __raw_writeb(__u8 b, volatile void __iomem *addr)
{
*(__force volatile __u8 *)(addr) = b;
}
static inline void __raw_writew(__u16 b, volatile void __iomem *addr)
{
*(__force volatile __u16 *)(addr) = b;
}
static inline void __raw_writel(__u32 b, volatile void __iomem *addr)
{
*(__force volatile __u32 *)(addr) = b;
}
/* These are the definitions for the x86 IO instructions
* inb/inw/inl/outb/outw/outl, the "string" versions
* insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
* inb_p/inw_p/...
* The macros don't do byte-swapping.
*/
#define inb(port) readb((u8 *)((port)+_IO_BASE))
#define outb(val, port) writeb((val),(u8 *)((unsigned long)(port)+_IO_BASE))
#define inw(port) readw((u16 *)((port)+_IO_BASE))
#define outw(val, port) writew((val),(u16 *)((unsigned long)(port)+_IO_BASE))
#define inl(port) readl((u32 *)((port)+_IO_BASE))
#define outl(val, port) writel((val),(u32 *)((unsigned long)(port)))
#define inb_p(port) inb((port))
#define outb_p(val, port) outb((val), (port))
#define inw_p(port) inw((port))
#define outw_p(val, port) outw((val), (port))
#define inl_p(port) inl((port))
#define outl_p(val, port) outl((val), (port))
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
#define IO_SPACE_LIMIT ~0
#define memset_io(a,b,c) memset((void *)(a),(b),(c))
#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
/* At this point, Xtensa doesn't provide byte-swap instructions */
#ifdef __XTENSA_EB__
# define in_8(addr) (*(u8*)(addr))
# define in_le16(addr) _swapw(*(u16*)(addr))
# define in_le32(addr) _swapl(*(u32*)(addr))
# define out_8(b, addr) *(u8*)(addr) = (b)
# define out_le16(b, addr) *(u16*)(addr) = _swapw(b)
# define out_le32(b, addr) *(u32*)(addr) = _swapl(b)
#elif defined(__XTENSA_EL__)
# define in_8(addr) (*(u8*)(addr))
# define in_le16(addr) (*(u16*)(addr))
# define in_le32(addr) (*(u32*)(addr))
# define out_8(b, addr) *(u8*)(addr) = (b)
# define out_le16(b, addr) *(u16*)(addr) = (b)
# define out_le32(b, addr) *(u32*)(addr) = (b)
#else
# error processor byte order undefined!
#endif
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p) __va(p)
/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p) p
#endif /* __KERNEL__ */
#endif /* _XTENSA_IO_H */
/*
* include/asm-xtensa/ioctl.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*
* Derived from "include/asm-i386/ioctl.h"
*/
#ifndef _XTENSA_IOCTL_H
#define _XTENSA_IOCTL_H
/* ioctl command encoding: 32 bits total, command in lower 16 bits,
* size of the parameter structure in the lower 14 bits of the
* upper 16 bits.
* Encoding the size of the parameter structure in the ioctl request
* is useful for catching programs compiled with old versions
* and to avoid overwriting user space outside the user buffer area.
* The highest 2 bits are reserved for indicating the ``access mode''.
 * NOTE: This limits the max parameter size to 16 KB - 1!
*/
/*
* The following is for compatibility across the various Linux
* platforms. The i386 ioctl numbering scheme doesn't really enforce
* a type field. De facto, however, the top 8 bits of the lower 16
* bits are indeed used as a type field, so we might just as well make
* this explicit here. Please be sure to use the decoding macros
* below from now on.
*/
#define _IOC_NRBITS 8
#define _IOC_TYPEBITS 8
#define _IOC_SIZEBITS 14
#define _IOC_DIRBITS 2
#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
#define _IOC_NRSHIFT 0
#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
/*
* Direction bits.
*/
#define _IOC_NONE 0U
#define _IOC_WRITE 1U
#define _IOC_READ 2U
#define _IOC(dir,type,nr,size) \
(((dir) << _IOC_DIRSHIFT) | \
((type) << _IOC_TYPESHIFT) | \
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
/* used to decode ioctl numbers.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
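A worked example of the encoding, runnable on a Linux host where
<sys/ioctl.h> supplies the same _IOC scheme; struct demo_arg and
DEMO_GET are made-up names for illustration:

#include <assert.h>
#include <sys/ioctl.h>

struct demo_arg { int value; };		/* hypothetical parameter block */
#define DEMO_GET _IOR('d', 1, struct demo_arg)

int main(void)
{
	assert(_IOC_DIR(DEMO_GET)  == _IOC_READ);
	assert(_IOC_TYPE(DEMO_GET) == 'd');
	assert(_IOC_NR(DEMO_GET)   == 1);
	assert(_IOC_SIZE(DEMO_GET) == sizeof(struct demo_arg));
	return 0;
}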
/* ...and for the drivers/sound files... */
#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
#endif
/*
 * include/asm-xtensa/ioctls.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003 - 2005 Tensilica Inc.
*
* Derived from "include/asm-i386/ioctls.h"
*/
#ifndef _XTENSA_IOCTLS_H
#define _XTENSA_IOCTLS_H
#include <asm/ioctl.h>
#define FIOCLEX _IO('f', 1)
#define FIONCLEX _IO('f', 2)
#define FIOASYNC _IOW('f', 125, int)
#define FIONBIO _IOW('f', 126, int)
#define FIONREAD _IOR('f', 127, int)
#define TIOCINQ FIONREAD
#define FIOQSIZE _IOR('f', 128, loff_t)
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA _IOR('t', 23, struct termio)
#define TCSETA _IOW('t', 24, struct termio)
#define TCSETAW _IOW('t', 25, struct termio)
#define TCSETAF _IOW('t', 28, struct termio)
#define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
#define TIOCSPGRP _IOW('t', 118, int)
#define TIOCGPGRP _IOR('t', 119, int)
#define TIOCEXCL _IO('T', 12)
#define TIOCNXCL _IO('T', 13)
#define TIOCSCTTY _IO('T', 14)
#define TIOCSTI _IOW('T', 18, char)
#define TIOCMGET _IOR('T', 21, unsigned int)
#define TIOCMBIS _IOW('T', 22, unsigned int)
#define TIOCMBIC _IOW('T', 23, unsigned int)
#define TIOCMSET _IOW('T', 24, unsigned int)
# define TIOCM_LE 0x001
# define TIOCM_DTR 0x002
# define TIOCM_RTS 0x004
# define TIOCM_ST 0x008
# define TIOCM_SR 0x010
# define TIOCM_CTS 0x020
# define TIOCM_CAR 0x040
# define TIOCM_RNG 0x080
# define TIOCM_DSR 0x100
# define TIOCM_CD TIOCM_CAR
# define TIOCM_RI TIOCM_RNG
#define TIOCGSOFTCAR _IOR('T', 25, unsigned int)
#define TIOCSSOFTCAR _IOW('T', 26, unsigned int)
#define TIOCLINUX _IOW('T', 28, char)
#define TIOCCONS _IO('T', 29)
#define TIOCGSERIAL _IOR('T', 30, struct serial_struct)
#define TIOCSSERIAL _IOW('T', 31, struct serial_struct)
#define TIOCPKT _IOW('T', 32, int)
# define TIOCPKT_DATA 0
# define TIOCPKT_FLUSHREAD 1
# define TIOCPKT_FLUSHWRITE 2
# define TIOCPKT_STOP 4
# define TIOCPKT_START 8
# define TIOCPKT_NOSTOP 16
# define TIOCPKT_DOSTOP 32
#define TIOCNOTTY _IO('T', 34)
#define TIOCSETD _IOW('T', 35, int)
#define TIOCGETD _IOR('T', 36, int)
#define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/
#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/
#define TIOCSBRK _IO('T', 39) /* BSD compatibility */
#define TIOCCBRK _IO('T', 40) /* BSD compatibility */
#define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/
#define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */
#define TIOCSERCONFIG _IO('T', 83)
#define TIOCSERGWILD _IOR('T', 84, int)
#define TIOCSERSWILD _IOW('T', 85, int)
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
#define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
# define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */
#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */
#define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */
#define TIOCGICOUNT _IOR('T', 93, struct async_icount) /* read serial port inline interrupt counts */
#endif /* _XTENSA_IOCTLS_H */
/*
* include/asm-xtensa/ipc.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IPC_H
#define _XTENSA_IPC_H
struct ipc_kludge {
struct msgbuf __user *msgp;
long msgtyp;
};
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
#define SEMTIMEDOP 4
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
#define MSGCTL 14
#define SHMAT 21
#define SHMDT 22
#define SHMGET 23
#define SHMCTL 24
#define IPCCALL(version,op) ((version)<<16 | (op))
#endif /* _XTENSA_IPC_H */
/*
* include/asm-xtensa/ipcbuf.h
*
* The ipc64_perm structure for the Xtensa architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IPCBUF_H
#define _XTENSA_IPCBUF_H
/*
* Pad space is left for:
* - 32-bit mode_t and seq
* - 2 miscellaneous 32-bit values
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned long seq;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* _XTENSA_IPCBUF_H */
/*
* include/asm-xtensa/irq.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_IRQ_H
#define _XTENSA_IRQ_H
#include <linux/config.h>
#include <asm/platform/hardware.h>
#include <xtensa/config/core.h>
#ifndef PLATFORM_NR_IRQS
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
#define NR_IRQS (XTENSA_NR_IRQS + PLATFORM_NR_IRQS)
static __inline__ int irq_canonicalize(int irq)
{
return (irq);
}
struct irqaction;
#if 0 // FIXME
extern void disable_irq_nosync(unsigned int);
extern void disable_irq(unsigned int);
extern void enable_irq(unsigned int);
#endif
#endif /* _XTENSA_IRQ_H */
/*
* include/asm-xtensa/kmap_types.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_KMAP_TYPES_H
#define _XTENSA_KMAP_TYPES_H
enum km_type {
KM_BOUNCE_READ,
KM_SKB_SUNRPC_DATA,
KM_SKB_DATA_SOFTIRQ,
KM_USER0,
KM_USER1,
KM_BIO_SRC_IRQ,
KM_BIO_DST_IRQ,
KM_PTE0,
KM_PTE1,
KM_IRQ0,
KM_IRQ1,
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_TYPE_NR
};
#endif /* _XTENSA_KMAP_TYPES_H */
/*
* include/asm-xtensa/linkage.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_LINKAGE_H
#define _XTENSA_LINKAGE_H
/* Nothing to do here ... */
#endif /* _XTENSA_LINKAGE_H */
/*
* include/asm-xtensa/local.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_LOCAL_H
#define _XTENSA_LOCAL_H
#include <asm-generic/local.h>
#endif /* _XTENSA_LOCAL_H */
/*
* include/asm-xtensa/mman.h
*
* Xtensa Processor memory-manager definitions
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 by Ralf Baechle
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_MMAN_H
#define _XTENSA_MMAN_H
/*
* Protections are chosen from these bits, OR'd together. The
* implementation does not necessarily support PROT_EXEC or PROT_WRITE
* without PROT_READ. The only guarantees are that no writing will be
* allowed without PROT_WRITE and no access will be allowed for PROT_NONE.
*/
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x10 /* page may be used for atomic ops */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
/*
* Flags for mmap
*/
#define MAP_SHARED 0x001 /* Share changes */
#define MAP_PRIVATE 0x002 /* Changes are private */
#define MAP_TYPE 0x00f /* Mask for type of mapping */
#define MAP_FIXED 0x010 /* Interpret addr exactly */
/* not used by linux, but here to make sure we don't clash with ABI defines */
#define MAP_RENAME 0x020 /* Assign page to file */
#define MAP_AUTOGROW 0x040 /* File may grow by writing */
#define MAP_LOCAL 0x080 /* Copy on fork/sproc */
#define MAP_AUTORSRV 0x100 /* Logical swap reserved on demand */
/* These are linux-specific */
#define MAP_NORESERVE 0x0400 /* don't check for reservations */
#define MAP_ANONYMOUS 0x0800 /* don't use a file */
#define MAP_GROWSDOWN 0x1000 /* stack-like segment */
#define MAP_DENYWRITE 0x2000 /* ETXTBSY */
#define MAP_EXECUTABLE 0x4000 /* mark it as an executable */
#define MAP_LOCKED 0x8000 /* pages are locked */
#define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x20000 /* do not block on IO */
/*
* Flags for msync
*/
#define MS_ASYNC 0x0001 /* sync memory asynchronously */
#define MS_INVALIDATE 0x0002 /* invalidate mappings & caches */
#define MS_SYNC 0x0004 /* synchronous memory sync */
/*
* Flags for mlockall
*/
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
#define MADV_NORMAL 0x0 /* default page-in behavior */
#define MADV_RANDOM 0x1 /* page-in minimum required */
#define MADV_SEQUENTIAL 0x2 /* read-ahead aggressively */
#define MADV_WILLNEED 0x3 /* pre-fault pages */
#define MADV_DONTNEED 0x4 /* discard these pages */
/* compatibility flags */
#define MAP_ANON MAP_ANONYMOUS
#define MAP_FILE 0
#endif /* _XTENSA_MMAN_H */
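Usage sketch (not part of the header): although some numeric values above
follow the MIPS-derived layout rather than the i386 one, the names are the
standard Linux ones, so a typical anonymous mapping looks the same as on
any other port:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	((char *)p)[0] = 42;	/* the page is now readable and writable */
	munmap(p, 4096);
	return 0;
}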
/*
* include/asm-xtensa/mmu.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_MMU_H
#define _XTENSA_MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#endif /* _XTENSA_MMU_H */
/*
* include/asm-xtensa/mmu_context.h
*
* Switch an MMU context.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H
#include <linux/config.h>
#include <linux/stringify.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
* Linux was ported to Xtensa assuming all auto-refill ways in set 0
* had the same properties (a very likely assumption). Multiple sets
* of auto-refill ways will still work properly, but not as optimally
* as the Xtensa designer may have assumed.
*
* We make this case a hard #error, killing the kernel build, to alert
* the developer to this condition (which is more likely an error).
* You super-duper clever developers can change it to a warning or
* remove it altogether if you think you know what you're doing. :)
*/
#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif
#if ((XCHAL_ITLB_ARF_WAYS == 0) || (XCHAL_DTLB_ARF_WAYS == 0))
# error "MMU must have auto-refill ways"
#endif
#if ((XCHAL_ITLB_ARF_SETS != 1) || (XCHAL_DTLB_ARF_SETS != 1))
# error Linux may not use all auto-refill ways as efficiently as you think
#endif
#if (XCHAL_MMU_MAX_PTE_PAGE_SIZE != XCHAL_MMU_MIN_PTE_PAGE_SIZE)
# error Only one page size allowed!
#endif
extern unsigned long asid_cache;
extern pgd_t *current_pgd;
/*
* Define the number of entries per auto-refill way in set 0 of both I and D
* TLBs. We deal only with set 0 here (an assumption further explained in
* assertions.h). Also, define the total number of ARF entries in both TLBs.
*/
#define ITLB_ENTRIES_PER_ARF_WAY (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,ENTRIES))
#define DTLB_ENTRIES_PER_ARF_WAY (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,ENTRIES))
#define ITLB_ENTRIES \
(ITLB_ENTRIES_PER_ARF_WAY * (XCHAL_ITLB_SET(XCHAL_ITLB_ARF_SET0,WAYS)))
#define DTLB_ENTRIES \
(DTLB_ENTRIES_PER_ARF_WAY * (XCHAL_DTLB_SET(XCHAL_DTLB_ARF_SET0,WAYS)))
/*
* SMALLEST_NTLB_ENTRIES is the smaller of ITLB_ENTRIES and DTLB_ENTRIES.
* In practice, they are probably equal. This macro simplifies function
* flush_tlb_range().
*/
#if (DTLB_ENTRIES < ITLB_ENTRIES)
# define SMALLEST_NTLB_ENTRIES DTLB_ENTRIES
#else
# define SMALLEST_NTLB_ENTRIES ITLB_ENTRIES
#endif
/*
* asid_cache tracks only the ASID[USER_RING] field of the RASID special
* register, which is the current user-task asid allocation value.
* mm->context has the same meaning. When it comes time to write the
* asid_cache or mm->context values to the RASID special register, we first
* shift the value left by 8, then insert the value.
* ASID[0] always contains the kernel's asid value, and we reserve three
* other asid values that we never assign to user tasks.
*/
#define ASID_INC 0x1
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
/*
* XCHAL_MMU_ASID_INVALID is a configurable Xtensa processor constant
* indicating invalid address space. XCHAL_MMU_ASID_KERNEL is a configurable
* Xtensa processor constant indicating the kernel address space. They can
* be arbitrary values.
*
* We identify three more unique, reserved ASID values to use in the unused
* ring positions. No other user process will be assigned these reserved
* ASID values.
*
* For example, given that
*
* XCHAL_MMU_ASID_INVALID == 0
* XCHAL_MMU_ASID_KERNEL == 1
*
* the following maze of #if statements would generate
*
* ASID_RESERVED_1 == 2
* ASID_RESERVED_2 == 3
* ASID_RESERVED_3 == 4
* ASID_FIRST_NONRESERVED == 5
*/
#if (XCHAL_MMU_ASID_INVALID != XCHAL_MMU_ASID_KERNEL + 1)
# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 1) & ASID_MASK)
#else
# define ASID_RESERVED_1 ((XCHAL_MMU_ASID_KERNEL + 2) & ASID_MASK)
#endif
#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_1 + 1)
# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_2 ((ASID_RESERVED_1 + 2) & ASID_MASK)
#endif
#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_2 + 1)
# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 1) & ASID_MASK)
#else
# define ASID_RESERVED_3 ((ASID_RESERVED_2 + 2) & ASID_MASK)
#endif
#if (XCHAL_MMU_ASID_INVALID != ASID_RESERVED_3 + 1)
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 1) & ASID_MASK)
#else
# define ASID_FIRST_NONRESERVED ((ASID_RESERVED_3 + 2) & ASID_MASK)
#endif
#define ASID_ALL_RESERVED ( ((ASID_RESERVED_1) << 24) + \
((ASID_RESERVED_2) << 16) + \
((ASID_RESERVED_3) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
/*
* NO_CONTEXT is the invalid ASID value that we don't ever assign to
* any user or kernel context. NO_CONTEXT is a better mnemonic than
* XCHAL_MMU_ASID_INVALID, so we use it in code instead.
*/
#define NO_CONTEXT XCHAL_MMU_ASID_INVALID
#if (KERNEL_RING != 0)
# error The KERNEL_RING really should be zero.
#endif
#if (USER_RING >= XCHAL_MMU_RINGS)
# error USER_RING cannot be greater than the highest numbered ring.
#endif
#if (USER_RING == KERNEL_RING)
# error The user and kernel rings really should not be equal.
#endif
#if (USER_RING == 1)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
((ASID_RESERVED_2) << 16) + \
(((x) & (ASID_MASK)) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
#elif (USER_RING == 2)
#define ASID_INSERT(x) ( ((ASID_RESERVED_1) << 24) + \
(((x) & (ASID_MASK)) << 16) + \
((ASID_RESERVED_2) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
#elif (USER_RING == 3)
#define ASID_INSERT(x) ( (((x) & (ASID_MASK)) << 24) + \
((ASID_RESERVED_1) << 16) + \
((ASID_RESERVED_2) << 8) + \
((XCHAL_MMU_ASID_KERNEL)) )
#else
#error Goofy value for USER_RING
#endif /* USER_RING == 1 */
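Illustrative sketch of the RASID layout ASID_INSERT produces for
USER_RING == 1, using the example configuration from the comment above
(ASID_INVALID == 0, ASID_KERNEL == 1, hence reserved ASIDs 2 and 3) and
an assumed 8-bit ASID field; host C, not kernel code:

#include <stdio.h>

#define DEMO_ASID_KERNEL	1
#define DEMO_ASID_RESERVED_1	2
#define DEMO_ASID_RESERVED_2	3
#define DEMO_ASID_MASK		0xff

#define DEMO_ASID_INSERT(x)	(((DEMO_ASID_RESERVED_1) << 24) + \
				 ((DEMO_ASID_RESERVED_2) << 16) + \
				 (((x) & DEMO_ASID_MASK) << 8) + \
				 (DEMO_ASID_KERNEL))

int main(void)
{
	/* user ASID 5: rings 3..0 hold 0x02, 0x03, 0x05, 0x01 */
	printf("RASID = 0x%08x\n", DEMO_ASID_INSERT(5));	/* 0x02030501 */
	return 0;
}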
/*
 * All upper bits that are unused by the hardware are treated as a
 * software asid extension.
 */
#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION \
((unsigned long)(~ASID_VERSION_MASK) + 1 + ASID_FIRST_NONRESERVED)
extern inline void set_rasid_register (unsigned long val)
{
__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
" isync\n" : : "a" (val));
}
extern inline unsigned long get_rasid_register (void)
{
unsigned long tmp;
__asm__ __volatile__ (" rsr %0, "__stringify(RASID)"\n\t" : "=a" (tmp));
return tmp;
}
#if ((XCHAL_MMU_ASID_INVALID == 0) && (XCHAL_MMU_ASID_KERNEL == 1))
extern inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
extern void flush_tlb_all(void);
if (! ((asid += ASID_INC) & ASID_MASK) ) {
flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
asid += ASID_FIRST_NONRESERVED;
}
mm->context = asid_cache = asid;
}
#else
#warning ASID_{INVALID,KERNEL} values impose non-optimal get_new_mmu_context implementation
/* XCHAL_MMU_ASID_INVALID == 0 and XCHAL_MMU_ASID_KERNEL == 1 are
   really the best, but if you insist... */
extern inline int validate_asid (unsigned long asid)
{
switch (asid) {
case XCHAL_MMU_ASID_INVALID:
case XCHAL_MMU_ASID_KERNEL:
case ASID_RESERVED_1:
case ASID_RESERVED_2:
case ASID_RESERVED_3:
return 0; /* can't use these values as ASIDs */
}
return 1; /* valid */
}
extern inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long asid)
{
extern void flush_tlb_all(void);
while (1) {
asid += ASID_INC;
if ( ! (asid & ASID_MASK) ) {
flush_tlb_all(); /* start new asid cycle */
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION - ASID_FIRST_NONRESERVED;
asid += ASID_FIRST_NONRESERVED;
break; /* no need to validate here */
}
if (validate_asid (asid & ASID_MASK))
break;
}
mm->context = asid_cache = asid;
}
#endif
/*
* Initialize the context related info for a new mm_struct
* instance.
*/
extern inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context = NO_CONTEXT;
return 0;
}
extern inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned long asid = asid_cache;
/* Check if our ASID is of an older version and thus invalid */
if ((next->context ^ asid) & ASID_VERSION_MASK)
get_new_mmu_context(next, asid);
set_rasid_register (ASID_INSERT(next->context));
invalidate_page_directory();
}
#define deactivate_mm(tsk, mm) do { } while(0)
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
extern inline void destroy_context(struct mm_struct *mm)
{
/* Nothing to do. */
}
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
extern inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, asid_cache);
set_rasid_register (ASID_INSERT(next->context));
invalidate_page_directory();
}
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
/* Nothing to do. */
}
#endif /* _XTENSA_MMU_CONTEXT_H */
/*
* include/asm-xtensa/module.h
*
* This file contains the module code specific to the Xtensa architecture.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_MODULE_H
#define _XTENSA_MODULE_H
struct mod_arch_specific
{
/* Module support is not completely implemented. */
};
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#endif /* _XTENSA_MODULE_H */
/*
* include/asm-xtensa/msgbuf.h
*
* The msqid64_ds structure for the Xtensa architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*/
#ifndef _XTENSA_MSGBUF_H
#define _XTENSA_MSGBUF_H
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifdef __XTENSA_EB__
unsigned int __unused1;
__kernel_time_t msg_stime; /* last msgsnd time */
unsigned int __unused2;
__kernel_time_t msg_rtime; /* last msgrcv time */
unsigned int __unused3;
__kernel_time_t msg_ctime; /* last change time */
#elif defined(__XTENSA_EL__)
__kernel_time_t msg_stime; /* last msgsnd time */
unsigned int __unused1;
__kernel_time_t msg_rtime; /* last msgrcv time */
unsigned int __unused2;
__kernel_time_t msg_ctime; /* last change time */
unsigned int __unused3;
#else
# error processor byte order undefined!
#endif
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused4;
unsigned long __unused5;
};
#endif /* _XTENSA_MSGBUF_H */
/*
* include/asm-xtensa/namei.h
*
* Included from linux/fs/namei.c
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_NAMEI_H
#define _XTENSA_NAMEI_H
#ifdef __KERNEL__
/* This dummy routine may be changed to something useful
* for /usr/gnemul/ emulation stuff.
* Look at asm-sparc/namei.h for details.
*/
#define __emul_prefix() NULL
#endif /* __KERNEL__ */
#endif /* _XTENSA_NAMEI_H */
/*
* linux/include/asm-xtensa/page.h
*
* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H
#ifdef __KERNEL__
#include <asm/processor.h>
#include <linux/config.h>
/*
* PAGE_SHIFT determines the page size
* PAGE_ALIGN(x) aligns the pointer to the (next) page boundary
*/
#define PAGE_SHIFT XCHAL_MMU_MIN_PTE_PAGE_SIZE
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE - 1) & PAGE_MASK)
#define DCACHE_WAY_SIZE (XCHAL_DCACHE_SIZE / XCHAL_DCACHE_WAYS)
#define PAGE_OFFSET XCHAL_KSEG_CACHED_VADDR
#ifdef __ASSEMBLY__
#define __pgprot(x) (x)
#else
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t; /* page table entry */
typedef struct { unsigned long pmd; } pmd_t; /* PMD table entry */
typedef struct { unsigned long pgd; } pgd_t; /* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
/*
* Pure 2^n version of get_order
*/
extern __inline__ int get_order(unsigned long size)
{
int order;
#ifndef XCHAL_HAVE_NSU
unsigned long x1, x2, x4, x8, x16;
size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
x1 = size & 0xAAAAAAAA;
x2 = size & 0xCCCCCCCC;
x4 = size & 0xF0F0F0F0;
x8 = size & 0xFF00FF00;
x16 = size & 0xFFFF0000;
order = x2 ? 2 : 0;
order += (x16 != 0) * 16;
order += (x8 != 0) * 8;
order += (x4 != 0) * 4;
order += (x1 != 0);
return order;
#else
size = (size - 1) >> PAGE_SHIFT;
asm ("nsau %0, %1" : "=r" (order) : "r" (size));
return 32 - order;
#endif
}
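get_order(size) returns the order n such that 2^n pages cover size. The
mask-based branch above is, as its comment says, a "pure 2^n version":
it relies on the rounded page count being a power of two. A naive
host-side reference for the general rounded-up case, assuming 4 KB
pages, looks like this:

#include <assert.h>

static int get_order_ref(unsigned long size)
{
	int order = 0;
	size = (size + 4096 - 1) >> 12;		/* bytes -> pages, rounded up */
	while ((1UL << order) < size)		/* smallest 2^order >= pages */
		order++;
	return order;
}

int main(void)
{
	assert(get_order_ref(1)        == 0);	/* one page */
	assert(get_order_ref(4096)     == 0);
	assert(get_order_ref(4097)     == 1);	/* two pages */
	assert(get_order_ref(5 * 4096) == 3);	/* five pages round up to 8 */
	return 0;
}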
struct page;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);
/*
* If we have cache aliasing and writeback caches, we might have to do
* some extra work
*/
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
void clear_user_page(void *addr, unsigned long vaddr, struct page* page);
void copy_user_page(void *to, void* from, unsigned long vaddr, struct page* page);
#else
# define clear_user_page(page,vaddr,pg) clear_page(page)
# define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#endif
/*
* This handles the memory map. We handle pages at
* XCHAL_KSEG_CACHED_VADDR for kernels with 32 bit address space.
 * These macros are for conversion of kernel addresses, not user
* addresses.
*/
#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr)
#ifndef CONFIG_DISCONTIGMEM
# define pfn_to_page(pfn) (mem_map + (pfn))
# define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#else
# error CONFIG_DISCONTIGMEM not supported
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define WANT_PAGE_VIRTUAL
#endif /* __ASSEMBLY__ */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#endif /* __KERNEL__ */
#endif /* _XTENSA_PAGE_H */
/*
* include/asm-xtensa/param.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PARAM_H
#define _XTENSA_PARAM_H
#include <xtensa/config/core.h>
#ifdef __KERNEL__
# define HZ 100 /* internal timer frequency */
# define USER_HZ 100 /* for user interfaces in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ)	/* frequency at which times() counts */
#endif
#define EXEC_PAGESIZE (1 << XCHAL_MMU_MIN_PTE_PAGE_SIZE)
#ifndef NGROUPS
#define NGROUPS 32
#endif
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif /* _XTENSA_PARAM_H */
/*
* include/asm-xtensa/pci-bridge.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PCI_BRIDGE_H
#define _XTENSA_PCI_BRIDGE_H
#ifdef __KERNEL__
struct device_node;
struct pci_controller;
/*
* pciauto_bus_scan() enumerates the pci space.
*/
extern int pciauto_bus_scan(struct pci_controller *, int);
struct pci_space {
unsigned long start;
unsigned long end;
unsigned long base;
};
/*
* Structure of a PCI controller (host bridge)
*/
struct pci_controller {
int index; /* used for pci_controller_num */
struct pci_controller *next;
struct pci_bus *bus;
void *arch_data;
int first_busno;
int last_busno;
struct pci_ops *ops;
volatile unsigned int *cfg_addr;
volatile unsigned char *cfg_data;
/* Currently, we limit ourselves to 1 IO range and 3 mem
* ranges since the common pci_bus structure can't handle more
*/
struct resource io_resource;
struct resource mem_resources[3];
int mem_resource_count;
/* Host bridge I/O and Memory space
* Used for BAR placement algorithms
*/
struct pci_space io_space;
struct pci_space mem_space;
	/* Return the interrupt number for a device. */
int (*map_irq)(struct pci_dev*, u8, u8);
};
static inline void pcibios_init_resource(struct resource *res,
unsigned long start, unsigned long end, int flags, char *name)
{
res->start = start;
res->end = end;
res->flags = flags;
res->name = name;
res->parent = NULL;
res->sibling = NULL;
res->child = NULL;
}
/* These are used for config access before all the PCI probing has been done. */
int early_read_config_byte(struct pci_controller*, int, int, int, u8*);
int early_read_config_word(struct pci_controller*, int, int, int, u16*);
int early_read_config_dword(struct pci_controller*, int, int, int, u32*);
int early_write_config_byte(struct pci_controller*, int, int, int, u8);
int early_write_config_word(struct pci_controller*, int, int, int, u16);
int early_write_config_dword(struct pci_controller*, int, int, int, u32);
#endif /* __KERNEL__ */
#endif /* _XTENSA_PCI_BRIDGE_H */
/*
* linux/include/asm-xtensa/pci.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PCI_H
#define _XTENSA_PCI_H
#ifdef __KERNEL__
/* Can be used to override the logic in pci_scan_bus for skipping
* already-configured bus numbers - to be used for buggy BIOSes
* or architectures with incomplete PCI setup by the loader
*/
#define pcibios_assign_all_busses() 0
extern struct pci_controller* pcibios_alloc_controller(void);
extern inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
extern inline void pcibios_penalize_isa_irq(int irq)
{
/* We don't do dynamic PCI IRQ allocation */
}
/* Assume some values. (We should revise them, if necessary) */
#define PCIBIOS_MIN_IO 0x2000
#define PCIBIOS_MIN_MEM 0x10000000
/* Dynamic DMA mapping stuff.
* Xtensa has everything mapped statically like x86.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>
struct pci_dev;
/* The PCI address space does equal the physical memory address space.
* The networking and block device layers use this boolean for bounce buffer
* decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
/* pci_unmap_{page,single} is a no-op, so the unmap state can stay empty: */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
/* We cannot access memory above 4GB */
#define pci_dac_dma_supported(pci_dev, mask) (0)
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
static inline void pcibios_add_platform_entries(struct pci_dev *dev)
{
}
#endif /* __KERNEL__ */
/* Implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
/* Generic PCI */
#include <asm-generic/pci.h>
#endif /* _XTENSA_PCI_H */
/*
* linux/include/asm-xtensa/percpu.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_PERCPU__
#define _XTENSA_PERCPU__
#include <asm-generic/percpu.h>
#endif /* _XTENSA_PERCPU__ */
/*
* linux/include/asm-xtensa/pgalloc.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Copyright (C) 2001-2005 Tensilica Inc.
*/
#ifndef _XTENSA_PGALLOC_H
#define _XTENSA_PGALLOC_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
/* Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 *        |cache |
 *  |pgnum |page|	virtual address
 *  |xxxxxX|zzzz|
 *  |      |    |
 *    \  / |    |
 *   trans.|    |
 *    /  \ |    |
 *  |yyyyyY|zzzz|	physical address
 *
 * When the page number is translated to the physical page address, the
 * lowest bit(s) (X) that are also part of the cache index are translated
 * as well (Y). If the translation changes such a bit (X), the cache index
 * changes too, selecting a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by this bit) remains the same when pages are allocated or
 * remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */
#if (DCACHE_WAY_SIZE > PAGE_SIZE)
# define PAGE_COLOR_MASK (PAGE_MASK & (DCACHE_WAY_SIZE-1))
# define PAGE_COLOR(a) \
(((unsigned long)(a)&PAGE_COLOR_MASK) >> PAGE_SHIFT)
# define PAGE_COLOR_EQ(a,b) \
((((unsigned long)(a) ^ (unsigned long)(b)) & PAGE_COLOR_MASK) == 0)
# define PAGE_COLOR_MAP0(v) \
(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK))
# define PAGE_COLOR_MAP1(v) \
(VMALLOC_END + ((unsigned long)(v) & PAGE_COLOR_MASK) + DCACHE_WAY_SIZE)
#endif
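Sketch of the color computation for an assumed configuration with an
8 KB cache way and 4 KB pages, i.e. a single color bit just above the
page offset; host C with demo constants:

#include <stdio.h>

#define DEMO_PAGE_SHIFT		12
#define DEMO_PAGE_SIZE		(1ul << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK		(~(DEMO_PAGE_SIZE - 1))
#define DEMO_DCACHE_WAY_SIZE	(2 * DEMO_PAGE_SIZE)	/* 8 KB */
#define DEMO_COLOR_MASK		(DEMO_PAGE_MASK & (DEMO_DCACHE_WAY_SIZE - 1))
#define DEMO_PAGE_COLOR(a) \
	((((unsigned long)(a)) & DEMO_COLOR_MASK) >> DEMO_PAGE_SHIFT)

int main(void)
{
	printf("color(0xd0001000) = %lu\n", DEMO_PAGE_COLOR(0xd0001000));  /* 1 */
	printf("color(0xd0002000) = %lu\n", DEMO_PAGE_COLOR(0xd0002000));  /* 0 */
	return 0;
}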
/*
* Allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#define pgd_free(pgd) free_page((unsigned long)(pgd))
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *pte)
{
pmd_val(*(pmdp)) = (unsigned long)(pte);
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *page)
{
pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page);
__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
}
#else
# define pmd_populate_kernel(mm, pmdp, pte) \
(pmd_val(*(pmdp)) = (unsigned long)(pte))
# define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = (unsigned long)page_to_virt(page))
#endif
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, PGD_ORDER);
if (likely(pgd != NULL))
__flush_dcache_page((unsigned long)pgd);
return pgd;
}
extern pte_t* pte_alloc_one_kernel(struct mm_struct* mm, unsigned long addr);
extern struct page* pte_alloc_one(struct mm_struct* mm, unsigned long addr);
#define pte_free_kernel(pte) free_page((unsigned long)pte)
#define pte_free(pte) __free_page(pte)
#endif /* __KERNEL__ */
#endif /* _XTENSA_PGALLOC_H */
/*
* include/asm-xtensa/poll.h
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of
* this archive for more details.
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_POLL_H
#define _XTENSA_POLL_H
#define POLLIN 0x0001
#define POLLPRI 0x0002
#define POLLOUT 0x0004
#define POLLERR 0x0008
#define POLLHUP 0x0010
#define POLLNVAL 0x0020
#define POLLRDNORM 0x0040
#define POLLRDBAND 0x0080
#define POLLWRNORM POLLOUT
#define POLLWRBAND 0x0100
#define POLLMSG 0x0400
#define POLLREMOVE 0x0800
struct pollfd {
int fd;
short events;
short revents;
};
#endif /* _XTENSA_POLL_H */
/*
* include/asm-xtensa/posix_types.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Largely copied from include/asm-ppc/posix_types.h
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*/
#ifndef _XTENSA_POSIX_TYPES_H
#define _XTENSA_POSIX_TYPES_H
/*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
typedef struct {
int val[2];
} __kernel_fsid_t;
#ifndef __GNUC__
#define __FD_SET(d, set) ((set)->fds_bits[__FDELT(d)] |= __FDMASK(d))
#define __FD_CLR(d, set) ((set)->fds_bits[__FDELT(d)] &= ~__FDMASK(d))
#define __FD_ISSET(d, set) ((set)->fds_bits[__FDELT(d)] & __FDMASK(d))
#define __FD_ZERO(set) \
((void) memset ((__ptr_t) (set), 0, sizeof (__kernel_fd_set)))
#else /* __GNUC__ */
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) \
|| (__GLIBC__ == 2 && __GLIBC_MINOR__ == 0)
/* With GNU C, use inline functions instead so args are evaluated only once: */
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
fdsetp->fds_bits[_tmp] |= (1UL<<_rem);
}
#undef __FD_CLR
static __inline__ void __FD_CLR(unsigned long fd, __kernel_fd_set *fdsetp)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
fdsetp->fds_bits[_tmp] &= ~(1UL<<_rem);
}
#undef __FD_ISSET
static __inline__ int __FD_ISSET(unsigned long fd, __kernel_fd_set *p)
{
unsigned long _tmp = fd / __NFDBITS;
unsigned long _rem = fd % __NFDBITS;
return (p->fds_bits[_tmp] & (1UL<<_rem)) != 0;
}
/*
* This will unroll the loop for the normal constant case (8 ints,
* for a 256-bit fd_set)
*/
#undef __FD_ZERO
static __inline__ void __FD_ZERO(__kernel_fd_set *p)
{
unsigned int *tmp = (unsigned int *)p->fds_bits;
int i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
case 8:
tmp[0] = 0; tmp[1] = 0; tmp[2] = 0; tmp[3] = 0;
tmp[4] = 0; tmp[5] = 0; tmp[6] = 0; tmp[7] = 0;
return;
}
}
i = __FDSET_LONGS;
while (i) {
i--;
*tmp = 0;
tmp++;
}
}
#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
#endif /* __GNUC__ */
#endif /* _XTENSA_POSIX_TYPES_H */
/*
* include/asm-xtensa/resource.h
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2005 Tensilica Inc.
*/
#ifndef _XTENSA_RESOURCE_H
#define _XTENSA_RESOURCE_H
#include <asm-generic/resource.h>
#endif /* _XTENSA_RESOURCE_H */