Commit c8eef880 authored by Paul Mundt

sh: Purge dead sh64 headers.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent e150e7f2
include include/asm-generic/Kbuild.asm
#ifndef __ASM_SH64_A_OUT_H
#define __ASM_SH64_A_OUT_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/a.out.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
struct exec
{
unsigned long a_info; /* Use macros N_MAGIC, etc for access */
unsigned a_text; /* length of text, in bytes */
unsigned a_data; /* length of data, in bytes */
unsigned a_bss; /* length of uninitialized data area for file, in bytes */
unsigned a_syms; /* length of symbol table data in file, in bytes */
unsigned a_entry; /* start address */
unsigned a_trsize; /* length of relocation info for text, in bytes */
unsigned a_drsize; /* length of relocation info for data, in bytes */
};
#define N_TRSIZE(a) ((a).a_trsize)
#define N_DRSIZE(a) ((a).a_drsize)
#define N_SYMSIZE(a) ((a).a_syms)
#ifdef __KERNEL__
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
#endif
#endif /* __ASM_SH64_A_OUT_H */
#ifndef __ASM_SH64_ATOMIC_H
#define __ASM_SH64_ATOMIC_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/atomic.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
*/
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) ((v)->counter = (i))
#include <asm/system.h>
/*
 * sh64 has no SMP support (see spinlock.h), so these operations are
 * implemented by masking interrupts around a plain read-modify-write
 * of the counter.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
unsigned long flags;
local_irq_save(flags);
*(long *)v += i;
local_irq_restore(flags);
}
static __inline__ void atomic_sub(int i, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
*(long *)v -= i;
local_irq_restore(flags);
}
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
unsigned long temp, flags;
local_irq_save(flags);
temp = *(long *)v;
temp += i;
*(long *)v = temp;
local_irq_restore(flags);
return temp;
}
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
unsigned long temp, flags;
local_irq_save(flags);
temp = *(long *)v;
temp -= i;
*(long *)v = temp;
local_irq_restore(flags);
return temp;
}
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
/*
* atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
local_irq_save(flags);
ret = v->counter;
if (likely(ret == old))
v->counter = new;
local_irq_restore(flags);
return ret;
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
local_irq_save(flags);
ret = v->counter;
if (ret != u)
v->counter += a;
local_irq_restore(flags);
return ret != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
static __inline__ void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
*(long *)v &= ~mask;
local_irq_restore(flags);
}
static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
{
unsigned long flags;
local_irq_save(flags);
*(long *)v |= mask;
local_irq_restore(flags);
}
/* Atomic operations are already serializing on SH */
#define smp_mb__before_atomic_dec() barrier()
#define smp_mb__after_atomic_dec() barrier()
#define smp_mb__before_atomic_inc() barrier()
#define smp_mb__after_atomic_inc() barrier()
#include <asm-generic/atomic.h>
#endif /* __ASM_SH64_ATOMIC_H */
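/*
 * Minimal usage sketch, not part of the original header: how the
 * IRQ-masking atomics above would typically be used for reference
 * counting. The struct and function names are hypothetical, and
 * kfree() assumes <linux/slab.h>.
 */
struct example_buf {
	atomic_t refcount;
	void *data;
};

static void example_buf_get(struct example_buf *b)
{
	atomic_inc(&b->refcount);	/* expands to atomic_add(1, ...) */
}

static void example_buf_put(struct example_buf *b)
{
	/* atomic_dec_and_test() returns true only for the final reference */
	if (atomic_dec_and_test(&b->refcount))
		kfree(b);
}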
#ifndef __ASM_SH64_AUXVEC_H
#define __ASM_SH64_AUXVEC_H
#endif /* __ASM_SH64_AUXVEC_H */
#ifndef __ASM_SH64_BITOPS_H
#define __ASM_SH64_BITOPS_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/bitops.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*/
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/system.h>
/* For __swab32 */
#include <asm/byteorder.h>
static __inline__ void set_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
local_irq_save(flags);
*a |= mask;
local_irq_restore(flags);
}
/*
* clear_bit() doesn't provide any barrier for the compiler.
*/
#define smp_mb__before_clear_bit() barrier()
#define smp_mb__after_clear_bit() barrier()
static inline void clear_bit(int nr, volatile unsigned long *a)
{
int mask;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
local_irq_save(flags);
*a &= ~mask;
local_irq_restore(flags);
}
static __inline__ void change_bit(int nr, volatile void * addr)
{
int mask;
volatile unsigned int *a = addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
local_irq_save(flags);
*a ^= mask;
local_irq_restore(flags);
}
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
local_irq_save(flags);
retval = (mask & *a) != 0;
*a |= mask;
local_irq_restore(flags);
return retval;
}
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
local_irq_save(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
local_irq_restore(flags);
return retval;
}
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
int mask, retval;
volatile unsigned int *a = addr;
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
local_irq_save(flags);
retval = (mask & *a) != 0;
*a ^= mask;
local_irq_restore(flags);
return retval;
}
#include <asm-generic/bitops/non-atomic.h>
static __inline__ unsigned long ffz(unsigned long word)
{
unsigned long result, __d2, __d3;
__asm__("gettr tr0, %2\n\t"
"pta $+32, tr0\n\t"
"andi %1, 1, %3\n\t"
"beq %3, r63, tr0\n\t"
"pta $+4, tr0\n"
"0:\n\t"
"shlri.l %1, 1, %1\n\t"
"addi %0, 1, %0\n\t"
"andi %1, 1, %3\n\t"
"beqi %3, 1, tr0\n"
"1:\n\t"
"ptabs %2, tr0\n\t"
: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
: "0" (0L), "1" (word));
return result;
}
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_BITOPS_H */
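/*
 * Illustrative sketch, not from the original header: semantics of the
 * bit operations above. A bit number indexes into an array of words,
 * so nr >> 5 selects the 32-bit word and 1 << (nr & 0x1f) the bit
 * within it. test_and_set_bit() returns the previous bit value, and
 * ffz() returns the index of the first zero bit, e.g. ffz(0x0b) == 2.
 * The bitmap and function below are hypothetical.
 */
static unsigned long example_flags[2];

static int example_claim_slot(void)
{
	int slot = ffz(example_flags[0]);	/* first free slot in word 0 */

	if (test_and_set_bit(slot, example_flags))
		return -1;			/* lost a race, already taken */
	return slot;
}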
#ifndef __ASM_SH64_BUG_H
#define __ASM_SH64_BUG_H
#ifdef CONFIG_BUG
/*
* Tell the user there is some problem, then force a segfault (in process
* context) or a panic (interrupt context).
*/
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
*(volatile int *)0 = 0; \
} while (0)
#define HAVE_ARCH_BUG
#endif
#include <asm-generic/bug.h>
#endif /* __ASM_SH64_BUG_H */
#ifndef __ASM_SH64_BUGS_H
#define __ASM_SH64_BUGS_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/bugs.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
/*
* This is included by init/main.c to check for architecture-dependent bugs.
*
* Needs:
* void check_bugs(void);
*/
/*
* I don't know of any Super-H bugs yet.
*/
#include <asm/processor.h>
static void __init check_bugs(void)
{
extern char *get_cpu_subtype(void);
extern unsigned long loops_per_jiffy;
cpu_data->loops_per_jiffy = loops_per_jiffy;
printk("CPU: %s\n", get_cpu_subtype());
}
#endif /* __ASM_SH64_BUGS_H */
#ifndef __ASM_SH64_BYTEORDER_H
#define __ASM_SH64_BYTEORDER_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/byteorder.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <asm/types.h>
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
__asm__("byterev %0, %0\n\t"
"shari %0, 32, %0"
: "=r" (x)
: "0" (x));
return x;
}
static inline __attribute_const__ __u16 ___arch__swab16(__u16 x)
{
__asm__("byterev %0, %0\n\t"
"shari %0, 48, %0"
: "=r" (x)
: "0" (x));
return x;
}
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#ifdef __LITTLE_ENDIAN__
#include <linux/byteorder/little_endian.h>
#else
#include <linux/byteorder/big_endian.h>
#endif
#endif /* __ASM_SH64_BYTEORDER_H */
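/*
 * Worked example, added for illustration: "byterev" reverses all eight
 * bytes of the register, so the following arithmetic shift right by 32
 * (or 48) bits moves the interesting bytes back down. The net effect
 * is an ordinary byte swap:
 *
 *	__arch__swab32(0x12345678) == 0x78563412
 *	__arch__swab16(0x1234)     == 0x3412
 */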
#ifndef __ASM_SH64_CPUMASK_H
#define __ASM_SH64_CPUMASK_H
#include <asm-generic/cpumask.h>
#endif /* __ASM_SH64_CPUMASK_H */
#ifndef __SH64_CPUTIME_H
#define __SH64_CPUTIME_H
#include <asm-generic/cputime.h>
#endif /* __SH64_CPUTIME_H */
#ifndef __ASM_SH64_CURRENT_H
#define __ASM_SH64_CURRENT_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/current.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
#include <linux/thread_info.h>
struct task_struct;
static __inline__ struct task_struct * get_current(void)
{
return current_thread_info()->task;
}
#define current get_current()
#endif /* __ASM_SH64_CURRENT_H */
#ifndef __ASM_SH64_DELAY_H
#define __ASM_SH64_DELAY_H
extern void __delay(int loops);
extern void __udelay(unsigned long long usecs, unsigned long lpj);
extern void __ndelay(unsigned long long nsecs, unsigned long lpj);
extern void udelay(unsigned long usecs);
extern void ndelay(unsigned long nsecs);
#endif /* __ASM_SH64_DELAY_H */
/*
* Arch specific extensions to struct device
*
* This file is released under the GPLv2
*/
#include <asm-generic/device.h>
#ifndef __ASM_SH64_DIV64_H
#define __ASM_SH64_DIV64_H
#include <asm-generic/div64.h>
#endif /* __ASM_SH64_DIV64_H */
#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
struct pci_dev;
extern void *consistent_alloc(struct pci_dev *hwdev, size_t size,
dma_addr_t *dma_handle);
extern void consistent_free(struct pci_dev *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle);
#define dma_supported(dev, mask) (1)
static inline int dma_set_mask(struct device *dev, u64 mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
*dev->dma_mask = mask;
return 0;
}
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
return consistent_alloc(NULL, size, dma_handle);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
consistent_free(NULL, size, vaddr, dma_handle);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
enum dma_data_direction dir)
{
unsigned long start = (unsigned long) vaddr;
unsigned long s = start & L1_CACHE_ALIGN_MASK;
unsigned long e = (start + size) & L1_CACHE_ALIGN_MASK;
for (; s <= e; s += L1_CACHE_BYTES)
asm volatile ("ocbp %0, 0" : : "r" (s));
}
static inline dma_addr_t dma_map_single(struct device *dev,
void *ptr, size_t size,
enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return virt_to_phys(ptr);
#endif
dma_cache_sync(dev, ptr, size, dir);
return virt_to_phys(ptr);
}
#define dma_unmap_single(dev, addr, size, dir) do { } while (0)
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
sg[i].dma_address = sg_phys(&sg[i]);
}
return nents;
}
#define dma_unmap_sg(dev, sg, nents, dir) do { } while (0)
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
return dma_map_single(dev, page_address(page) + offset, size, dir);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction dir)
{
dma_unmap_single(dev, dma_address, size, dir);
}
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return;
#endif
dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}
static inline void dma_sync_single_range(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
if (dev->bus == &pci_bus_type)
return;
#endif
dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir)
{
int i;
for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
sg[i].dma_address = sg_phys(&sg[i]);
}
}
static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
dma_sync_single(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_for_device(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir)
{
dma_sync_single(dev, dma_handle, size, dir);
}
static inline void dma_sync_single_range_for_cpu(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
dma_addr_t dma_handle,
unsigned long offset,
size_t size,
enum dma_data_direction direction)
{
dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
dma_sync_sg(dev, sg, nelems, dir);
}
static inline void dma_sync_sg_for_device(struct device *dev,
struct scatterlist *sg, int nelems,
enum dma_data_direction dir)
{
dma_sync_sg(dev, sg, nelems, dir);
}
static inline int dma_get_cache_alignment(void)
{
/*
* Each processor family will define its own L1_CACHE_SHIFT,
* L1_CACHE_BYTES wraps to this, so this is always safe.
*/
return L1_CACHE_BYTES;
}
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
return dma_addr == 0;
}
#endif /* __ASM_SH_DMA_MAPPING_H */
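/*
 * Minimal usage sketch, not part of the original header: a streaming
 * DMA mapping of a device-bound buffer using the helpers above. The
 * device, buffer and length are hypothetical; DMA_TO_DEVICE comes from
 * enum dma_data_direction and -EIO from <linux/errno.h>.
 */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(handle))
		return -EIO;

	/* ... program the (hypothetical) controller with 'handle' ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}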
#ifndef __ASM_SH64_DMA_H
#define __ASM_SH64_DMA_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/dma.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
#include <linux/mm.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#define MAX_DMA_CHANNELS 4
/*
 * SH5 can DMA to/from any memory area.
 *
 * The static definition is dodgy because it should limit the highest
 * DMA-able address based on the physical memory actually available.
 * That limit is applied at run time, when the memory allowed to
 * DMA_ZONE is defined.
 */
#define MAX_DMA_ADDRESS ~(NPHYS_MASK)
#define DMA_MODE_READ 0
#define DMA_MODE_WRITE 1
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* __ASM_SH64_DMA_H */
#ifndef __ASM_SH64_ELF_H
#define __ASM_SH64_ELF_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/elf.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/*
* ELF register definitions..
*/
#include <asm/ptrace.h>
#include <asm/user.h>
#include <asm/byteorder.h>
typedef unsigned long elf_greg_t;
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_fpu_struct elf_fpregset_t;
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ( (x)->e_machine == EM_SH )
/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS32
#ifdef __LITTLE_ENDIAN__
#define ELF_DATA ELFDATA2LSB
#else
#define ELF_DATA ELFDATA2MSB
#endif
#define ELF_ARCH EM_SH
#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */
#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
#define R_SH_DIR32 1
#define R_SH_REL32 2
#define R_SH_IMM_LOW16 246
#define R_SH_IMM_LOW16_PCREL 247
#define R_SH_IMM_MEDLOW16 248
#define R_SH_IMM_MEDLOW16_PCREL 249
#define ELF_CORE_COPY_REGS(_dest,_regs) \
memcpy((char *) &_dest, (char *) _regs, \
sizeof(struct pt_regs));
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
but it's not easy, and we've already done it here. */
#define ELF_HWCAP (0)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM (NULL)
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
_r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
_r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
_r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
_r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
_r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
_r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
_r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
_r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
_r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
_r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
_r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
_r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
_r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
_r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
_r->sr = SR_FD | SR_MMU; } while (0)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)
#endif
#endif /* __ASM_SH64_ELF_H */
#ifndef _ASM_EMERGENCY_RESTART_H
#define _ASM_EMERGENCY_RESTART_H
#include <asm-generic/emergency-restart.h>
#endif /* _ASM_EMERGENCY_RESTART_H */
#ifndef __ASM_SH64_ERRNO_H
#define __ASM_SH64_ERRNO_H
#include <asm-generic/errno.h>
#endif /* __ASM_SH64_ERRNO_H */
#ifndef _ASM_FB_H_
#define _ASM_FB_H_
#include <linux/fb.h>
#include <linux/fs.h>
#include <asm/page.h>
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
unsigned long off)
{
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
static inline int fb_is_primary_device(struct fb_info *info)
{
return 0;
}
#endif /* _ASM_FB_H_ */
#include <asm-sh/fcntl.h>
#ifndef _ASM_FUTEX_H
#define _ASM_FUTEX_H
#include <asm-generic/futex.h>
#endif
#ifndef __ASM_SH64_GPIO_H
#define __ASM_SH64_GPIO_H
/*
* This is just a stub, so that every arch using sh-sci has a gpio.h
*/
#endif /* __ASM_SH64_GPIO_H */
#ifndef __ASM_SH64_HARDIRQ_H
#define __ASM_SH64_HARDIRQ_H
#include <linux/threads.h>
#include <linux/irq.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/* arch/sh64/kernel/irq.c */
extern void ack_bad_irq(unsigned int irq);
#endif /* __ASM_SH64_HARDIRQ_H */
#ifndef __ASM_SH64_HW_IRQ_H
#define __ASM_SH64_HW_IRQ_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/hw_irq.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#endif /* __ASM_SH64_HW_IRQ_H */
/*
* linux/include/asm-sh64/ide.h
*
* Copyright (C) 1994-1996 Linus Torvalds & authors
*
* sh64 version by Richard Curnow & Paul Mundt
*/
/*
* This file contains the sh64 architecture specific IDE code.
*/
#ifndef __ASM_SH64_IDE_H
#define __ASM_SH64_IDE_H
#ifdef __KERNEL__
/* Without this, the initialisation of PCI IDE cards ends up calling
 * ide_init_hwif_ports, which won't work. */
#ifdef CONFIG_BLK_DEV_IDEPCI
#define ide_default_io_ctl(base) (0)
#endif
#include <asm-generic/ide_iops.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_IDE_H */
#include <asm-generic/ioctl.h>
#ifndef __ASM_SH64_IOCTLS_H
#define __ASM_SH64_IOCTLS_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/ioctls.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 Richard Curnow
*
*/
#include <asm/ioctl.h>
#define FIOCLEX 0x6601 /* _IO('f', 1) */
#define FIONCLEX 0x6602 /* _IO('f', 2) */
#define FIOASYNC 0x4004667d /* _IOW('f', 125, int) */
#define FIONBIO 0x4004667e /* _IOW('f', 126, int) */
#define FIONREAD 0x8004667f /* _IOW('f', 127, int) */
#define TIOCINQ FIONREAD
#define FIOQSIZE 0x80086680 /* _IOR('f', 128, loff_t) */
#define TCGETS 0x5401
#define TCSETS 0x5402
#define TCSETSW 0x5403
#define TCSETSF 0x5404
#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */
#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */
#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */
#define TCSETAF 0x4012741c /* _IOW('t', 28, struct termio) */
#define TCSBRK 0x741d /* _IO('t', 29) */
#define TCXONC 0x741e /* _IO('t', 30) */
#define TCFLSH 0x741f /* _IO('t', 31) */
#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */
#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */
#define TIOCSTART 0x746e /* _IO('t', 110) start output, like ^Q */
#define TIOCSTOP 0x746f /* _IO('t', 111) stop output, like ^S */
#define TIOCOUTQ 0x80047473 /* _IOR('t', 115, int) output queue size */
#define TIOCSPGRP 0x40047476 /* _IOW('t', 118, int) */
#define TIOCGPGRP 0x80047477 /* _IOR('t', 119, int) */
#define TIOCEXCL 0x540c /* _IO('T', 12) */
#define TIOCNXCL 0x540d /* _IO('T', 13) */
#define TIOCSCTTY 0x540e /* _IO('T', 14) */
#define TIOCSTI 0x40015412 /* _IOW('T', 18, char) 0x5412 */
#define TIOCMGET 0x80045415 /* _IOR('T', 21, unsigned int) 0x5415 */
#define TIOCMBIS 0x40045416 /* _IOW('T', 22, unsigned int) 0x5416 */
#define TIOCMBIC 0x40045417 /* _IOW('T', 23, unsigned int) 0x5417 */
#define TIOCMSET 0x40045418 /* _IOW('T', 24, unsigned int) 0x5418 */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004
#define TIOCM_ST 0x008
#define TIOCM_SR 0x010
#define TIOCM_CTS 0x020
#define TIOCM_CAR 0x040
#define TIOCM_RNG 0x080
#define TIOCM_DSR 0x100
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RI TIOCM_RNG
#define TIOCGSOFTCAR 0x80045419 /* _IOR('T', 25, unsigned int) 0x5419 */
#define TIOCSSOFTCAR 0x4004541a /* _IOW('T', 26, unsigned int) 0x541A */
#define TIOCLINUX 0x4004541c /* _IOW('T', 28, char) 0x541C */
#define TIOCCONS 0x541d /* _IO('T', 29) */
#define TIOCGSERIAL 0x803c541e /* _IOR('T', 30, struct serial_struct) 0x541E */
#define TIOCSSERIAL 0x403c541f /* _IOW('T', 31, struct serial_struct) 0x541F */
#define TIOCPKT 0x40045420 /* _IOW('T', 32, int) 0x5420 */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
#define TIOCPKT_FLUSHWRITE 2
#define TIOCPKT_STOP 4
#define TIOCPKT_START 8
#define TIOCPKT_NOSTOP 16
#define TIOCPKT_DOSTOP 32
#define TIOCNOTTY 0x5422 /* _IO('T', 34) */
#define TIOCSETD 0x40045423 /* _IOW('T', 35, int) 0x5423 */
#define TIOCGETD 0x80045424 /* _IOR('T', 36, int) 0x5424 */
#define TCSBRKP 0x40045425 /* _IOW('T', 37, int) 0x5425 */ /* Needed for POSIX tcsendbreak() */
#define TIOCTTYGSTRUCT 0x8c105426 /* _IOR('T', 38, struct tty_struct) 0x5426 */ /* For debugging only */
#define TIOCSBRK 0x5427 /* _IO('T', 39) */ /* BSD compatibility */
#define TIOCCBRK 0x5428 /* _IO('T', 40) */ /* BSD compatibility */
#define TIOCGSID 0x80045429 /* _IOR('T', 41, pid_t) 0x5429 */ /* Return the session ID of FD */
#define TIOCGPTN 0x80045430 /* _IOR('T',0x30, unsigned int) 0x5430 Get Pty Number (of pty-mux device) */
#define TIOCSPTLCK 0x40045431 /* _IOW('T',0x31, int) Lock/unlock Pty */
#define TIOCSERCONFIG 0x5453 /* _IO('T', 83) */
#define TIOCSERGWILD 0x80045454 /* _IOR('T', 84, int) 0x5454 */
#define TIOCSERSWILD 0x40045455 /* _IOW('T', 85, int) 0x5455 */
#define TIOCGLCKTRMIOS 0x5456
#define TIOCSLCKTRMIOS 0x5457
#define TIOCSERGSTRUCT 0x80d85458 /* _IOR('T', 88, struct async_struct) 0x5458 */ /* For debugging only */
#define TIOCSERGETLSR 0x80045459 /* _IOR('T', 89, unsigned int) 0x5459 */ /* Get line status register */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
#define TIOCSERGETMULTI 0x80a8545a /* _IOR('T', 90, struct serial_multiport_struct) 0x545A */ /* Get multiport config */
#define TIOCSERSETMULTI 0x40a8545b /* _IOW('T', 91, struct serial_multiport_struct) 0x545B */ /* Set multiport config */
#define TIOCMIWAIT 0x545c /* _IO('T', 92) wait for a change on serial input line(s) */
#define TIOCGICOUNT 0x545d /* read serial port inline interrupt counts */
#endif /* __ASM_SH64_IOCTLS_H */
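/*
 * Worked example, added for illustration: the hard-coded values above
 * follow the generic _IOC() encoding from <asm-generic/ioctl.h>,
 *
 *	dir << 30 | size << 16 | type << 8 | nr
 *
 * e.g. FIOASYNC = _IOW('f', 125, int)
 *	= (1 << 30) | (sizeof(int) << 16) | ('f' << 8) | 125
 *	= 0x40000000 | 0x00040000 | 0x6600 | 0x7d
 *	= 0x4004667d
 * which matches the literal value given for FIOASYNC above.
 */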
#ifndef __ASM_SH64_IPCBUF_H__
#define __ASM_SH64_IPCBUF_H__
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/ipcbuf.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/*
* The ipc64_perm structure for the sh64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 32-bit mode_t and seq
* - 2 miscellaneous 32-bit values
*/
struct ipc64_perm
{
__kernel_key_t key;
__kernel_uid32_t uid;
__kernel_gid32_t gid;
__kernel_uid32_t cuid;
__kernel_gid32_t cgid;
__kernel_mode_t mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __ASM_SH64_IPCBUF_H__ */
#include <asm-generic/irq_regs.h>
#include <asm-generic/kdebug.h>
#ifndef __ASM_SH64_KMAP_TYPES_H
#define __ASM_SH64_KMAP_TYPES_H
#include <asm-sh/kmap_types.h>
#endif /* __ASM_SH64_KMAP_TYPES_H */
#ifndef __ASM_SH64_LINKAGE_H
#define __ASM_SH64_LINKAGE_H
#include <asm-sh/linkage.h>
#endif /* __ASM_SH64_LINKAGE_H */
#ifndef __ASM_SH64_LOCAL_H
#define __ASM_SH64_LOCAL_H
#include <asm-generic/local.h>
#endif /* __ASM_SH64_LOCAL_H */
/*
* linux/include/asm-sh64/mc146818rtc.h
*
*/
/* For now, an empty place-holder to get IDE to compile. */
#ifndef __ASM_SH64_MMAN_H
#define __ASM_SH64_MMAN_H
#include <asm-sh/mman.h>
#endif /* __ASM_SH64_MMAN_H */
#ifndef __MMU_H
#define __MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#endif
#ifndef __ASM_SH64_MODULE_H
#define __ASM_SH64_MODULE_H
/*
* This file contains the SH architecture specific module code.
*/
struct mod_arch_specific {
/* empty */
};
#define Elf_Shdr Elf32_Shdr
#define Elf_Sym Elf32_Sym
#define Elf_Ehdr Elf32_Ehdr
#define module_map(x) vmalloc(x)
#define module_unmap(x) vfree(x)
#define module_arch_init(x) (0)
#define arch_init_modules(x) do { } while (0)
#endif /* __ASM_SH64_MODULE_H */
#ifndef __ASM_SH64_MSGBUF_H
#define __ASM_SH64_MSGBUF_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/msgbuf.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/*
* The msqid64_ds structure for the sh64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct msqid64_ds {
struct ipc64_perm msg_perm;
__kernel_time_t msg_stime; /* last msgsnd time */
unsigned long __unused1;
__kernel_time_t msg_rtime; /* last msgrcv time */
unsigned long __unused2;
__kernel_time_t msg_ctime; /* last change time */
unsigned long __unused3;
unsigned long msg_cbytes; /* current number of bytes on queue */
unsigned long msg_qnum; /* number of messages in queue */
unsigned long msg_qbytes; /* max number of bytes on queue */
__kernel_pid_t msg_lspid; /* pid of last msgsnd */
__kernel_pid_t msg_lrpid; /* last receive pid */
unsigned long __unused4;
unsigned long __unused5;
};
#endif /* __ASM_SH64_MSGBUF_H */
/*
* Pull in the generic implementation for the mutex fastpath.
*
* TODO: implement optimized primitives instead, or leave the generic
* implementation in place, or pick the atomic_xchg() based generic
* implementation. (see asm-generic/mutex-xchg.h for details)
*/
#include <asm-generic/mutex-dec.h>
#ifndef __ASM_SH64_NAMEI_H
#define __ASM_SH64_NAMEI_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/namei.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* Included from linux/fs/namei.c
*
*/
/* This dummy routine may be changed to something useful
* for /usr/gnemul/ emulation stuff.
* Look at asm-sparc/namei.h for details.
*/
#define __emul_prefix() NULL
#endif /* __ASM_SH64_NAMEI_H */
#ifndef __ASM_SH64_PAGE_H
#define __ASM_SH64_PAGE_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/page.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* benedict.gaster@superh.com 19th, 24th July 2002.
*
* Modified to take account of enabling for D-CACHE support.
*
*/
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#ifdef __ASSEMBLY__
#define PAGE_SIZE 4096
#else
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#endif
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PTE_MASK PAGE_MASK
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT 16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT 20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT-PAGE_SHIFT)
#define ARCH_HAS_SETCLEAR_HUGE_PTE
#endif
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
extern struct page *mem_map;
extern void sh64_page_clear(void *page);
extern void sh64_page_copy(void *from, void *to);
#define clear_page(page) sh64_page_clear(page)
#define copy_page(to,from) sh64_page_copy(from, to)
#if defined(CONFIG_DCACHE_DISABLED)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#else
extern void clear_user_page(void *to, unsigned long address, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
#endif /* defined(CONFIG_DCACHE_DISABLED) */
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
/*
* Kconfig defined.
*/
#define __MEMORY_START (CONFIG_MEMORY_START)
#define PAGE_OFFSET (CONFIG_CACHED_MEMORY_OFFSET)
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define MAP_NR(addr) ((__pa(addr)-__MEMORY_START) >> PAGE_SHIFT)
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define phys_to_page(phys) (mem_map + (((phys) - __MEMORY_START) >> PAGE_SHIFT))
#define page_to_phys(page) (((page - mem_map) << PAGE_SHIFT) + __MEMORY_START)
/* PFN start number, because of __MEMORY_START */
#define PFN_START (__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET (PFN_START)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_PAGE_H */
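/*
 * Worked example, added for illustration: with PAGE_SHIFT == 12 the
 * page size is 4096 bytes, so
 *
 *	PAGE_ALIGN(0x1234) == 0x2000
 *	PAGE_ALIGN(0x2000) == 0x2000
 *
 * and, since PAGE_OFFSET is CONFIG_CACHED_MEMORY_OFFSET, __pa()/__va()
 * simply subtract/add that constant, so __va(__pa(kaddr)) == kaddr for
 * any directly mapped kernel address.
 */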
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/param.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
*/
#ifndef __ASM_SH64_PARAM_H
#define __ASM_SH64_PARAM_H
#ifdef __KERNEL__
# ifdef CONFIG_SH_WDT
# define HZ 1000 /* Needed for high-res WOVF */
# else
# define HZ 100
# endif
# define USER_HZ 100 /* User interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
#endif
#ifndef HZ
#define HZ 100
#endif
#define EXEC_PAGESIZE 4096
#ifndef NGROUPS
#define NGROUPS 32
#endif
#ifndef NOGROUP
#define NOGROUP (-1)
#endif
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#endif /* __ASM_SH64_PARAM_H */
#ifndef __ASM_SH64_PERCPU
#define __ASM_SH64_PERCPU
#include <asm-generic/percpu.h>
#endif /* __ASM_SH64_PERCPU */
#ifndef __ASM_SH64_PGALLOC_H
#define __ASM_SH64_PGALLOC_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/pgalloc.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
*/
#include <linux/mm.h>
#include <linux/quicklist.h>
#include <asm/page.h>
static inline void pgd_init(unsigned long page)
{
unsigned long *pgd = (unsigned long *)page;
extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
int i;
for (i = 0; i < USER_PTRS_PER_PGD; i++)
pgd[i] = (unsigned long)empty_bad_pte_table;
}
/*
* Allocate and free page tables. The xxx_kernel() versions are
* used to allocate a kernel page table - this turns on ASN bits
* if any.
*/
static inline pgd_t *get_pgd_slow(void)
{
unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
pgd_t *ret = kmalloc(pgd_size, GFP_KERNEL);
return ret;
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
static inline void pgd_free(pgd_t *pgd)
{
quicklist_free(0, NULL, pgd);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
unsigned long address)
{
void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
return pg ? virt_to_page(pg) : NULL;
}
static inline void pte_free_kernel(pte_t *pte)
{
quicklist_free(0, NULL, pte);
}
static inline void pte_free(struct page *pte)
{
quicklist_free_page(0, NULL, pte);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#define __pmd_free_tlb(tlb,pmd) do { } while (0)
#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
return quicklist_alloc(0, GFP_KERNEL, NULL);
}
static inline void pmd_free(pmd_t *pmd)
{
quicklist_free(0, NULL, pmd);
}
#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd)
#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)
#else
#error "No defined page table size"
#endif
#define pmd_populate_kernel(mm, pmd, pte) \
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
struct page *pte)
{
set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
}
static inline void check_pgt_cache(void)
{
quicklist_trim(0, NULL, 25, 16);
}
#endif /* __ASM_SH64_PGALLOC_H */
#ifndef __ASM_SH64_POLL_H
#define __ASM_SH64_POLL_H
#include <asm-generic/poll.h>
#undef POLLREMOVE
#endif /* __ASM_SH64_POLL_H */
#ifndef __ASM_SH64_POSIX_TYPES_H
#define __ASM_SH64_POSIX_TYPES_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/posix_types.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* This file is generally used by user-level software, so you need to
* be a little careful about namespace pollution etc. Also, we cannot
* assume GCC is being used.
*/
typedef unsigned long __kernel_ino_t;
typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
typedef long unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef int __kernel_daddr_t;
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
typedef unsigned short __kernel_gid16_t;
typedef unsigned int __kernel_uid32_t;
typedef unsigned int __kernel_gid32_t;
typedef unsigned short __kernel_old_uid_t;
typedef unsigned short __kernel_old_gid_t;
typedef unsigned short __kernel_old_dev_t;
#ifdef __GNUC__
typedef long long __kernel_loff_t;
#endif
typedef struct {
#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
#else /* !defined(__KERNEL__) && !defined(__USE_ALL) */
int __val[2];
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t;
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#undef __FD_SET
static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
}
#undef __FD_CLR
static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
__fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
}
#undef __FD_ISSET
static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
{
unsigned long __tmp = __fd / __NFDBITS;
unsigned long __rem = __fd % __NFDBITS;
return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
}
/*
* This will unroll the loop for the normal constant case (8 ints,
* for a 256-bit fd_set)
*/
#undef __FD_ZERO
static __inline__ void __FD_ZERO(__kernel_fd_set *__p)
{
unsigned long *__tmp = __p->fds_bits;
int __i;
if (__builtin_constant_p(__FDSET_LONGS)) {
switch (__FDSET_LONGS) {
case 16:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
__tmp[ 8] = 0; __tmp[ 9] = 0;
__tmp[10] = 0; __tmp[11] = 0;
__tmp[12] = 0; __tmp[13] = 0;
__tmp[14] = 0; __tmp[15] = 0;
return;
case 8:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
__tmp[ 4] = 0; __tmp[ 5] = 0;
__tmp[ 6] = 0; __tmp[ 7] = 0;
return;
case 4:
__tmp[ 0] = 0; __tmp[ 1] = 0;
__tmp[ 2] = 0; __tmp[ 3] = 0;
return;
}
}
__i = __FDSET_LONGS;
while (__i) {
__i--;
*__tmp = 0;
__tmp++;
}
}
#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */
#endif /* __ASM_SH64_POSIX_TYPES_H */
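/*
 * Minimal usage sketch, not part of the original header: the __FD_*
 * helpers above operate on a __kernel_fd_set (from <linux/posix_types.h>)
 * just like the standard FD_* macros operate on an fd_set. The function
 * name is hypothetical.
 */
static int example_fd_set_usage(void)
{
	__kernel_fd_set fds;

	__FD_ZERO(&fds);
	__FD_SET(5, &fds);
	return __FD_ISSET(5, &fds);	/* returns non-zero */
}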
#ifndef __ASM_SH64_PTRACE_H
#define __ASM_SH64_PTRACE_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/ptrace.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/*
* This struct defines the way the registers are stored on the
* kernel stack during a system call or other kernel entry.
*/
struct pt_regs {
unsigned long long pc;
unsigned long long sr;
unsigned long long syscall_nr;
unsigned long long regs[63];
unsigned long long tregs[8];
unsigned long long pad[2];
};
#ifdef __KERNEL__
#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) ((unsigned long)instruction_pointer(regs))
extern void show_regs(struct pt_regs *);
#endif
#endif /* __ASM_SH64_PTRACE_H */
#ifndef __ASM_SH64_RESOURCE_H
#define __ASM_SH64_RESOURCE_H
#include <asm-sh/resource.h>
#endif /* __ASM_SH64_RESOURCE_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/scatterlist.h
*
* Copyright (C) 2003 Paul Mundt
*
*/
#ifndef __ASM_SH64_SCATTERLIST_H
#define __ASM_SH64_SCATTERLIST_H
#include <asm/types.h>
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;/* for highmem, page offset */
dma_addr_t dma_address;
unsigned int length;
};
/* These macros should be used after a pci_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
#define ISA_DMA_THRESHOLD (0xffffffff)
#endif /* !__ASM_SH64_SCATTERLIST_H */
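/*
 * Usage sketch, added for illustration of the comment above: walk the
 * entries returned by a pci_map_sg() call, stopping early on the first
 * entry whose DMA length is zero. The function name is hypothetical.
 */
static void example_walk_sg(struct scatterlist *sg, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (sg_dma_len(&sg[i]) == 0)
			break;
		/* program the device with sg_dma_address(&sg[i]) and
		 * sg_dma_len(&sg[i]) here */
	}
}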
#include <asm-sh/sci.h>
#ifndef __ASM_SH64_SECTIONS_H
#define __ASM_SH64_SECTIONS_H
#include <asm-sh/sections.h>
#endif /* __ASM_SH64_SECTIONS_H */
#ifndef _ASM_SEGMENT_H
#define _ASM_SEGMENT_H
/* Only here because we have some old header files that expect it.. */
#endif /* _ASM_SEGMENT_H */
#ifndef __ASM_SH64_SEMAPHORE_HELPER_H
#define __ASM_SH64_SEMAPHORE_HELPER_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/semaphore-helper.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <asm/errno.h>
/*
* SMP- and interrupt-safe semaphores helper functions.
*
* (C) Copyright 1996 Linus Torvalds
* (C) Copyright 1999 Andrea Arcangeli
*/
/*
* These two _must_ execute atomically wrt each other.
*
* This is trivially done with load_locked/store_cond,
* which we have. Let the rest of the losers suck eggs.
*/
static __inline__ void wake_one_more(struct semaphore * sem)
{
atomic_inc((atomic_t *)&sem->sleepers);
}
static __inline__ int waking_non_zero(struct semaphore *sem)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->sleepers > 0) {
sem->sleepers--;
ret = 1;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
* waking_non_zero_interruptible:
* 1 got the lock
* 0 go to sleep
* -EINTR interrupted
*
* We must undo the sem->count down_interruptible() increment while we are
* protected by the spinlock in order to make atomic this atomic_inc() with the
* atomic_read() in wake_one_more(), otherwise we can race. -arca
*/
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
struct task_struct *tsk)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->sleepers > 0) {
sem->sleepers--;
ret = 1;
} else if (signal_pending(tsk)) {
atomic_inc(&sem->count);
ret = -EINTR;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
/*
* waking_non_zero_trylock:
* 1 failed to lock
* 0 got the lock
*
* We must undo the sem->count down_trylock() increment while we are
* protected by the spinlock in order to make atomic this atomic_inc() with the
* atomic_read() in wake_one_more(), otherwise we can race. -arca
*/
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&semaphore_wake_lock, flags);
if (sem->sleepers <= 0)
atomic_inc(&sem->count);
else {
sem->sleepers--;
ret = 0;
}
spin_unlock_irqrestore(&semaphore_wake_lock, flags);
return ret;
}
#endif /* __ASM_SH64_SEMAPHORE_HELPER_H */
#ifndef __ASM_SH64_SEMAPHORE_H
#define __ASM_SH64_SEMAPHORE_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/semaphore.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* SMP- and interrupt-safe semaphores.
*
* (C) Copyright 1996 Linus Torvalds
*
* SuperH version by Niibe Yutaka
* (Currently no asm implementation but generic C code...)
*
*/
#include <linux/linkage.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/system.h>
#include <asm/atomic.h>
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEMAPHORE_INITIALIZER(name, n) \
{ \
.count = ATOMIC_INIT(n), \
.sleepers = 0, \
.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
}
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
static inline void sema_init (struct semaphore *sem, int val)
{
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*
* I'd rather use the more flexible initialization above, but sadly
* GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
*/
atomic_set(&sem->count, val);
sem->sleepers = 0;
init_waitqueue_head(&sem->wait);
}
static inline void init_MUTEX (struct semaphore *sem)
{
sema_init(sem, 1);
}
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
sema_init(sem, 0);
}
#if 0
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
#endif
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
extern spinlock_t semaphore_wake_lock;
static inline void down(struct semaphore * sem)
{
if (atomic_dec_return(&sem->count) < 0)
__down(sem);
}
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_interruptible(sem);
return ret;
}
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
}
/*
* Note! This is subtle. We jump to wake people up only if
* the semaphore was negative (== somebody was waiting on it).
*/
static inline void up(struct semaphore * sem)
{
if (atomic_inc_return(&sem->count) <= 0)
__up(sem);
}
#endif /* __ASM_SH64_SEMAPHORE_H */
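/*
 * Minimal usage sketch, not part of the original header: the counting
 * semaphore above used as a mutex. down() decrements sem->count and
 * sleeps in __down() if it went negative; up() increments it and calls
 * __up() only if waiters were queued (the count was <= 0 after the
 * increment). The lock and function names are hypothetical.
 */
static DECLARE_MUTEX(example_lock);		/* count initialised to 1 */

static void example_critical_section(void)
{
	down(&example_lock);
	/* ... exclusive access here ... */
	up(&example_lock);
}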
#ifndef __ASM_SH64_SEMBUF_H
#define __ASM_SH64_SEMBUF_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/sembuf.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/*
* The semid64_ds structure for the sh64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
unsigned long __unused1;
__kernel_time_t sem_ctime; /* last change time */
unsigned long __unused2;
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* __ASM_SH64_SEMBUF_H */
/*
* include/asm-sh64/serial.h
*
* Configuration details for 8250, 16450, 16550, etc. serial ports
*/
#ifndef _ASM_SERIAL_H
#define _ASM_SERIAL_H
/*
* This assumes you have a 1.8432 MHz clock for your UART.
*
* It'd be nice if someone built a serial card with a 24.576 MHz
* clock, since the 16550A is capable of handling a top speed of 1.5
* megabits/second; but this requires the faster clock.
*/
#define BASE_BAUD ( 1843200 / 16 )
#define RS_TABLE_SIZE 2
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define SERIAL_PORT_DFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS } /* ttyS1 */
/* XXX: This should be moved into irq.h */
#define irq_cannonicalize(x) (x)
#endif /* _ASM_SERIAL_H */
#ifndef __ASM_SH64_SETUP_H
#define __ASM_SH64_SETUP_H
#define COMMAND_LINE_SIZE 256
#ifdef __KERNEL__
#define PARAM ((unsigned char *)empty_zero_page)
#define MOUNT_ROOT_RDONLY (*(unsigned long *) (PARAM+0x000))
#define RAMDISK_FLAGS (*(unsigned long *) (PARAM+0x004))
#define ORIG_ROOT_DEV (*(unsigned long *) (PARAM+0x008))
#define LOADER_TYPE (*(unsigned long *) (PARAM+0x00c))
#define INITRD_START (*(unsigned long *) (PARAM+0x010))
#define INITRD_SIZE (*(unsigned long *) (PARAM+0x014))
#define COMMAND_LINE ((char *) (PARAM+256))
#define COMMAND_LINE_SIZE 256
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_SETUP_H */
#ifndef __ASM_SH64_SHMBUF_H
#define __ASM_SH64_SHMBUF_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/shmbuf.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/*
* The shmid64_ds structure for the sh64 architecture.
* Note extra padding because this structure is passed back and forth
* between kernel and user space.
*
* Pad space is left for:
* - 64-bit time_t to solve y2038 problem
* - 2 miscellaneous 32-bit values
*/
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
__kernel_time_t shm_atime; /* last attach time */
unsigned long __unused1;
__kernel_time_t shm_dtime; /* last detach time */
unsigned long __unused2;
__kernel_time_t shm_ctime; /* last change time */
unsigned long __unused3;
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
unsigned long __unused4;
unsigned long __unused5;
};
struct shminfo64 {
unsigned long shmmax;
unsigned long shmmin;
unsigned long shmmni;
unsigned long shmseg;
unsigned long shmall;
unsigned long __unused1;
unsigned long __unused2;
unsigned long __unused3;
unsigned long __unused4;
};
#endif /* __ASM_SH64_SHMBUF_H */
#ifndef __ASM_SH64_SHMPARAM_H
#define __ASM_SH64_SHMPARAM_H
/*
* Set this to a sensible safe default; we'll work out the specifics for the
* align mask from the cache descriptor at run-time.
*/
#define SHMLBA 0x4000
#define __ARCH_FORCE_SHMLBA
#endif /* __ASM_SH64_SHMPARAM_H */
#ifndef __ASM_SH64_SIGCONTEXT_H
#define __ASM_SH64_SIGCONTEXT_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/sigcontext.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
struct sigcontext {
unsigned long oldmask;
/* CPU registers */
unsigned long long sc_regs[63];
unsigned long long sc_tregs[8];
unsigned long long sc_pc;
unsigned long long sc_sr;
/* FPU registers */
unsigned long long sc_fpregs[32];
unsigned int sc_fpscr;
unsigned int sc_fpvalid;
};
#endif /* __ASM_SH64_SIGCONTEXT_H */
#ifndef __ASM_SH64_SIGINFO_H
#define __ASM_SH64_SIGINFO_H
#include <asm-generic/siginfo.h>
#endif /* __ASM_SH64_SIGINFO_H */
#ifndef __ASM_SH64_SIGNAL_H
#define __ASM_SH64_SIGNAL_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/signal.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <linux/types.h>
/* Avoid too many header ordering problems. */
struct siginfo;
#define _NSIG 64
#define _NSIG_BPW 32
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
typedef unsigned long old_sigset_t; /* at least 32 bits */
typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#define SIGHUP 1
#define SIGINT 2
#define SIGQUIT 3
#define SIGILL 4
#define SIGTRAP 5
#define SIGABRT 6
#define SIGIOT 6
#define SIGBUS 7
#define SIGFPE 8
#define SIGKILL 9
#define SIGUSR1 10
#define SIGSEGV 11
#define SIGUSR2 12
#define SIGPIPE 13
#define SIGALRM 14
#define SIGTERM 15
#define SIGSTKFLT 16
#define SIGCHLD 17
#define SIGCONT 18
#define SIGSTOP 19
#define SIGTSTP 20
#define SIGTTIN 21
#define SIGTTOU 22
#define SIGURG 23
#define SIGXCPU 24
#define SIGXFSZ 25
#define SIGVTALRM 26
#define SIGPROF 27
#define SIGWINCH 28
#define SIGIO 29
#define SIGPOLL SIGIO
/*
#define SIGLOST 29
*/
#define SIGPWR 30
#define SIGSYS 31
#define SIGUNUSED 31
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
#define SIGRTMAX (_NSIG-1)
/*
* SA_FLAGS values:
*
* SA_ONSTACK indicates that a registered stack_t will be used.
* SA_RESTART flag to get restarting signals (which were the default long ago)
* SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
* SA_RESETHAND clears the handler when the signal is delivered.
* SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
* SA_NODEFER prevents the current signal from being masked in the handler.
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
#define SA_NOCLDSTOP 0x00000001
#define SA_NOCLDWAIT 0x00000002 /* not supported yet */
#define SA_SIGINFO 0x00000004
#define SA_ONSTACK 0x08000000
#define SA_RESTART 0x10000000
#define SA_NODEFER 0x40000000
#define SA_RESETHAND 0x80000000
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
#define SA_RESTORER 0x04000000
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ THREAD_SIZE
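To make the SA_* and SS_* knobs above concrete, here is a hedged userspace sketch that registers an alternate signal stack and asks for SIGSEGV to be delivered on it with SA_SIGINFO | SA_ONSTACK. It is illustrative only and relies on the standard glibc signal API, not on anything specific to this header; note that userspace sees its own SIGSTKSZ value, not the THREAD_SIZE-based definition above, which is kernel-internal.

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig, siginfo_t *si, void *uc)
{
	(void)sig;
	(void)si;
	(void)uc;
	/* Running on the alternate stack registered below, so this still
	 * works even if the main stack has overflowed. */
	static const char msg[] = "caught SIGSEGV on the alternate stack\n";
	write(STDERR_FILENO, msg, sizeof(msg) - 1);
	_exit(1);
}

int main(void)
{
	stack_t ss = { 0 };
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);		/* scratch stack of SIGSTKSZ bytes */
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	sigaltstack(&ss, NULL);			/* register the alternate stack */

	sa.sa_sigaction = on_segv;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* deliver on the stack above */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);				/* demonstrate delivery */
	return 0;
}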
#include <asm-generic/signal.h>
#ifdef __KERNEL__
struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
void (*sa_restorer)(void);
sigset_t sa_mask; /* mask last for extensibility */
};
struct k_sigaction {
struct sigaction sa;
};
#else
/* Here we must cater to libcs that poke about in kernel headers. */
struct sigaction {
union {
__sighandler_t _sa_handler;
void (*_sa_sigaction)(int, struct siginfo *, void *);
} _u;
sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
};
#define sa_handler _u._sa_handler
#define sa_sigaction _u._sa_sigaction
#endif /* __KERNEL__ */
typedef struct sigaltstack {
void *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
#ifdef __KERNEL__
#include <asm/sigcontext.h>
#define sigmask(sig) (1UL << ((sig) - 1))
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_SIGNAL_H */
#ifndef __ASM_SH64_SMP_H
#define __ASM_SH64_SMP_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/smp.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#endif /* __ASM_SH64_SMP_H */
#ifndef __ASM_SH64_SOCKET_H
#define __ASM_SH64_SOCKET_H
#include <asm-sh/socket.h>
#endif /* __ASM_SH64_SOCKET_H */
#ifndef __ASM_SH64_SOCKIOS_H
#define __ASM_SH64_SOCKIOS_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/sockios.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
/* Socket-level I/O control calls. */
#define FIOGETOWN _IOR('f', 123, int)
#define FIOSETOWN _IOW('f', 124, int)
#define SIOCATMARK _IOR('s', 7, int)
#define SIOCSPGRP _IOW('s', 8, pid_t)
#define SIOCGPGRP _IOR('s', 9, pid_t)
#define SIOCGSTAMP _IOR('s', 100, struct timeval) /* Get stamp (timeval) */
#define SIOCGSTAMPNS _IOR('s', 101, struct timespec) /* Get stamp (timespec) */
#endif /* __ASM_SH64_SOCKIOS_H */
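SIOCGSTAMP above is the classic way to ask for the receive timestamp of the most recent packet on a socket. A hedged sketch of how a userspace program might use it after receiving a UDP datagram; the port number is an arbitrary example, and <linux/sockios.h> is included to pick up the ioctl number.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <linux/sockios.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct sockaddr_in addr = { 0 };
	addr.sin_family = AF_INET;
	addr.sin_port = htons(9999);		/* arbitrary example port */
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	char buf[1500];
	recv(fd, buf, sizeof(buf), 0);		/* blocks until a datagram arrives */

	struct timeval tv;
	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)	/* timestamp of last received packet */
		printf("received at %ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);

	close(fd);
	return 0;
}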
#ifndef __ASM_SH64_SPINLOCK_H
#define __ASM_SH64_SPINLOCK_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/spinlock.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#error "No SMP on SH64"
#endif /* __ASM_SH64_SPINLOCK_H */
#ifndef __ASM_SH64_STAT_H
#define __ASM_SH64_STAT_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/stat.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
struct __old_kernel_stat {
unsigned short st_dev;
unsigned short st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned long st_size;
unsigned long st_atime;
unsigned long st_mtime;
unsigned long st_ctime;
};
struct stat {
unsigned short st_dev;
unsigned short __pad1;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned short __pad2;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned short st_dev;
unsigned char __pad0[10];
unsigned long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned long st_uid;
unsigned long st_gid;
unsigned short st_rdev;
unsigned char __pad3[10];
long long st_size;
unsigned long st_blksize;
unsigned long st_blocks; /* Number of 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
unsigned long __unused1;
unsigned long __unused2;
};
#endif /* __ASM_SH64_STAT_H */
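The split *_nsec fields above are what eventually surface as nanosecond timestamps through stat(2). A hedged userspace sketch follows; it uses glibc's POSIX.1-2008 field name st_atim, which differs from the raw kernel layout shown here, and the default path is only an example.

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;
	const char *path = argc > 1 ? argv[1] : "/etc/hostname"; /* example default */

	if (stat(path, &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: size=%lld atime=%ld.%09ld\n", path,
	       (long long)st.st_size,
	       (long)st.st_atim.tv_sec, (long)st.st_atim.tv_nsec);
	return 0;
}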
#ifndef __ASM_SH64_STATFS_H
#define __ASM_SH64_STATFS_H
#include <asm-generic/statfs.h>
#endif /* __ASM_SH64_STATFS_H */
#ifndef __ASM_SH64_TERMBITS_H
#define __ASM_SH64_TERMBITS_H
#include <asm-sh/termbits.h>
#endif /* __ASM_SH64_TERMBITS_H */
#ifndef __ASM_SH64_TERMIOS_H
#define __ASM_SH64_TERMIOS_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/termios.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <asm/termbits.h>
#include <asm/ioctls.h>
struct winsize {
unsigned short ws_row;
unsigned short ws_col;
unsigned short ws_xpixel;
unsigned short ws_ypixel;
};
#define NCC 8
struct termio {
unsigned short c_iflag; /* input mode flags */
unsigned short c_oflag; /* output mode flags */
unsigned short c_cflag; /* control mode flags */
unsigned short c_lflag; /* local mode flags */
unsigned char c_line; /* line discipline */
unsigned char c_cc[NCC]; /* control characters */
};
/* modem lines */
#define TIOCM_LE 0x001
#define TIOCM_DTR 0x002
#define TIOCM_RTS 0x004
#define TIOCM_ST 0x008
#define TIOCM_SR 0x010
#define TIOCM_CTS 0x020
#define TIOCM_CAR 0x040
#define TIOCM_RNG 0x080
#define TIOCM_DSR 0x100
#define TIOCM_CD TIOCM_CAR
#define TIOCM_RI TIOCM_RNG
#define TIOCM_OUT1 0x2000
#define TIOCM_OUT2 0x4000
#define TIOCM_LOOP 0x8000
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
#ifdef __KERNEL__
/* intr=^C quit=^\ erase=del kill=^U
eof=^D vtime=\0 vmin=\1 sxtc=\0
start=^Q stop=^S susp=^Z eol=\0
reprint=^R discard=^U werase=^W lnext=^V
eol2=\0
*/
#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
#define SET_LOW_TERMIOS_BITS(termios, termio, x) { \
unsigned short __tmp; \
get_user(__tmp,&(termio)->x); \
*(unsigned short *) &(termios)->x = __tmp; \
}
#define user_termio_to_kernel_termios(termios, termio) \
({ \
SET_LOW_TERMIOS_BITS(termios, termio, c_iflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_oflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_cflag); \
SET_LOW_TERMIOS_BITS(termios, termio, c_lflag); \
copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \
})
/*
* Translate a "termios" structure into a "termio". Ugh.
*/
#define kernel_termios_to_user_termio(termio, termios) \
({ \
put_user((termios)->c_iflag, &(termio)->c_iflag); \
put_user((termios)->c_oflag, &(termio)->c_oflag); \
put_user((termios)->c_cflag, &(termio)->c_cflag); \
put_user((termios)->c_lflag, &(termio)->c_lflag); \
put_user((termios)->c_line, &(termio)->c_line); \
copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \
})
#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_TERMIOS_H */
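The conversion macros above exist because the legacy struct termio (TCGETA family) carries 16-bit flag fields while struct termios (TCGETS family) uses wider ones. From userspace the distinction is hidden behind tcgetattr()/tcsetattr(); a hedged sketch that also exercises the struct winsize defined above:

#include <stdio.h>
#include <termios.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct termios tio;
	struct winsize ws;

	if (tcgetattr(STDIN_FILENO, &tio) == 0)		/* TCGETS under the hood */
		printf("c_lflag = %#lx\n", (unsigned long)tio.c_lflag);

	if (ioctl(STDIN_FILENO, TIOCGWINSZ, &ws) == 0)	/* struct winsize above */
		printf("%hu rows x %hu cols\n", ws.ws_row, ws.ws_col);

	return 0;
}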
#ifndef __ASM_SH64_TIMEX_H
#define __ASM_SH64_TIMEX_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/timex.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
* sh-5 architecture timex specifications
*
*/
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
#define CLOCK_TICK_FACTOR 20 /* Factor of both 1000000 and CLOCK_TICK_RATE */
typedef unsigned long cycles_t;
static __inline__ cycles_t get_cycles (void)
{
return 0;
}
#define vxtime_lock() do {} while (0)
#define vxtime_unlock() do {} while (0)
#endif /* __ASM_SH64_TIMEX_H */
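get_cycles() is a stub on this port, so generic code that computes cycle deltas still compiles but simply measures zero. A hedged sketch of the usual pattern, using only the definitions above (the work callback is a placeholder, not an existing kernel interface):

/* Illustrative only: times a callback in cycles; on this port both
 * readings are 0, so the returned delta is 0 rather than a real count. */
static inline unsigned long time_some_work(void (*work)(void))
{
	cycles_t before = get_cycles();

	work();

	return (unsigned long)(get_cycles() - before);
}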
#ifndef __ASM_SH64_TLBFLUSH_H
#define __ASM_SH64_TLBFLUSH_H
#include <asm/pgalloc.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
 * - flush_tlb_all() flushes all processes' TLBs
 * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 * - flush_tlb_page(vma, vmaddr) flushes one page
 * - flush_tlb_range(vma, start, end) flushes a range of pages
 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*
*/
extern void flush_tlb(void);
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif /* __ASM_SH64_TLBFLUSH_H */
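As a hedged illustration of how the primitives above are meant to be paired with page-table updates, the sketch below mirrors the common pattern in generic mm code: store the new PTE, then invalidate the stale TLB entry. The vma/addr/ptep parameters and set_pte() stand in for whatever the caller's page-table walk provides; this is not code from the removed headers.

/* Illustrative caller pattern only. */
static inline void example_change_protection(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t newpte)
{
	set_pte(ptep, newpte);		/* update the page table entry ... */
	flush_tlb_page(vma, addr);	/* ... then drop the stale TLB entry */
}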
#ifndef __ASM_SH64_TOPOLOGY_H
#define __ASM_SH64_TOPOLOGY_H
#include <asm-generic/topology.h>
#endif /* __ASM_SH64_TOPOLOGY_H */
#ifndef __ASM_SH64_TYPES_H
#define __ASM_SH64_TYPES_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/types.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#ifndef __ASSEMBLY__
typedef unsigned short umode_t;
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
* header files exported to user space
*/
typedef __signed__ char __s8;
typedef unsigned char __u8;
typedef __signed__ short __s16;
typedef unsigned short __u16;
typedef __signed__ int __s32;
typedef unsigned int __u32;
#if defined(__GNUC__)
__extension__ typedef __signed__ long long __s64;
__extension__ typedef unsigned long long __u64;
#endif
#endif /* __ASSEMBLY__ */
/*
* These aren't exported outside the kernel to avoid name space clashes
*/
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
typedef __signed__ char s8;
typedef unsigned char u8;
typedef __signed__ short s16;
typedef unsigned short u16;
typedef __signed__ int s32;
typedef unsigned int u32;
typedef __signed__ long long s64;
typedef unsigned long long u64;
/* DMA addresses come in generic and 64-bit flavours. */
#ifdef CONFIG_HIGHMEM64G
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif
typedef u64 dma64_addr_t;
#endif /* __ASSEMBLY__ */
#define BITS_PER_LONG 32
#endif /* __KERNEL__ */
#endif /* __ASM_SH64_TYPES_H */
#ifndef __ASM_SH64_UCONTEXT_H
#define __ASM_SH64_UCONTEXT_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/ucontext.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
struct ucontext {
unsigned long uc_flags;
struct ucontext *uc_link;
stack_t uc_stack;
struct sigcontext uc_mcontext;
sigset_t uc_sigmask; /* mask last for extensibility */
};
#endif /* __ASM_SH64_UCONTEXT_H */
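struct ucontext wraps the struct sigcontext defined earlier, which is how an SA_SIGINFO handler can reach the saved program counter. A hedged sketch using the kernel-side names from these two headers; a real userspace build would normally go through glibc's <ucontext.h> types instead.

/* Illustrative only: third argument of an SA_SIGINFO handler viewed
 * through the kernel-side ucontext/sigcontext layout above. */
static void fault_handler(int sig, struct siginfo *info, void *ctx)
{
	struct ucontext *uc = (struct ucontext *)ctx;

	/* sc_pc is the program counter saved when the signal was raised. */
	unsigned long long pc = uc->uc_mcontext.sc_pc;

	(void)sig;
	(void)info;
	(void)pc;
}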
#ifndef __ASM_SH64_UNALIGNED_H
#define __ASM_SH64_UNALIGNED_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/unaligned.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <asm-generic/unaligned.h>
#endif /* __ASM_SH64_UNALIGNED_H */
#ifndef __ASM_SH64_USER_H
#define __ASM_SH64_USER_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* include/asm-sh64/user.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*
*/
#include <linux/types.h>
#include <asm/ptrace.h>
#include <asm/page.h>
/*
* Core file format: The core file is written in such a way that gdb
* can understand it and provide useful information to the user (under
* linux we use the `trad-core' bfd). The file contents are as follows:
*
* upage: 1 page consisting of a user struct that tells gdb
* what is present in the file. Directly after this is a
* copy of the task_struct, which is currently not used by gdb,
* but it may come in handy at some point. All of the registers
* are stored as part of the upage. The upage should always be
* only one page long.
* data: The data segment follows next. We use current->end_text to
* current->brk to pick up all of the user variables, plus any memory
* that may have been sbrk'ed. No attempt is made to determine if a
 * page is demand-zero or if a page is totally unused; we just cover
* the entire range. All of the addresses are rounded in such a way
* that an integral number of pages is written.
* stack: We need the stack information in order to get a meaningful
* backtrace. We need to write the data from usp to
* current->start_stack, so we round each of these in order to be able
* to write an integer number of pages.
*/
struct user_fpu_struct {
unsigned long long fp_regs[32];
unsigned int fpscr;
};
struct user {
struct pt_regs regs; /* entire machine state */
struct user_fpu_struct fpu; /* Math Co-processor registers */
int u_fpvalid; /* True if math co-processor being used */
size_t u_tsize; /* text size (pages) */
size_t u_dsize; /* data size (pages) */
size_t u_ssize; /* stack size (pages) */
unsigned long start_code; /* text starting address */
unsigned long start_data; /* data starting address */
unsigned long start_stack; /* stack starting address */
long int signal; /* signal causing core dump */
struct regs * u_ar0; /* help gdb find registers */
struct user_fpu_struct* u_fpstate; /* Math Co-processor pointer */
unsigned long magic; /* identifies a core file */
char u_comm[32]; /* user command name */
};
#define NBPG PAGE_SIZE
#define UPAGES 1
#define HOST_TEXT_START_ADDR (u.start_code)
#define HOST_DATA_START_ADDR (u.start_data)
#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* __ASM_SH64_USER_H */
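The comment above describes the legacy trad-core layout: one upage holding struct user, then the data image, then the stack image, with NBPG and UPAGES fixing the granularity. A hedged helper sketch (not from the removed header) that turns a struct user read from the first page of such a core file into file offsets:

#include <stddef.h>

struct core_layout {
	size_t data_off;	/* file offset of the data image */
	size_t stack_off;	/* file offset of the stack image */
	size_t total;		/* total core file size */
};

/* Illustrative only: offsets follow the upage + data + stack ordering
 * described in the comment above. */
static inline struct core_layout core_offsets(const struct user *u)
{
	struct core_layout l;

	l.data_off  = UPAGES * NBPG;			/* data follows the upage */
	l.stack_off = l.data_off + u->u_dsize * NBPG;	/* then the stack image */
	l.total     = l.stack_off + u->u_ssize * NBPG;
	return l;
}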