Commit 95b3aff0 authored by David Mosberger

ia64: Sync up with 2.5.50 (only the Ski simulator has been tested so far).

parent 578418cb
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
mainmenu "IA-64 Linux Kernel Configuration"
source "init/Kconfig"
......
@@ -3,12 +3,14 @@
*
* Loads an ELF kernel.
*
* Copyright (C) 1998, 1999, 2001 Hewlett-Packard Co
* Copyright (C) 1998, 1999, 2001 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998, 1999 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* 01/07/99 S.Eranian modified to pass command line arguments to kernel
*/
struct task_struct; /* forward declaration for elf.h */
#include <linux/config.h>
#include <linux/elf.h>
#include <linux/init.h>
......
@@ -23,7 +23,7 @@
#include "../drivers/scsi/hosts.h"
#include "simscsi.h"
#define DEBUG_SIMSCSI 1
#define DEBUG_SIMSCSI 0
/* Simulator system calls: */
@@ -377,6 +377,12 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
return 0;
}
int
simscsi_host_reset (Scsi_Cmnd *sc)
{
printk ("simscsi_host_reset: not implemented\n");
return 0;
}
static Scsi_Host_Template driver_template = SIMSCSI;
......
/*
* Simulated SCSI driver.
*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef SIMSCSI_H
@@ -21,14 +21,17 @@ extern int simscsi_biosparam (struct scsi_device *, struct block_device *,
sector_t, int[]);
#define SIMSCSI { \
.name = "simscsi", \
.detect = simscsi_detect, \
.release = simscsi_release, \
.info = simscsi_info, \
.queuecommand = simscsi_queuecommand, \
.eh_host_reset_handler = simscsi_host_reset, \
.bios_param = simscsi_biosparam, \
.can_queue = SIMSCSI_REQ_QUEUE_LEN, \
.this_id = -1, \
.sg_tablesize = SG_ALL, \
.max_sectors = 1024, \
.cmd_per_lun = SIMSCSI_REQ_QUEUE_LEN, \
.present = 0, \
.unchecked_isa_dma = 0, \
......
@@ -38,7 +38,6 @@
#include <linux/smb_mount.h>
#include <linux/ncp_fs.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
@@ -194,9 +193,12 @@ putstat (struct stat32 *ubuf, struct kstat *stat)
err |= __put_user(high2lowgid(stat->gid), &ubuf->st_gid);
err |= __put_user(stat->rdev, &ubuf->st_rdev);
err |= __put_user(stat->size, &ubuf->st_size);
err |= __put_user(stat->atime, &ubuf->st_atime);
err |= __put_user(stat->mtime, &ubuf->st_mtime);
err |= __put_user(stat->ctime, &ubuf->st_ctime);
err |= __put_user(stat->atime.tv_sec, &ubuf->st_atime);
err |= __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec);
err |= __put_user(stat->mtime.tv_sec, &ubuf->st_mtime);
err |= __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec);
err |= __put_user(stat->ctime.tv_sec, &ubuf->st_ctime);
err |= __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec);
err |= __put_user(stat->blksize, &ubuf->st_blksize);
err |= __put_user(stat->blocks, &ubuf->st_blocks);
return err;
@@ -2986,7 +2988,7 @@ save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *save)
ptp = ia64_task_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
put_fpreg(i, (struct _fpxreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
put_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
__put_user(mxcsr & 0xffff, &save->mxcsr);
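The mxcsr computation above packs two ia64 registers into the IA-32 MXCSR image. As a standalone illustration of the bit layout implied by the masks (my reading, not spelled out in the commit): FSR supplies the sticky exception flags in bits 0-5, FCR the exception masks and rounding control in bits 7-15.

static unsigned int
make_mxcsr (unsigned long fcr, unsigned long fsr)
{
	/* bits 7-15 (masks/rounding) from FCR, bits 0-5 (flags) from FSR */
	return (unsigned int) (((fcr >> 32) & 0xff80) | ((fsr >> 32) & 0x3f));
}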
@@ -3030,7 +3032,7 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct *sa
ptp = ia64_task_regs(tsk);
tos = (tsk->thread.fsr >> 11) & 7;
for (i = 0; i < 8; i++)
get_fpreg(i, (struct _fpxreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
get_fpreg(i, (struct _fpreg_ia32 *)&save->st_space[4*i], ptp, swp, tos);
__get_user(mxcsr, (unsigned int *)&save->mxcsr);
num64 = mxcsr & 0xff10;
@@ -3637,9 +3639,12 @@ putstat64 (struct stat64 *ubuf, struct kstat *kbuf)
err |= __put_user(kbuf->rdev, &ubuf->st_rdev);
err |= __put_user(kbuf->size, &ubuf->st_size_lo);
err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
err |= __put_user(kbuf->atime, &ubuf->st_atime);
err |= __put_user(kbuf->mtime, &ubuf->st_mtime);
err |= __put_user(kbuf->ctime, &ubuf->st_ctime);
err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
return err;
......
@@ -535,7 +535,6 @@ END(ia64_trace_syscall)
GLOBAL_ENTRY(ia64_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to() after do_fork()->copy_thread(). r8 contains the
@@ -543,7 +542,6 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret8:
#endif
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 r2=[r2]
@@ -844,7 +842,6 @@ ENTRY(handle_syscall_error)
br.cond.sptk ia64_leave_kernel
END(handle_syscall_error)
#ifdef CONFIG_SMP
/*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted.
@@ -861,8 +858,6 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
#endif /* CONFIG_SMP */
#if __GNUC__ < 3
/*
@@ -1129,18 +1124,18 @@ sys_call_table:
data8 ia64_ni_syscall /* was: ia64_oldfstat */
data8 sys_vhangup
data8 sys_lchown
data8 sys_vm86 // 1125
data8 sys_remap_file_pages // 1125
data8 sys_wait4
data8 sys_sysinfo
data8 sys_clone
data8 sys_setdomainname
data8 sys_newuname // 1130
data8 sys_adjtimex
data8 ia64_create_module
data8 ia64_ni_syscall /* was: ia64_create_module */
data8 sys_init_module
data8 sys_delete_module
data8 sys_get_kernel_syms // 1135
data8 sys_query_module
data8 ia64_ni_syscall // 1135 /* was: sys_get_kernel_syms */
data8 ia64_ni_syscall /* was: sys_query_module */
data8 sys_quotactl
data8 sys_bdflush
data8 sys_sysfs
@@ -1173,11 +1168,7 @@ sys_call_table:
data8 sys_sched_get_priority_min
data8 sys_sched_rr_get_interval
data8 sys_nanosleep
#if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
data8 sys_nfsservctl
#else
data8 sys_ni_syscall
#endif
data8 sys_prctl // 1170
data8 sys_getpagesize
data8 sys_mmap2
@@ -1241,7 +1232,7 @@ sys_call_table:
data8 sys_futex // 1230
data8 sys_sched_setaffinity
data8 sys_sched_getaffinity
data8 sys_ni_syscall
data8 sys_set_tid_address
data8 sys_alloc_hugepages
data8 sys_free_hugepages // 1235
data8 sys_exit_group
@@ -1254,7 +1245,7 @@ sys_call_table:
data8 sys_epoll_create
data8 sys_epoll_ctl
data8 sys_epoll_wait // 1245
data8 ia64_ni_syscall
data8 sys_remap_file_pages
data8 ia64_ni_syscall
data8 ia64_ni_syscall
data8 ia64_ni_syscall
......
@@ -10,6 +10,7 @@
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -376,7 +377,8 @@ copy_thread (int nr, unsigned long clone_flags,
/* clear list of sampling buffer to free for new task */
p->thread.pfm_smpl_buf_list = NULL;
if (current->thread.pfm_context) retval = pfm_inherit(p, child_ptregs);
if (current->thread.pfm_context)
retval = pfm_inherit(p, child_ptregs);
#endif
return retval;
}
......
@@ -649,9 +649,8 @@ cpu_init (void)
* "NR_CPUS" pages for all CPUs to avoid that AP calls get_zeroed_page().
*/
if (smp_processor_id() == 0) {
cpu_data = (unsigned long) __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
PERCPU_PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS, PERCPU_PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
for (cpu = 0; cpu < NR_CPUS; cpu++) {
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
......
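For context, the __per_cpu_offset[] values computed in the cpu_init() loop above are what per-CPU accessors add to a variable's link-time address. A simplified sketch of such an accessor (the real kernel macro of this era is RELOC_HIDE()-based; this form is only illustrative):

/* Illustrative only: reach CPU `cpu's copy of a per-CPU variable by
 * adding the offset recorded by cpu_init() to the master copy's
 * address. */
#define my_per_cpu(var, cpu) \
	(*(__typeof__(&(var))) ((char *) &(var) + __per_cpu_offset[cpu]))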
@@ -213,13 +213,20 @@ smp_flush_tlb_all (void)
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
local_flush_tlb_mm(mm);
local_finish_flush_tlb_mm(mm);
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
return;
smp_call_function((void (*)(void *))local_flush_tlb_mm, mm, 1, 1);
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
* have been running in the address space. It's not clear that this is worth the
* trouble though: to avoid races, we have to raise the IPI on the target CPU
* anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
* rather trivial.
*/
smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}
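The comment above describes an optimization this commit deliberately skips. A hedged sketch of what it could look like (assumes the 2.5-era `unsigned long mm->cpu_vm_mask' bitmask; not part of this change):

/* Hypothetical refinement, not in this commit: skip the cross-CPU IPIs
 * entirely when the current CPU is the only one that has ever run this
 * address space. */
static void
smp_flush_tlb_mm_masked (struct mm_struct *mm)
{
	local_finish_flush_tlb_mm(mm);
	if (mm->cpu_vm_mask == (1UL << smp_processor_id()))
		return;		/* no other CPU can hold stale entries */
	smp_call_function((void (*)(void *)) local_finish_flush_tlb_mm, mm, 1, 1);
}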
/*
......
@@ -337,7 +337,7 @@ fork_by_hand (void)
* don't care about the eip and regs settings since we'll never reschedule the
* forked task.
*/
return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL);
return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
}
static int __init
......
@@ -340,25 +340,6 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
return addr;
}
asmlinkage long
sys_vm86 (long arg0, long arg1, long arg2, long arg3)
{
printk(KERN_ERR "sys_vm86(%lx, %lx, %lx, %lx)!\n", arg0, arg1, arg2, arg3);
return -ENOSYS;
}
asmlinkage unsigned long
ia64_create_module (const char *name_user, size_t size)
{
extern unsigned long sys_create_module (const char *, size_t);
unsigned long addr;
addr = sys_create_module (name_user, size);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
}
#ifndef CONFIG_PCI
asmlinkage long
......
@@ -97,6 +97,12 @@ SECTIONS
.init.data : AT(ADDR(.init.data) - PAGE_OFFSET)
{ *(.init.data) }
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - PAGE_OFFSET)
{ *(.init.ramfs) }
__initramfs_end = .;
. = ALIGN(16);
__setup_start = .;
.init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET)
......
@@ -229,11 +229,11 @@ struct stat32 {
unsigned int st_blksize;
unsigned int st_blocks;
unsigned int st_atime;
unsigned int __unused1;
unsigned int st_atime_nsec;
unsigned int st_mtime;
unsigned int __unused2;
unsigned int st_mtime_nsec;
unsigned int st_ctime;
unsigned int __unused3;
unsigned int st_ctime_nsec;
unsigned int __unused4;
unsigned int __unused5;
};
@@ -254,11 +254,11 @@ struct stat64 {
unsigned int st_blocks; /* Number of 512-byte blocks allocated. */
unsigned int __pad4; /* future possible st_blocks high bits */
unsigned int st_atime;
unsigned int __pad5;
unsigned int st_atime_nsec;
unsigned int st_mtime;
unsigned int __pad6;
unsigned int st_mtime_nsec;
unsigned int st_ctime;
unsigned int __pad7; /* will be high 32 bits of ctime someday */
unsigned int st_ctime_nsec;
unsigned int st_ino_lo;
unsigned int st_ino_hi;
};
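With the padding words above repurposed as *_nsec fields, an IA-32 program can rebuild full nanosecond timestamps. A minimal sketch against this layout (illustrative; in modern libcs st_atime may be a macro over st_atim.tv_sec):

#include <time.h>

/* Sketch: reassemble a struct timespec from the split sec/nsec fields
 * that this commit starts filling in (stat64 layout as shown above). */
static struct timespec
stat64_atime (const struct stat64 *st)
{
	struct timespec ts;

	ts.tv_sec = st->st_atime;
	ts.tv_nsec = st->st_atime_nsec;
	return ts;
}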
......
@@ -18,12 +18,14 @@
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
#define MAP_GROWSUP 0x0200 /* register stack-like segment */
#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
#define MAP_GROWSDOWN 0x00100 /* stack-like segment */
#define MAP_GROWSUP 0x00200 /* register stack-like segment */
#define MAP_DENYWRITE 0x00800 /* ETXTBSY */
#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */
#define MAP_LOCKED 0x02000 /* pages are locked */
#define MAP_NORESERVE 0x04000 /* don't check for reservations */
#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
#define MAP_NONBLOCK 0x10000 /* do not block on IO */
#define MS_ASYNC 1 /* sync memory asynchronously */
#define MS_INVALIDATE 2 /* invalidate the caches */
......
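MAP_POPULATE and MAP_NONBLOCK, added in the mman.h hunk above, later became standard Linux mmap flags. A minimal userspace sketch of the intended combination, prefaulting a file mapping without blocking on I/O (illustrative; availability depends on kernel and libc headers):

#include <sys/mman.h>

/* Sketch: map `len' bytes of `fd' and ask the kernel to prefault the
 * page tables up front (MAP_POPULATE) without sleeping on disk I/O
 * (MAP_NONBLOCK). */
static void *
map_prefaulted (int fd, size_t len)
{
	return mmap(NULL, len, PROT_READ,
		    MAP_SHARED | MAP_POPULATE | MAP_NONBLOCK, fd, 0);
}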
#ifndef __MMU_H
#define __MMU_H
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
/*
* Type for a context number. We declare it volatile to ensure proper ordering when it's
* accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
* init_new_context()).
*/
typedef volatile unsigned long mm_context_t;
#endif
@@ -62,25 +62,32 @@ delayed_tlb_flush (void)
}
}
static inline void
get_new_mmu_context (struct mm_struct *mm)
static inline mm_context_t
get_mmu_context (struct mm_struct *mm)
{
mm_context_t context = mm->context;
if (context)
return context;
spin_lock(&ia64_ctx.lock);
{
if (ia64_ctx.next >= ia64_ctx.limit)
wrap_mmu_context(mm);
mm->context = ia64_ctx.next++;
/* re-check, now that we've got the lock: */
context = mm->context;
if (context == 0) {
if (ia64_ctx.next >= ia64_ctx.limit)
wrap_mmu_context(mm);
mm->context = context = ia64_ctx.next++;
}
}
spin_unlock(&ia64_ctx.lock);
return context;
}
static inline void
get_mmu_context (struct mm_struct *mm)
{
if (mm->context == 0)
get_new_mmu_context(mm);
}
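The rewritten get_mmu_context() above (which absorbs the old get_new_mmu_context() wrapper) is a double-checked allocation: an unlocked fast path, then a re-check under the lock so two racing threads settle on one context. A self-contained userspace analogue of the same pattern (pthreads; purely illustrative, relying on a volatile object just as the new mm_context_t typedef does):

#include <pthread.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long next_ctx = 1;

static unsigned long
get_context (volatile unsigned long *ctx)
{
	unsigned long c = *ctx;

	if (c)			/* unlocked fast path */
		return c;
	pthread_mutex_lock(&ctx_lock);
	c = *ctx;		/* re-check, now that we hold the lock */
	if (c == 0)
		*ctx = c = next_ctx++;
	pthread_mutex_unlock(&ctx_lock);
	return c;
}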
/*
* Initialize context number to some sane value. MM is guaranteed to be a brand-new
* address-space, so no TLB flushing is needed, ever.
*/
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
@@ -95,13 +102,13 @@ destroy_context (struct mm_struct *mm)
}
static inline void
reload_context (struct mm_struct *mm)
reload_context (mm_context_t context)
{
unsigned long rid;
unsigned long rid_incr = 0;
unsigned long rr0, rr1, rr2, rr3, rr4;
rid = mm->context << 3; /* make space for encoding the region number */
rid = context << 3; /* make space for encoding the region number */
rid_incr = 1 << 8;
/* encode the region id, preferred page size, and VHPT enable bit: */
@@ -124,6 +131,18 @@ reload_context (struct mm_struct *mm)
ia64_insn_group_barrier();
}
static inline void
activate_context (struct mm_struct *mm)
{
mm_context_t context;
do {
context = get_mmu_context(mm);
reload_context(context);
/* in the unlikely event of a TLB-flush by another thread, redo the load: */
} while (unlikely(context != mm->context));
}
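activate_context() adds an optimistic retry on top: act on a snapshot of the context, then re-read and loop if another thread invalidated it in the meantime. A userspace analogue using C11 atomics (illustrative only):

#include <stdatomic.h>

/* Sketch of the activate_context() retry: apply a snapshot of the
 * context, then retry if it changed underneath us (e.g., a concurrent
 * wrap_mmu_context() in the kernel case). */
static void
apply_stable (_Atomic unsigned long *ctx, void (*apply)(unsigned long))
{
	unsigned long c;

	do {
		c = atomic_load(ctx);
		apply(c);
	} while (c != atomic_load(ctx));
}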
/*
* Switch from address space PREV to address space NEXT.
*/
@@ -133,12 +152,11 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)
delayed_tlb_flush();
/*
* We may get interrupts here, but that's OK because interrupt
* handlers cannot touch user-space.
* We may get interrupts here, but that's OK because interrupt handlers cannot
* touch user-space.
*/
ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
get_mmu_context(next);
reload_context(next);
activate_context(next);
}
#define switch_mm(prev_mm,next_mm,next_task,cpu) activate_mm(prev_mm, next_mm)
......
#ifndef _ASM_IA64_MODULE_H
#define _ASM_IA64_MODULE_H
/*
* This file contains the ia64 architecture specific module code.
*
* Copyright (C) 2000 Intel Corporation.
* Copyright (C) 2000 Mike Stephens <mike.stephens@intel.com>
*/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <asm/unwind.h>
#define module_map(x) vmalloc(x)
#define module_unmap(x) ia64_module_unmap(x)
#define module_arch_init(x) ia64_module_init(x)
/*
* This must match in size and layout the data created by
* modutils/obj/obj-ia64.c
*/
struct archdata {
const char *unw_table;
const char *segment_base;
const char *unw_start;
const char *unw_end;
const char *gp;
};
static inline void
arch_init_modules (struct module *kmod)
{
static struct archdata archdata;
register char *kernel_gp asm ("gp");
archdata.gp = kernel_gp;
kmod->archdata_start = (const char *) &archdata;
kmod->archdata_end = (const char *) (&archdata + 1);
}
/*
* Functions to add/remove a module's unwind info when
* it is loaded or unloaded.
*/
static inline int
ia64_module_init (struct module *mod)
{
struct archdata *archdata;
if (!mod_member_present(mod, archdata_start) || !mod->archdata_start)
return 0;
archdata = (struct archdata *)(mod->archdata_start);
if (archdata->unw_start == 0)
return 0;
/*
* Make sure the unwind pointers are sane.
*/
if (archdata->unw_table) {
printk(KERN_ERR "module_arch_init: archdata->unw_table must be zero.\n");
return 1;
}
if (!mod_bound(archdata->gp, 0, mod)) {
printk(KERN_ERR "module_arch_init: archdata->gp out of bounds.\n");
return 1;
}
if (!mod_bound(archdata->unw_start, 0, mod)) {
printk(KERN_ERR "module_arch_init: archdata->unw_start out of bounds.\n");
return 1;
}
if (!mod_bound(archdata->unw_end, 0, mod)) {
printk(KERN_ERR "module_arch_init: archdata->unw_end out of bounds.\n");
return 1;
}
if (!mod_bound(archdata->segment_base, 0, mod)) {
printk(KERN_ERR "module_arch_init: archdata->segment_base out of bounds.\n");
return 1;
}
/*
* Pointers are reasonable, add the module unwind table
*/
archdata->unw_table = unw_add_unwind_table(mod->name,
(unsigned long) archdata->segment_base,
(unsigned long) archdata->gp,
archdata->unw_start, archdata->unw_end);
return 0;
}
static inline void
ia64_module_unmap (void * addr)
{
struct module *mod = (struct module *) addr;
struct archdata *archdata;
/*
* Before freeing the module memory remove the unwind table entry
*/
if (mod_member_present(mod, archdata_start) && mod->archdata_start) {
archdata = (struct archdata *)(mod->archdata_start);
if (archdata->unw_table != NULL)
unw_remove_unwind_table((void *) archdata->unw_table);
}
vfree(addr);
}
/* Module support currently broken (due to in-kernel module loader). */
#endif /* _ASM_IA64_MODULE_H */
@@ -33,12 +33,10 @@ extern void local_flush_tlb_all (void);
#endif
static inline void
local_flush_tlb_mm (struct mm_struct *mm)
local_finish_flush_tlb_mm (struct mm_struct *mm)
{
if (mm == current->active_mm) {
get_new_mmu_context(mm);
reload_context(mm);
}
if (mm == current->active_mm)
activate_context(mm);
}
/*
@@ -60,7 +58,7 @@ flush_tlb_mm (struct mm_struct *mm)
#ifdef CONFIG_SMP
smp_flush_tlb_mm(mm);
#else
local_flush_tlb_mm(mm);
local_finish_flush_tlb_mm(mm);
#endif
}
......
@@ -114,18 +114,18 @@
/* 1122 was __NR_old_fstat */
#define __NR_vhangup 1123
#define __NR_lchown 1124
#define __NR_vm86 1125
#define __NR_remap_file_pages 1125
#define __NR_wait4 1126
#define __NR_sysinfo 1127
#define __NR_clone 1128
#define __NR_setdomainname 1129
#define __NR_uname 1130
#define __NR_adjtimex 1131
#define __NR_create_module 1132
/* 1132 was __NR_create_module */
#define __NR_init_module 1133
#define __NR_delete_module 1134
#define __NR_get_kernel_syms 1135
#define __NR_query_module 1136
/* 1135 was __NR_get_kernel_syms */
/* 1136 was __NR_query_module */
#define __NR_quotactl 1137
#define __NR_bdflush 1138
#define __NR_sysfs 1139
@@ -222,7 +222,7 @@
#define __NR_futex 1230
#define __NR_sched_setaffinity 1231
#define __NR_sched_getaffinity 1232
/* 1233 currently unused */
#define __NR_set_tid_address 1233
#define __NR_alloc_hugepages 1234
#define __NR_free_hugepages 1235
#define __NR_exit_group 1236
......
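Finally, the new 1233 slot wires up set_tid_address(2) on ia64. A minimal sketch of calling it by number on this ABI (illustrative; the syscall returns the caller's thread ID, and glibc later grew a wrapper):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_set_tid_address
# define __NR_set_tid_address 1233	/* ia64 numbering, per the hunk above */
#endif

int
main (void)
{
	int tid_slot = 0;

	/* Register `tid_slot' to be cleared (and futex-woken) on thread
	 * exit; the return value is the calling thread's TID. */
	long tid = syscall(__NR_set_tid_address, &tid_slot);

	printf("tid=%ld\n", tid);
	return 0;
}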