Commit 93234a6b authored by Linus Torvalds

Merge bk://lia64.bkbits.net/to-linus-2.5/

into home.transmeta.com:/home/torvalds/v2.5/linux
parents b4f305dd 33169a92
mainmenu_name "Kernel configuration of Linux for IA-64 machines"
mainmenu_name "IA-64 Linux Kernel Configuration"
source init/Config.in
mainmenu_option next_comment
comment 'General setup'
comment 'Processor type and features'
define_bool CONFIG_IA64 y
@@ -14,13 +14,6 @@ define_bool CONFIG_SBUS n
define_bool CONFIG_RWSEM_GENERIC_SPINLOCK y
define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM n
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
define_bool CONFIG_ACPI y
define_bool CONFIG_ACPI_EFI y
define_bool CONFIG_ACPI_INTERPRETER y
define_bool CONFIG_ACPI_KERNEL_CONFIG y
fi
choice 'IA-64 processor type' \
"Itanium CONFIG_ITANIUM \
McKinley CONFIG_MCKINLEY" Itanium
@@ -38,6 +31,13 @@ choice 'Kernel page size' \
16KB CONFIG_IA64_PAGE_SIZE_16KB \
64KB CONFIG_IA64_PAGE_SIZE_64KB" 16KB
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
define_bool CONFIG_ACPI y
define_bool CONFIG_ACPI_EFI y
define_bool CONFIG_ACPI_INTERPRETER y
define_bool CONFIG_ACPI_KERNEL_CONFIG y
fi
if [ "$CONFIG_ITANIUM" = "y" ]; then
define_bool CONFIG_IA64_BRL_EMU y
bool ' Enable Itanium B-step specific code' CONFIG_ITANIUM_BSTEP_SPECIFIC
@@ -136,6 +136,17 @@ else
fi
endmenu
else # ! HP_SIM
mainmenu_option next_comment
comment 'Block devices'
tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
fi
endmenu
fi # !HP_SIM
mainmenu_option next_comment
@@ -228,15 +239,7 @@ fi
fi # !HP_SIM
if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
mainmenu_option next_comment
comment 'Simulated drivers'
bool 'Simulated Ethernet ' CONFIG_SIMETH
bool 'Simulated serial driver support' CONFIG_SIM_SERIAL
if [ "$CONFIG_SCSI" != "n" ]; then
bool 'Simulated SCSI disk' CONFIG_SCSI_SIM
fi
endmenu
source arch/ia64/hp/Config.in
fi
@@ -258,15 +261,6 @@ if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
bool ' Turn on compare-and-exchange bug checking (slow!)' CONFIG_IA64_DEBUG_CMPXCHG
bool ' Turn on irq debug checks (slow!)' CONFIG_IA64_DEBUG_IRQ
bool ' Built-in Kernel Debugger support' CONFIG_KDB
dep_tristate ' KDB modules' CONFIG_KDB_MODULES $CONFIG_KDB
if [ "$CONFIG_KDB" = "y" ]; then
bool ' KDB off by default' CONFIG_KDB_OFF
comment ' Load all symbols for debugging is required for KDB'
define_bool CONFIG_KALLSYMS y
else
bool ' Load all symbols for debugging' CONFIG_KALLSYMS
fi
fi
endmenu
This diff is collapsed.
#
# ia64/platform/hp/Makefile
#
# Copyright (C) 2002 Hewlett-Packard Co.
# David Mosberger-Tang <davidm@hpl.hp.com>
# Copyright (C) 1999 Silicon Graphics, Inc.
# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
#
@@ -12,6 +14,10 @@ O_TARGET := hp.a
obj-y := hpsim_console.o hpsim_irq.o hpsim_setup.o
obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
obj-$(CONFIG_SIMETH) += simeth.o
obj-$(CONFIG_SIM_SERIAL) += simserial.o
obj-$(CONFIG_SCSI_SIM) += simscsi.o
clean::
include $(TOPDIR)/Rules.make
This diff is collapsed.
/*
* Simulated SCSI driver.
*
* Copyright (C) 1999, 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
*
* 02/01/15 David Mosberger Updated for v2.5.1
* 99/12/18 David Mosberger Added support for READ10/WRITE10 needed by linux v2.3.33
*/
#include <linux/config.h>
#include <linux/blk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <scsi/scsi.h>
#include <asm/irq.h>
#include "../drivers/scsi/scsi.h"
#include "../drivers/scsi/sd.h"
#include "../drivers/scsi/hosts.h"
#include "simscsi.h"
#define DEBUG_SIMSCSI 1
/* Simulator system calls: */
#define SSC_OPEN 50
#define SSC_CLOSE 51
#define SSC_READ 52
#define SSC_WRITE 53
#define SSC_GET_COMPLETION 54
#define SSC_WAIT_COMPLETION 55
#define SSC_WRITE_ACCESS 2
#define SSC_READ_ACCESS 1
#if DEBUG_SIMSCSI
int simscsi_debug;
# define DBG simscsi_debug
#else
# define DBG 0
#endif
static struct Scsi_Host *host;
static void simscsi_interrupt (unsigned long val);
DECLARE_TASKLET(simscsi_tasklet, simscsi_interrupt, 0);
struct disk_req {
unsigned long addr;
unsigned len;
};
struct disk_stat {
int fd;
unsigned count;
};
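/*
 * Parameter blocks exchanged with the simulator: disk_req describes one
 * transfer (physical address and byte count), disk_stat returns the file
 * descriptor operated on and the number of bytes actually transferred.
 */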
extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
static int desc[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static struct queue_entry {
Scsi_Cmnd *sc;
} queue[SIMSCSI_REQ_QUEUE_LEN];
static int rd, wr;
static atomic_t num_reqs = ATOMIC_INIT(0);
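/*
 * Completion ring: simscsi_queuecommand() enqueues at wr, the tasklet
 * drains from rd, and num_reqs bounds the number of outstanding commands
 * so the two indices can never collide.
 */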
/* base name for default disks */
static char *simscsi_root = DEFAULT_SIMSCSI_ROOT;
#define MAX_ROOT_LEN 128
/*
* used to setup a new base for disk images
* to use /foo/bar/disk[a-z] as disk images
* you have to specify simscsi=/foo/bar/disk on the command line
*/
static int __init
simscsi_setup (char *s)
{
/* XXX Fix me we may need to strcpy() ? */
if (strlen(s) > MAX_ROOT_LEN) {
printk("simscsi_setup: prefix too long---using default %s\n", simscsi_root);
}
simscsi_root = s;
return 1;
}
__setup("simscsi=", simscsi_setup);
static void
simscsi_interrupt (unsigned long val)
{
Scsi_Cmnd *sc;
while ((sc = queue[rd].sc) != 0) {
atomic_dec(&num_reqs);
queue[rd].sc = 0;
if (DBG)
printk("simscsi_interrupt: done with %ld\n", sc->serial_number);
(*sc->scsi_done)(sc);
rd = (rd + 1) % SIMSCSI_REQ_QUEUE_LEN;
}
}
int
simscsi_detect (Scsi_Host_Template *templ)
{
templ->proc_name = "simscsi";
host = scsi_register(templ, 0);
return 1; /* fake one SCSI host adapter */
}
int
simscsi_release (struct Scsi_Host *host)
{
return 0; /* this is easy... */
}
const char *
simscsi_info (struct Scsi_Host *host)
{
return "simulated SCSI host adapter";
}
int
simscsi_abort (Scsi_Cmnd *cmd)
{
printk ("simscsi_abort: unimplemented\n");
return SCSI_ABORT_SUCCESS;
}
int
simscsi_reset (Scsi_Cmnd *cmd, unsigned int reset_flags)
{
printk ("simscsi_reset: unimplemented\n");
return SCSI_RESET_SUCCESS;
}
int
simscsi_biosparam (Disk *disk, kdev_t n, int ip[])
{
int size = disk->capacity;
ip[0] = 64;		/* heads */
ip[1] = 32;		/* sectors per track */
ip[2] = size >> 11;	/* cylinders = capacity / (64*32) */
return 0;
}
static void
simscsi_readwrite (Scsi_Cmnd *sc, int mode, unsigned long offset, unsigned long len)
{
struct disk_stat stat;
struct disk_req req;
req.addr = __pa(sc->request_buffer);
req.len = len; /* # of bytes to transfer */
if (sc->request_bufflen < req.len)
return;
stat.fd = desc[sc->target];
if (DBG)
printk("simscsi_%s @ %lx (off %lx)\n",
mode == SSC_READ ? "read":"write", req.addr, offset);
ia64_ssc(stat.fd, 1, __pa(&req), offset, mode);		/* issue the transfer */
ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);	/* wait for it to finish */
if (stat.count == req.len) {
sc->result = GOOD;
} else {
sc->result = DID_ERROR << 16;
}
}
static void
simscsi_sg_readwrite (Scsi_Cmnd *sc, int mode, unsigned long offset)
{
int list_len = sc->use_sg;
struct scatterlist *sl = (struct scatterlist *)sc->buffer;
struct disk_stat stat;
struct disk_req req;
stat.fd = desc[sc->target];
while (list_len) {
req.addr = __pa(page_address(sl->page) + sl->offset);
req.len = sl->length;
if (DBG)
printk("simscsi_sg_%s @ %lx (off %lx) use_sg=%d len=%d\n",
mode == SSC_READ ? "read":"write", req.addr, offset,
list_len, sl->length);
ia64_ssc(stat.fd, 1, __pa(&req), offset, mode);
ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);
/* should not happen in our case */
if (stat.count != req.len) {
sc->result = DID_ERROR << 16;
return;
}
offset += sl->length;
sl++;
list_len--;
}
sc->result = GOOD;
}
/*
* function handling both READ_6/WRITE_6 (non-scatter/gather mode)
* commands.
* Added 02/26/99 S.Eranian
*/
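/*
 * A 6-byte CDB encodes a 21-bit logical block address (low 5 bits of
 * byte 1 plus bytes 2-3) and an 8-bit block count in byte 4; both are
 * scaled by the 512-byte block size below.
 */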
static void
simscsi_readwrite6 (Scsi_Cmnd *sc, int mode)
{
unsigned long offset;
offset = (((sc->cmnd[1] & 0x1f) << 16) | (sc->cmnd[2] << 8) | sc->cmnd[3])*512;
if (sc->use_sg > 0)
simscsi_sg_readwrite(sc, mode, offset);
else
simscsi_readwrite(sc, mode, offset, sc->cmnd[4]*512);
}
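/*
 * A 10-byte CDB carries a full 32-bit logical block address in bytes 2-5
 * and a 16-bit block count in bytes 7-8.
 */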
static void
simscsi_readwrite10 (Scsi_Cmnd *sc, int mode)
{
unsigned long offset;
offset = ( (sc->cmnd[2] << 24) | (sc->cmnd[3] << 16)
| (sc->cmnd[4] << 8) | (sc->cmnd[5] << 0))*512;
if (sc->use_sg > 0)
simscsi_sg_readwrite(sc, mode, offset);
else
simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
}
int
simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
{
char fname[MAX_ROOT_LEN+16];
char *buf;
#if DEBUG_SIMSCSI
register long sp asm ("sp");
if (DBG)
printk("simscsi_queuecommand: target=%d,cmnd=%u,sc=%lu,sp=%lx,done=%p\n",
sc->target, sc->cmnd[0], sc->serial_number, sp, done);
#endif
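/* Assume a bad target; each recognized command below replaces this with
   the real completion status. */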
sc->result = DID_BAD_TARGET << 16;
sc->scsi_done = done;
if (sc->target <= 7 && sc->lun == 0) {
switch (sc->cmnd[0]) {
case INQUIRY:
if (sc->request_bufflen < 35) {
break;
}
sprintf (fname, "%s%c", simscsi_root, 'a' + sc->target);
desc[sc->target] = ia64_ssc (__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS,
0, 0, SSC_OPEN);
if (desc[sc->target] < 0) {
/* disk doesn't exist... */
break;
}
buf = sc->request_buffer;
buf[0] = 0; /* magnetic disk */
buf[1] = 0; /* not a removable medium */
buf[2] = 2; /* SCSI-2 compliant device */
buf[3] = 2; /* SCSI-2 response data format */
buf[4] = 31; /* additional length (bytes) */
buf[5] = 0; /* reserved */
buf[6] = 0; /* reserved */
buf[7] = 0; /* various flags */
memcpy(buf + 8, "HP      SIMULATED DISK  0.00", 28);
sc->result = GOOD;
break;
case TEST_UNIT_READY:
sc->result = GOOD;
break;
case READ_6:
if (desc[sc->target] < 0 )
break;
simscsi_readwrite6(sc, SSC_READ);
break;
case READ_10:
if (desc[sc->target] < 0 )
break;
simscsi_readwrite10(sc, SSC_READ);
break;
case WRITE_6:
if (desc[sc->target] < 0)
break;
simscsi_readwrite6(sc, SSC_WRITE);
break;
case WRITE_10:
if (desc[sc->target] < 0)
break;
simscsi_readwrite10(sc, SSC_WRITE);
break;
case READ_CAPACITY:
if (desc[sc->target] < 0 || sc->request_bufflen < 8) {
break;
}
buf = sc->request_buffer;
/* pretend to be a 1GB disk (partition table contains real stuff): */
buf[0] = 0x00;
buf[1] = 0x1f;
buf[2] = 0xff;
buf[3] = 0xff;
/* set block size of 512 bytes: */
buf[4] = 0;
buf[5] = 0;
buf[6] = 2;
buf[7] = 0;
sc->result = GOOD;
break;
case MODE_SENSE:
printk("MODE_SENSE\n");
break;
case START_STOP:
printk("START_STOP\n");
break;
default:
panic("simscsi: unknown SCSI command %u\n", sc->cmnd[0]);
}
}
if (sc->result == DID_BAD_TARGET) {
sc->result |= DRIVER_SENSE << 24;
sc->sense_buffer[0] = 0x70;
sc->sense_buffer[2] = 0x00;
}
if (atomic_read(&num_reqs) >= SIMSCSI_REQ_QUEUE_LEN) {
panic("Attempt to queue command while command is pending!!");
}
atomic_inc(&num_reqs);
queue[wr].sc = sc;
wr = (wr + 1) % SIMSCSI_REQ_QUEUE_LEN;
tasklet_schedule(&simscsi_tasklet);
return 0;
}
static Scsi_Host_Template driver_template = SIMSCSI;
#define __initcall(fn) late_initcall(fn)
#include "../drivers/scsi/scsi_module.c"
/*
* Simulated SCSI driver.
*
* Copyright (C) 1999 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#ifndef SIMSCSI_H
#define SIMSCSI_H
#define SIMSCSI_REQ_QUEUE_LEN 64
#define DEFAULT_SIMSCSI_ROOT "/var/ski-disks/sd"
extern int simscsi_detect (Scsi_Host_Template *);
extern int simscsi_release (struct Scsi_Host *);
extern const char *simscsi_info (struct Scsi_Host *);
extern int simscsi_queuecommand (Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
extern int simscsi_abort (Scsi_Cmnd *);
extern int simscsi_reset (Scsi_Cmnd *, unsigned int);
extern int simscsi_biosparam (Disk *, kdev_t, int[]);
#define SIMSCSI { \
detect: simscsi_detect, \
release: simscsi_release, \
info: simscsi_info, \
queuecommand: simscsi_queuecommand, \
abort: simscsi_abort, \
reset: simscsi_reset, \
bios_param: simscsi_biosparam, \
can_queue: SIMSCSI_REQ_QUEUE_LEN, \
this_id: -1, \
sg_tablesize: SG_ALL, \
cmd_per_lun: SIMSCSI_REQ_QUEUE_LEN, \
present: 0, \
unchecked_isa_dma: 0, \
use_clustering: DISABLE_CLUSTERING \
}
#endif /* SIMSCSI_H */
This diff is collapsed.
#include <asm/asmmacro.h>
#include <asm/offsets.h>
#include <asm/signal.h>
#include <asm/thread_info.h>
#include "../kernel/minstate.h"
@@ -87,18 +88,21 @@ END(sys32_sigsuspend)
GLOBAL_ENTRY(ia32_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
* address of the previously executing task.
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret1: adds r2=IA64_TASK_PTRACE_OFFSET,r13
.ret1:
#endif
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld8 r2=[r2]
ld4 r2=[r2]
;;
mov r8=0
tbit.nz p6,p0=r2,PT_SYSCALLTRACE_BIT
tbit.nz p6,p0=r2,TIF_SYSCALL_TRACE
(p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8
END(ia32_ret_from_clone)
......
@@ -20,7 +20,7 @@ ia32_intercept (struct pt_regs *regs, unsigned long isr)
{
switch ((isr >> 16) & 0xff) {
case 0: /* Instruction intercept fault */
case 3: /* Locked Data reference fault */
case 4: /* Locked Data reference fault */
case 1: /* Gate intercept trap */
return -1;
......
@@ -486,6 +486,7 @@ END(ia64_trace_syscall)
GLOBAL_ENTRY(ia64_ret_from_clone)
PT_REGS_UNWIND_INFO(0)
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
/*
* We need to call schedule_tail() to complete the scheduling process.
* Called by ia64_switch_to after do_fork()->copy_thread(). r8 contains the
@@ -493,6 +494,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
*/
br.call.sptk.many rp=ia64_invoke_schedule_tail
.ret8:
#endif
adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 r2=[r2]
@@ -620,7 +622,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
shr.u r18=r19,16 // get byte size of existing "dirty" partition
;;
mov r16=ar.bsp // get existing backing store pointer
movl r17=PERCPU_ADDR+IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET
movl r17=THIS_CPU(ia64_phys_stacked_size_p8)
;;
ld4 r17=[r17] // r17 = cpu_data->phys_stacked_size_p8
(pKern) br.cond.dpnt skip_rbs_switch
@@ -756,6 +758,7 @@ ENTRY(handle_syscall_error)
br.cond.sptk ia64_leave_kernel
END(handle_syscall_error)
#ifdef CONFIG_SMP
/*
* Invoke schedule_tail(task) while preserving in0-in7, which may be needed
* in case a system call gets restarted.
@@ -772,6 +775,8 @@ GLOBAL_ENTRY(ia64_invoke_schedule_tail)
br.ret.sptk.many rp
END(ia64_invoke_schedule_tail)
#endif /* CONFIG_SMP */
#if __GNUC__ < 3
/*
......
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(clear_page);
#include <asm/processor.h>
# ifndef CONFIG_NUMA
EXPORT_SYMBOL(_cpu_data);
EXPORT_SYMBOL(cpu_info);
# endif
EXPORT_SYMBOL(kernel_thread);
......
@@ -645,7 +645,6 @@ ENTRY(break_fault)
mov r3=255
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
adds r2=IA64_TASK_PTRACE_OFFSET,r13 // r2 = &current->ptrace
;;
cmp.geu p6,p7=r3,r15 // (syscall > 0 && syscall <= 1024+255) ?
movl r16=sys_call_table
......
This diff is collapsed.
@@ -193,7 +193,10 @@ ia64_save_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_save_regs(task);
if (local_cpu_data->pfm_syst_wide) pfm_syst_wide_update_task(task, 0);
# ifdef CONFIG_SMP
if (local_cpu_data->pfm_syst_wide)
pfm_syst_wide_update_task(task, 0);
# endif
#endif
if (IS_IA32_PROCESS(ia64_task_regs(task)))
@@ -210,7 +213,9 @@ ia64_load_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_load_regs(task);
# ifdef CONFIG_SMP
if (local_cpu_data->pfm_syst_wide) pfm_syst_wide_update_task(task, 1);
# endif
#endif
if (IS_IA32_PROCESS(ia64_task_regs(task)))
......
@@ -54,12 +54,10 @@
extern char _end;
#ifdef CONFIG_NUMA
struct cpuinfo_ia64 *boot_cpu_data;
#else
struct cpuinfo_ia64 _cpu_data[NR_CPUS] __attribute__ ((section ("__special_page_section")));
#endif
unsigned long __per_cpu_offset[NR_CPUS];
struct cpuinfo_ia64 cpu_info __per_cpu_data;
unsigned long ia64_phys_stacked_size_p8;
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
@@ -511,6 +509,12 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
void
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
}
/*
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
@@ -518,46 +522,21 @@ identify_cpu (struct cpuinfo_ia64 *c)
void
cpu_init (void)
{
extern char __per_cpu_start[], __phys_per_cpu_start[], __per_cpu_end[];
extern void __init ia64_mmu_init (void *);
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_data;
#ifdef CONFIG_NUMA
int cpu, order;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
int cpu = smp_processor_id();
/*
* If NUMA is configured, the cpu_data array is not preallocated. The boot cpu
* allocates entries for every possible cpu. As the remaining cpus come online,
* they reallocate a new cpu_data structure on their local node. This extra work
* is required because some boot code references all cpu_data structures
* before the cpus are actually started.
*/
if (!boot_cpu_data) {
my_cpu_data = alloc_bootmem_pages_node(NODE_DATA(numa_node_id()),
sizeof(struct cpuinfo_ia64));
boot_cpu_data = my_cpu_data;
my_cpu_data->cpu_data[0] = my_cpu_data;
for (cpu = 1; cpu < NR_CPUS; ++cpu)
my_cpu_data->cpu_data[cpu]
= alloc_bootmem_pages_node(NODE_DATA(numa_node_id()),
sizeof(struct cpuinfo_ia64));
for (cpu = 1; cpu < NR_CPUS; ++cpu)
memcpy(my_cpu_data->cpu_data[cpu]->cpu_data,
my_cpu_data->cpu_data, sizeof(my_cpu_data->cpu_data));
} else {
order = get_order(sizeof(struct cpuinfo_ia64));
my_cpu_data = page_address(alloc_pages_node(numa_node_id(), GFP_KERNEL, order));
memcpy(my_cpu_data, boot_cpu_data->cpu_data[smp_processor_id()],
sizeof(struct cpuinfo_ia64));
__free_pages(virt_to_page(boot_cpu_data->cpu_data[smp_processor_id()]),
order);
for (cpu = 0; cpu < NR_CPUS; ++cpu)
boot_cpu_data->cpu_data[cpu]->cpu_data[smp_processor_id()] = my_cpu_data;
}
#else
my_cpu_data = cpu_data(smp_processor_id());
#endif
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &cpu_info - __per_cpu_start);
/*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
@@ -565,7 +544,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() the old way, through identity mapped space.
*/
identify_cpu(my_cpu_data);
identify_cpu(my_cpu_info);
/* Clear the stack memory reserved for pt_regs: */
memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
@@ -626,7 +605,7 @@ cpu_init (void)
printk ("cpu_init: PAL RSE info failed, assuming 96 physical stacked regs\n");
num_phys_stacked = 96;
}
local_cpu_data->phys_stacked_size_p8 = num_phys_stacked*8 + 8;
/* size of physical stacked register partition plus 8 bytes: */
ia64_phys_stacked_size_p8 = num_phys_stacked*8 + 8;
platform_cpu_init();
}
@@ -75,12 +75,11 @@ struct call_data_struct {
static volatile struct call_data_struct *call_data;
static spinlock_t migration_lock = SPIN_LOCK_UNLOCKED;
static task_t *migrating_task;
#define IPI_CALL_FUNC 0
#define IPI_CPU_STOP 1
#define IPI_MIGRATE_TASK 2
/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static __u64 ipi_operation __per_cpu_data ____cacheline_aligned;
static void
stop_this_cpu (void)
@@ -99,7 +98,7 @@ void
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
int this_cpu = smp_processor_id();
unsigned long *pending_ipis = &local_cpu_data->ipi_operation;
unsigned long *pending_ipis = &this_cpu(ipi_operation);
unsigned long ops;
/* Count this now; we may make a call that never returns. */
@@ -143,14 +142,6 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
}
break;
case IPI_MIGRATE_TASK:
{
task_t *p = migrating_task;
spin_unlock(&migration_lock);
sched_task_migrated(p);
}
break;
case IPI_CPU_STOP:
stop_this_cpu();
break;
@@ -167,7 +158,7 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
static inline void
send_IPI_single (int dest_cpu, int op)
{
set_bit(op, &cpu_data(dest_cpu)->ipi_operation);
set_bit(op, &per_cpu(ipi_operation, dest_cpu));
platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
@@ -350,15 +341,6 @@ smp_send_stop (void)
smp_num_cpus = 1;
}
void
smp_migrate_task (int cpu, task_t *p)
{
/* The target CPU will unlock the migration spinlock: */
spin_lock(&migration_lock);
migrating_task = p;
send_IPI_single(cpu, IPI_MIGRATE_TASK);
}
int __init
setup_profiling_timer (unsigned int multiplier)
{
......
@@ -1305,11 +1305,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
* handler into reading an arbitrary kernel addresses...
*/
if (!user_mode(regs)) {
#ifdef GAS_HAS_LOCAL_TAGS
fix = search_exception_table(regs->cr_iip + ia64_psr(regs)->ri);
#else
fix = search_exception_table(regs->cr_iip);
#endif
fix = SEARCH_EXCEPTION_TABLE(regs);
}
if (user_mode(regs) || fix.cont) {
if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
......
@@ -18,7 +18,7 @@
# define PREFETCH_LINES 9 // magic number
#else
# define L3_LINE_SIZE 128 // McKinley L3 line size
# define PREFETCH_LINES 7 // magic number
# define PREFETCH_LINES 12 // magic number
#endif
#define saved_lc r2
......
@@ -49,7 +49,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
int signal = SIGSEGV, code = SEGV_MAPERR;
struct vm_area_struct *vma, *prev_vma;
struct mm_struct *mm = current->mm;
struct exception_fixup fix;
struct siginfo si;
unsigned long mask;
@@ -167,15 +166,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
return;
}
#ifdef GAS_HAS_LOCAL_TAGS
fix = search_exception_table(regs->cr_iip + ia64_psr(regs)->ri);
#else
fix = search_exception_table(regs->cr_iip);
#endif
if (fix.cont) {
handle_exception(regs, fix);
if (done_with_exception(regs))
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to terminate things
......
@@ -103,12 +103,12 @@ free_initmem (void)
free_page(addr);
++totalram_pages;
}
printk ("Freeing unused kernel memory: %ldkB freed\n",
(&__init_end - &__init_begin) >> 10);
printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
(&__init_end - &__init_begin) >> 10);
}
void
free_initrd_mem(unsigned long start, unsigned long end)
free_initrd_mem (unsigned long start, unsigned long end)
{
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
@@ -145,7 +145,7 @@ free_initrd_mem(unsigned long start, unsigned long end)
end = end & PAGE_MASK;
if (start < end)
printk ("Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
if (!VALID_PAGE(virt_to_page(start)))
......
@@ -7,12 +7,6 @@ BEGIN {
print " * This file was generated by arch/ia64/tools/print_offsets.awk."
print " *"
print " */"
#
# This is a cheesy hack. Make sure that
# PT_PTRACED == 1<<PT_PTRACED_BIT.
#
print "#define PT_PTRACED_BIT 0"
print "#define PT_SYSCALLTRACE_BIT 1"
}
# look for .tab:
......
@@ -52,14 +52,7 @@ tab[] =
{ "SIGFRAME_SIZE", sizeof (struct sigframe) },
{ "UNW_FRAME_INFO_SIZE", sizeof (struct unw_frame_info) },
{ "", 0 }, /* spacer */
{ "IA64_TASK_PTRACE_OFFSET", offsetof (struct task_struct, ptrace) },
{ "IA64_TASK_THREAD_OFFSET", offsetof (struct task_struct, thread) },
{ "IA64_TASK_THREAD_KSP_OFFSET", offsetof (struct task_struct, thread.ksp) },
#ifdef CONFIG_PERFMON
{ "IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET",offsetof(struct task_struct, thread.pfm_ovfl_block_reset) },
#endif
{ "IA64_TASK_PID_OFFSET", offsetof (struct task_struct, pid) },
{ "IA64_TASK_MM_OFFSET", offsetof (struct task_struct, mm) },
{ "IA64_PT_REGS_CR_IPSR_OFFSET", offsetof (struct pt_regs, cr_ipsr) },
{ "IA64_PT_REGS_CR_IIP_OFFSET", offsetof (struct pt_regs, cr_iip) },
{ "IA64_PT_REGS_CR_IFS_OFFSET", offsetof (struct pt_regs, cr_ifs) },
@@ -169,9 +162,6 @@ tab[] =
{ "IA64_SIGFRAME_SIGCONTEXT_OFFSET", offsetof (struct sigframe, sc) },
{ "IA64_CLONE_VFORK", CLONE_VFORK },
{ "IA64_CLONE_VM", CLONE_VM },
{ "IA64_CPU_IRQ_COUNT_OFFSET", offsetof (struct cpuinfo_ia64, irq_stat.f.irq_count) },
{ "IA64_CPU_BH_COUNT_OFFSET", offsetof (struct cpuinfo_ia64, irq_stat.f.bh_count) },
{ "IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET",offsetof (struct cpuinfo_ia64, phys_stacked_size_p8)},
};
static const char *tabs = "\t\t\t\t\t\t\t\t\t\t";
@@ -189,16 +179,6 @@ main (int argc, char **argv)
printf ("/*\n * DO NOT MODIFY\n *\n * This file was generated by "
"arch/ia64/tools/print_offsets.\n *\n */\n\n");
/* This is stretching things a bit, but entry.S needs the bit number
for PT_PTRACED and it can't include <linux/sched.h> so this seems
like a reasonably solution. At least the code won't break in
subtle ways should PT_PTRACED ever change. Ditto for
PT_TRACESYS_BIT. */
printf ("#define PT_PTRACED_BIT\t\t\t%u\n", ffs (PT_PTRACED) - 1);
#if 0
printf ("#define PT_SYSCALLTRACE_BIT\t\t\t%u\n\n", ffs (PT_SYSCALLTRACE) - 1);
#endif
for (i = 0; i < sizeof (tab) / sizeof (tab[0]); ++i)
{
if (tab[i].name[0] == '\0')
......
#include <linux/config.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/system.h>
@@ -70,11 +71,6 @@ SECTIONS
{ *(__ksymtab) }
__stop___ksymtab = .;
__start___kallsyms = .; /* All kernel symbols for debugging */
__kallsyms : AT(ADDR(__kallsyms) - PAGE_OFFSET)
{ *(__kallsyms) }
__stop___kallsyms = .;
/* Unwind info & table: */
. = ALIGN(8);
.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
@@ -124,10 +120,7 @@ SECTIONS
.data.init_task : AT(ADDR(.data.init_task) - PAGE_OFFSET)
{ *(.data.init_task) }
.data.page_aligned : AT(ADDR(.data.page_aligned) - PAGE_OFFSET)
{ *(.data.idt) }
. = ALIGN(64);
. = ALIGN(SMP_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - PAGE_OFFSET)
{ *(.data.cacheline_aligned) }
@@ -135,6 +128,17 @@ SECTIONS
.kstrtab : AT(ADDR(.kstrtab) - PAGE_OFFSET)
{ *(.kstrtab) }
/* Per-cpu data: */
. = ALIGN(PAGE_SIZE);
__phys_per_cpu_start = .;
.data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - PAGE_OFFSET)
{
__per_cpu_start = .;
*(.data.percpu)
__per_cpu_end = .;
}
. = __phys_per_cpu_start + 4096; /* ensure percpu fits into smallest page size (4KB) */
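/*
 * The per-cpu section is linked at the fixed virtual address PERCPU_ADDR
 * but loaded at __phys_per_cpu_start; cpu_init() copies it once per CPU
 * and records each copy's offset in __per_cpu_offset[].
 */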
.data : AT(ADDR(.data) - PAGE_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
@@ -151,7 +155,7 @@ SECTIONS
{ *(.sbss) *(.scommon) }
.bss : AT(ADDR(.bss) - PAGE_OFFSET)
{ *(.bss) *(COMMON) }
. = ALIGN(64 / 8);
_end = .;
/* Stabs debugging sections. */
......
@@ -9,6 +9,8 @@
* scheduler patch
*/
#include <linux/types.h>
#include <asm/system.h>
/**
@@ -97,7 +99,7 @@ clear_bit (int nr, volatile void *addr)
static __inline__ void
__clear_bit (int nr, volatile void *addr)
{
volatile __u32 *p = (__u32 *) addr + (nr >> 5);;
volatile __u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
*p &= ~m;
}
......
@@ -5,7 +5,7 @@
/*
* Copyright (C) 1998-2000 Hewlett-Packard Co
* Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
/* Bytes per L1 (data) cache line. */
......
@@ -2,8 +2,8 @@
#define _ASM_IA64_HARDIRQ_H
/*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
......
@@ -9,6 +9,7 @@
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define MAP_SHARED 0x01 /* Share changes */
......
@@ -6,23 +6,16 @@
* This file was generated by arch/ia64/tools/print_offsets.awk.
*
*/
#define PT_PTRACED_BIT 0
#define PT_SYSCALLTRACE_BIT 1
#define IA64_TASK_SIZE 3936 /* 0xf60 */
#define IA64_THREAD_INFO_SIZE 24 /* 0x18 */
#define IA64_PT_REGS_SIZE 400 /* 0x190 */
#define IA64_SWITCH_STACK_SIZE 560 /* 0x230 */
#define IA64_SIGINFO_SIZE 128 /* 0x80 */
#define IA64_CPU_SIZE 16384 /* 0x4000 */
#define IA64_CPU_SIZE 224 /* 0xe0 */
#define SIGFRAME_SIZE 2816 /* 0xb00 */
#define UNW_FRAME_INFO_SIZE 448 /* 0x1c0 */
#define IA64_TASK_PTRACE_OFFSET 32 /* 0x20 */
#define IA64_TASK_THREAD_OFFSET 1472 /* 0x5c0 */
#define IA64_TASK_THREAD_KSP_OFFSET 1480 /* 0x5c8 */
#define IA64_TASK_PFM_OVFL_BLOCK_RESET_OFFSET 2096 /* 0x830 */
#define IA64_TASK_PID_OFFSET 212 /* 0xd4 */
#define IA64_TASK_MM_OFFSET 136 /* 0x88 */
#define IA64_PT_REGS_CR_IPSR_OFFSET 0 /* 0x0 */
#define IA64_PT_REGS_CR_IIP_OFFSET 8 /* 0x8 */
#define IA64_PT_REGS_CR_IFS_OFFSET 16 /* 0x10 */
@@ -132,8 +125,5 @@
#define IA64_SIGFRAME_SIGCONTEXT_OFFSET 160 /* 0xa0 */
#define IA64_CLONE_VFORK 16384 /* 0x4000 */
#define IA64_CLONE_VM 256 /* 0x100 */
#define IA64_CPU_IRQ_COUNT_OFFSET 0 /* 0x0 */
#define IA64_CPU_BH_COUNT_OFFSET 4 /* 0x4 */
#define IA64_CPU_PHYS_STACKED_SIZE_P8_OFFSET 12 /* 0xc */
#endif /* _ASM_IA64_OFFSETS_H */
@@ -15,6 +15,8 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/kregs.h>
#include <asm/system.h>
@@ -184,6 +186,10 @@
*/
#define IA64_USEC_PER_CYC_SHIFT 41
#define __HAVE_ARCH_PER_CPU
#define THIS_CPU(var) (var)
#ifndef __ASSEMBLY__
#include <linux/threads.h>
@@ -196,6 +202,11 @@
#include <asm/unwind.h>
#include <asm/atomic.h>
extern unsigned long __per_cpu_offset[NR_CPUS];
#define per_cpu(var, cpu) (*(__typeof__(&(var))) ((void *) &(var) + __per_cpu_offset[cpu]))
#define this_cpu(var) (var)
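/*
 * Example: per_cpu(cpu_info, 3) adds CPU 3's entry in __per_cpu_offset[]
 * to the link-time address of cpu_info; this_cpu() uses the variable
 * directly, since the local copy is mapped at its linked address.
 */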
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
__u64 reserved0 : 1;
@@ -239,7 +250,7 @@ struct ia64_psr {
* CPU type, hardware bug flags, and per-CPU state. Frequently used
* state comes earlier:
*/
struct cpuinfo_ia64 {
extern struct cpuinfo_ia64 {
/* irq_stat must be 64-bit aligned */
union {
struct {
@@ -249,7 +260,6 @@ struct cpuinfo_ia64 {
__u64 irq_and_bh_counts;
} irq_stat;
__u32 softirq_pending;
__u32 phys_stacked_size_p8; /* size of physical stacked registers + 8 */
__u64 itm_delta; /* # of clock cycles between clock ticks */
__u64 itm_next; /* interval timer mask value to use for next clock tick */
__u64 *pgd_quick;
@@ -282,41 +292,15 @@ struct cpuinfo_ia64 {
__u64 prof_multiplier;
__u32 pfm_syst_wide;
__u32 pfm_dcr_pp;
/* this is written to by *other* CPUs: */
__u64 ipi_operation ____cacheline_aligned;
#endif
#ifdef CONFIG_NUMA
void *node_directory;
int numa_node_id;
struct cpuinfo_ia64 *cpu_data[NR_CPUS];
#endif
/* Platform specific word. MUST BE LAST IN STRUCT */
__u64 platform_specific;
} __attribute__ ((aligned (PAGE_SIZE))) ;
} cpu_info __per_cpu_data;
/*
* The "local" data pointer. It points to the per-CPU data of the currently executing
* CPU, much like "current" points to the per-task data of the currently executing task.
*/
#define local_cpu_data ((struct cpuinfo_ia64 *) PERCPU_ADDR)
/*
* On NUMA systems, cpu_data for each cpu is allocated during cpu_init() & is allocated on
* the node that contains the cpu. This minimizes off-node memory references. cpu_data
* for each cpu contains an array of pointers to the cpu_data structures of each of the
* other cpus.
*
* On non-NUMA systems, cpu_data is a static array allocated at compile time. References
* to the cpu_data of another cpu is done by direct references to the appropriate entry of
* the array.
*/
#ifdef CONFIG_NUMA
# define cpu_data(cpu) local_cpu_data->cpu_data[cpu]
# define numa_node_id() (local_cpu_data->numa_node_id)
#else
extern struct cpuinfo_ia64 _cpu_data[NR_CPUS];
# define cpu_data(cpu) (&_cpu_data[cpu])
#endif
#define local_cpu_data (&this_cpu(cpu_info))
#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
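/*
 * local_cpu_data is this CPU's private copy of cpu_info; cpu_data(cpu)
 * reaches another CPU's copy through __per_cpu_offset[cpu].
 */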
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);
......
@@ -136,6 +136,7 @@ typedef struct siginfo {
#define SI_ASYNCIO (-4) /* sent by AIO completion */
#define SI_SIGIO (-5) /* sent by queued SIGIO */
#define SI_TKILL (-6) /* sent by tkill system call */
#define SI_DETHREAD (-7) /* sent by execve() killing subsidiary threads */
#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
......
@@ -12,30 +12,36 @@
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
#ifdef CONFIG_SMP
# define kernel_locked() spin_is_locked(&kernel_flag)
# define check_irq_holder(cpu) \
do { \
if (global_irq_holder == (cpu)) \
BUG(); \
} while (0)
#else
# define kernel_locked() (1)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
static __inline__ void
release_kernel_lock(struct task_struct *task, int cpu)
{
if (unlikely(task->lock_depth >= 0)) {
spin_unlock(&kernel_flag);
if (global_irq_holder == (cpu))
BUG();
}
}
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
check_irq_holder(cpu); \
} \
} while (0)
/*
* Re-acquire the kernel lock
*/
static __inline__ void
reacquire_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
spin_lock(&kernel_flag);
}
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
......
@@ -397,13 +397,17 @@ extern void ia64_load_extra (struct task_struct *task);
} while (0)
#ifdef CONFIG_SMP
/*
* In the SMP case, we save the fph state when context-switching
* away from a thread that modified fph. This way, when the thread
* gets scheduled on another CPU, the CPU can pick up the state from
* task->thread.fph, avoiding the complication of having to fetch
* the latest fph state from another CPU.
*/
/* Return true if this CPU can call the console drivers in printk() */
#define arch_consoles_callable() (cpu_online_map & (1UL << smp_processor_id()))
/*
* In the SMP case, we save the fph state when context-switching
* away from a thread that modified fph. This way, when the thread
* gets scheduled on another CPU, the CPU can pick up the state from
* task->thread.fph, avoiding the complication of having to fetch
* the latest fph state from another CPU.
*/
# define switch_to(prev,next) do { \
if (ia64_psr(ia64_task_regs(prev))->mfh) { \
ia64_psr(ia64_task_regs(prev))->mfh = 0; \
......
@@ -320,4 +320,22 @@ struct exception_fixup {
extern struct exception_fixup search_exception_table (unsigned long addr);
extern void handle_exception (struct pt_regs *regs, struct exception_fixup fixup);
#ifdef GAS_HAS_LOCAL_TAGS
#define SEARCH_EXCEPTION_TABLE(regs) search_exception_table(regs->cr_iip + ia64_psr(regs)->ri);
#else
#define SEARCH_EXCEPTION_TABLE(regs) search_exception_table(regs->cr_iip);
#endif
static inline int
done_with_exception (struct pt_regs *regs)
{
struct exception_fixup fix;
fix = SEARCH_EXCEPTION_TABLE(regs);
if (fix.cont) {
handle_exception(regs, fix);
return 1;
}
return 0;
}
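/* Typical use in a fault handler:
   if (done_with_exception(regs)) return; */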
#endif /* _ASM_IA64_UACCESS_H */
@@ -1452,8 +1452,10 @@ void __init init_idle(task_t *idle, int cpu)
set_tsk_need_resched(idle);
__restore_flags(flags);
#ifdef CONFIG_PREEMPT
/* Set the preempt count _outside_ the spinlocks! */
idle->thread_info->preempt_count = (idle->lock_depth >= 0);
#endif
}
extern void init_timervecs(void);
......