Commit b068ec41 authored by Andi Kleen, committed by Linus Torvalds

[PATCH] x86-64 merge

x86_64 core updates.

 - Make it compile again (switch_to macros etc., add dummy suspend.h)
 - reenable strength reduce optimization
 - Fix ramdisk (patch from Mikael Pettersson)
 - Some merges from i386
 - Reimplement lazy iobitmap allocation, based on bcrl's idea.
 - Fix IPC 32-bit emulation to actually work and move it into its own file
 - New fixed mtrr.c from DaveJ ported from 2.4 and reenable it.
 - Move tlbstate into PDA.
 - Add some changes that got lost during the last merge.
 - New memset that seems to actually work.
 - Align signal handler stack frames to 16 bytes.
 - Some more minor bugfixes.
parent 9343c8e2
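The 32-bit emulation code in this patch leans on one recurring compat pattern: copy the 32-bit user structure into the native 64-bit layout, switch the address limit with set_fs(KERNEL_DS) so the regular 64-bit syscall accepts a kernel pointer, then restore the limit. A minimal sketch of that pattern follows; native_struct, convert_from_user32() and sys_native_call() are hypothetical placeholders, not symbols from this patch.

/* illustrative sketch of the compat-wrapper pattern used below */
static int compat_wrapper_example(void *uptr32)
{
	struct native_struct k;		/* hypothetical 64-bit layout */
	mm_segment_t old_fs;
	int err;

	/* widen the packed 32-bit user structure into the kernel copy */
	if (convert_from_user32(&k, uptr32))
		return -EFAULT;

	old_fs = get_fs();
	set_fs(KERNEL_DS);		/* let the native syscall take &k */
	err = sys_native_call(&k);
	set_fs(old_fs);
	return err;
}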
@@ -43,15 +43,9 @@ CFLAGS += -mcmodel=kernel
CFLAGS += -pipe
# this makes reading assembly source easier
CFLAGS += -fno-reorder-blocks
# needed for later gcc 3.1
CFLAGS += -finline-limit=2000
# needed for earlier gcc 3.1
#CFLAGS += -fno-strength-reduce
#CFLAGS += -g
# prevent gcc from keeping the stack 16 byte aligned (FIXME)
#CFLAGS += -mpreferred-stack-boundary=2
HEAD := arch/x86_64/kernel/head.o arch/x86_64/kernel/head64.o arch/x86_64/kernel/init_task.o
SUBDIRS := arch/x86_64/tools $(SUBDIRS) arch/x86_64/kernel arch/x86_64/mm arch/x86_64/lib
......
@@ -21,10 +21,6 @@ ROOT_DEV := CURRENT
SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
# If you want the RAM disk device, define this to be the size in blocks.
RAMDISK := -DRAMDISK=512
# ---------------------------------------------------------------------------
BOOT_INCL = $(TOPDIR)/include/linux/config.h \
......
@@ -47,8 +47,7 @@ define_bool CONFIG_EISA n
define_bool CONFIG_X86_IO_APIC y
define_bool CONFIG_X86_LOCAL_APIC y
#currently broken:
#bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
bool 'Symmetric multi-processing support' CONFIG_SMP
if [ "$CONFIG_SMP" = "n" ]; then
bool 'Preemptible Kernel' CONFIG_PREEMPT
@@ -226,6 +225,7 @@ if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
bool ' Additional run-time checks' CONFIG_CHECKING
bool ' Debug __init statements' CONFIG_INIT_DEBUG
bool ' Spinlock debugging' CONFIG_DEBUG_SPINLOCK
fi
endmenu
......
@@ -9,8 +9,9 @@ export-objs := ia32_ioctl.o sys_ia32.o
all: ia32.o
O_TARGET := ia32.o
obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o ia32_signal.o \
ia32_binfmt.o fpu32.o socket32.o ptrace32.o
obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_ioctl.o \
ia32_signal.o \
ia32_binfmt.o fpu32.o socket32.o ptrace32.o ipc32.o
clean::
......
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/ipc.h>
#include <asm/mman.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/ipc.h>
#include <asm/ia32.h>
/*
* sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32-bit emulation.
*
* This is really horribly ugly.
*/
struct msgbuf32 {
s32 mtype;
char mtext[1];
};
struct ipc_perm32 {
int key;
__kernel_uid_t32 uid;
__kernel_gid_t32 gid;
__kernel_uid_t32 cuid;
__kernel_gid_t32 cgid;
unsigned short mode;
unsigned short seq;
};
struct ipc64_perm32 {
unsigned key;
__kernel_uid32_t32 uid;
__kernel_gid32_t32 gid;
__kernel_uid32_t32 cuid;
__kernel_gid32_t32 cgid;
unsigned short mode;
unsigned short __pad1;
unsigned short seq;
unsigned short __pad2;
unsigned int unused1;
unsigned int unused2;
};
struct semid_ds32 {
struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
__kernel_time_t32 sem_otime; /* last semop time */
__kernel_time_t32 sem_ctime; /* last change time */
u32 sem_base; /* ptr to first semaphore in array */
u32 sem_pending; /* pending operations to be processed */
u32 sem_pending_last; /* last pending operation */
u32 undo; /* undo requests on this array */
unsigned short sem_nsems; /* no. of semaphores in array */
};
struct semid64_ds32 {
struct ipc64_perm32 sem_perm;
__kernel_time_t32 sem_otime;
unsigned int __unused1;
__kernel_time_t32 sem_ctime;
unsigned int __unused2;
unsigned int sem_nsems;
unsigned int __unused3;
unsigned int __unused4;
};
struct msqid_ds32 {
struct ipc_perm32 msg_perm;
u32 msg_first;
u32 msg_last;
__kernel_time_t32 msg_stime;
__kernel_time_t32 msg_rtime;
__kernel_time_t32 msg_ctime;
u32 wwait;
u32 rwait;
unsigned short msg_cbytes;
unsigned short msg_qnum;
unsigned short msg_qbytes;
__kernel_ipc_pid_t32 msg_lspid;
__kernel_ipc_pid_t32 msg_lrpid;
};
struct msqid64_ds32 {
struct ipc64_perm32 msg_perm;
__kernel_time_t32 msg_stime;
unsigned int __unused1;
__kernel_time_t32 msg_rtime;
unsigned int __unused2;
__kernel_time_t32 msg_ctime;
unsigned int __unused3;
unsigned int msg_cbytes;
unsigned int msg_qnum;
unsigned int msg_qbytes;
__kernel_pid_t32 msg_lspid;
__kernel_pid_t32 msg_lrpid;
unsigned int __unused4;
unsigned int __unused5;
};
struct shmid_ds32 {
struct ipc_perm32 shm_perm;
int shm_segsz;
__kernel_time_t32 shm_atime;
__kernel_time_t32 shm_dtime;
__kernel_time_t32 shm_ctime;
__kernel_ipc_pid_t32 shm_cpid;
__kernel_ipc_pid_t32 shm_lpid;
unsigned short shm_nattch;
};
struct shmid64_ds32 {
struct ipc64_perm32 shm_perm;
__kernel_size_t32 shm_segsz;
__kernel_time_t32 shm_atime;
unsigned int __unused1;
__kernel_time_t32 shm_dtime;
unsigned int __unused2;
__kernel_time_t32 shm_ctime;
unsigned int __unused3;
__kernel_pid_t32 shm_cpid;
__kernel_pid_t32 shm_lpid;
unsigned int shm_nattch;
unsigned int __unused4;
unsigned int __unused5;
};
struct shminfo64_32 {
unsigned int shmmax;
unsigned int shmmin;
unsigned int shmmni;
unsigned int shmseg;
unsigned int shmall;
unsigned int __unused1;
unsigned int __unused2;
unsigned int __unused3;
unsigned int __unused4;
};
struct shm_info32 {
int used_ids;
u32 shm_tot, shm_rss, shm_swp;
u32 swap_attempts, swap_successes;
};
struct ipc_kludge {
struct msgbuf *msgp;
int msgtyp;
};
#define A(__x) ((unsigned long)(__x))
#define AA(__x) ((unsigned long)(__x))
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
#define MSGCTL 14
#define SHMAT 21
#define SHMDT 22
#define SHMGET 23
#define SHMCTL 24
#define IPCOP_MASK(__x) (1UL << (__x))
static int
ipc_parse_version32 (int *cmd)
{
if (*cmd & IPC_64) {
*cmd ^= IPC_64;
return IPC_64;
} else {
return IPC_OLD;
}
}
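/*
 * Note (editorial, not in the original source): IPC_64 is 0x0100; callers
 * OR it into the command to request the 64-bit-safe structure layouts
 * (semid64_ds32, msqid64_ds32, shmid64_ds32). Stripping it here lets the
 * switch statements below match plain command values, while the saved
 * return value selects the layout for the copy-out.
 */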
static int
semctl32 (int first, int second, int third, void *uptr)
{
union semun fourth;
u32 pad;
int err = 0, err2;
struct semid64_ds s;
mm_segment_t old_fs;
int version = ipc_parse_version32(&third);
if (!uptr)
return -EINVAL;
if (get_user(pad, (u32 *)uptr))
return -EFAULT;
if (third == SETVAL)
fourth.val = (int)pad;
else
fourth.__pad = (void *)A(pad);
switch (third) {
case IPC_INFO:
case IPC_RMID:
case IPC_SET:
case SEM_INFO:
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case GETALL:
case SETVAL:
case SETALL:
err = sys_semctl(first, second, third, fourth);
break;
case IPC_STAT:
case SEM_STAT:
fourth.__pad = &s;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_semctl(first, second|IPC_64, third, fourth);
set_fs(old_fs);
if (version == IPC_64) {
struct semid64_ds32 *usp64 = (struct semid64_ds32 *) A(pad);
if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
err = -EFAULT;
break;
}
err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
} else {
struct semid_ds32 *usp32 = (struct semid_ds32 *) A(pad);
if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
err = -EFAULT;
break;
}
err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
}
if (err2)
err = -EFAULT;
break;
}
return err;
}
static int
do_sys32_msgsnd (int first, int second, int third, void *uptr)
{
struct msgbuf *p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER);
struct msgbuf32 *up = (struct msgbuf32 *)uptr;
mm_segment_t old_fs;
int err;
if (!p)
return -ENOMEM;
err = get_user(p->mtype, &up->mtype);
err |= copy_from_user(p->mtext, &up->mtext, second);
if (err)
goto out;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_msgsnd(first, p, second, third);
set_fs(old_fs);
out:
kfree(p);
return err;
}
static int
do_sys32_msgrcv (int first, int second, int msgtyp, int third, int version, void *uptr)
{
struct msgbuf32 *up;
struct msgbuf *p;
mm_segment_t old_fs;
int err;
if (!version) {
struct ipc_kludge *uipck = (struct ipc_kludge *)uptr;
struct ipc_kludge ipck;
err = -EINVAL;
if (!uptr)
goto out;
err = -EFAULT;
if (copy_from_user(&ipck, uipck, sizeof(struct ipc_kludge)))
goto out;
uptr = (void *)A(ipck.msgp);
msgtyp = ipck.msgtyp;
}
err = -ENOMEM;
p = kmalloc(second + sizeof(struct msgbuf) + 4, GFP_USER);
if (!p)
goto out;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_msgrcv(first, p, second + 4, msgtyp, third);
set_fs(old_fs);
if (err < 0)
goto free_then_out;
up = (struct msgbuf32 *)uptr;
if (put_user(p->mtype, &up->mtype) || copy_to_user(&up->mtext, p->mtext, err))
err = -EFAULT;
free_then_out:
kfree(p);
out:
return err;
}
static int
msgctl32 (int first, int second, void *uptr)
{
int err = -EINVAL, err2;
struct msqid_ds m;
struct msqid64_ds m64;
struct msqid_ds32 *up32 = (struct msqid_ds32 *)uptr;
struct msqid64_ds32 *up64 = (struct msqid64_ds32 *)uptr;
mm_segment_t old_fs;
int version = ipc_parse_version32(&second);
switch (second) {
case IPC_INFO:
case IPC_RMID:
case MSG_INFO:
err = sys_msgctl(first, second, (struct msqid_ds *)uptr);
break;
case IPC_SET:
if (version == IPC_64) {
err = get_user(m.msg_perm.uid, &up64->msg_perm.uid);
err |= get_user(m.msg_perm.gid, &up64->msg_perm.gid);
err |= get_user(m.msg_perm.mode, &up64->msg_perm.mode);
err |= get_user(m.msg_qbytes, &up64->msg_qbytes);
} else {
err = get_user(m.msg_perm.uid, &up32->msg_perm.uid);
err |= get_user(m.msg_perm.gid, &up32->msg_perm.gid);
err |= get_user(m.msg_perm.mode, &up32->msg_perm.mode);
err |= get_user(m.msg_qbytes, &up32->msg_qbytes);
}
if (err)
break;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_msgctl(first, second, &m);
set_fs(old_fs);
break;
case IPC_STAT:
case MSG_STAT:
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_msgctl(first, second|IPC_64, (void *) &m64);
set_fs(old_fs);
if (version == IPC_64) {
if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
err = -EFAULT;
break;
}
err2 = __put_user(m64.msg_perm.key, &up64->msg_perm.key);
err2 |= __put_user(m64.msg_perm.uid, &up64->msg_perm.uid);
err2 |= __put_user(m64.msg_perm.gid, &up64->msg_perm.gid);
err2 |= __put_user(m64.msg_perm.cuid, &up64->msg_perm.cuid);
err2 |= __put_user(m64.msg_perm.cgid, &up64->msg_perm.cgid);
err2 |= __put_user(m64.msg_perm.mode, &up64->msg_perm.mode);
err2 |= __put_user(m64.msg_perm.seq, &up64->msg_perm.seq);
err2 |= __put_user(m64.msg_stime, &up64->msg_stime);
err2 |= __put_user(m64.msg_rtime, &up64->msg_rtime);
err2 |= __put_user(m64.msg_ctime, &up64->msg_ctime);
err2 |= __put_user(m64.msg_cbytes, &up64->msg_cbytes);
err2 |= __put_user(m64.msg_qnum, &up64->msg_qnum);
err2 |= __put_user(m64.msg_qbytes, &up64->msg_qbytes);
err2 |= __put_user(m64.msg_lspid, &up64->msg_lspid);
err2 |= __put_user(m64.msg_lrpid, &up64->msg_lrpid);
if (err2)
err = -EFAULT;
} else {
if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
err = -EFAULT;
break;
}
err2 = __put_user(m64.msg_perm.key, &up32->msg_perm.key);
err2 |= __put_user(m64.msg_perm.uid, &up32->msg_perm.uid);
err2 |= __put_user(m64.msg_perm.gid, &up32->msg_perm.gid);
err2 |= __put_user(m64.msg_perm.cuid, &up32->msg_perm.cuid);
err2 |= __put_user(m64.msg_perm.cgid, &up32->msg_perm.cgid);
err2 |= __put_user(m64.msg_perm.mode, &up32->msg_perm.mode);
err2 |= __put_user(m64.msg_perm.seq, &up32->msg_perm.seq);
err2 |= __put_user(m64.msg_stime, &up32->msg_stime);
err2 |= __put_user(m64.msg_rtime, &up32->msg_rtime);
err2 |= __put_user(m64.msg_ctime, &up32->msg_ctime);
err2 |= __put_user(m64.msg_cbytes, &up32->msg_cbytes);
err2 |= __put_user(m64.msg_qnum, &up32->msg_qnum);
err2 |= __put_user(m64.msg_qbytes, &up32->msg_qbytes);
err2 |= __put_user(m64.msg_lspid, &up32->msg_lspid);
err2 |= __put_user(m64.msg_lrpid, &up32->msg_lrpid);
if (err2)
err = -EFAULT;
}
break;
}
return err;
}
static int
shmat32 (int first, int second, int third, int version, void *uptr)
{
unsigned long raddr;
u32 *uaddr = (u32 *)A((u32)third);
int err;
if (version == 1)
return -EINVAL; /* iBCS2 emulator entry point: unsupported */
err = sys_shmat(first, uptr, second, &raddr);
if (err)
return err;
return put_user(raddr, uaddr);
}
static int put_shmid64(struct shmid64_ds *s64p, void *uptr, int version)
{
int err2;
#define s64 (*s64p)
if (version == IPC_64) {
struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64)))
return -EFAULT;
err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
} else {
struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32)))
return -EFAULT;
err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
}
#undef s64
return err2 ? -EFAULT : 0;
}
static int
shmctl32 (int first, int second, void *uptr)
{
int err = -EFAULT, err2;
struct shmid_ds s;
struct shmid64_ds s64;
mm_segment_t old_fs;
struct shm_info32 *uip = (struct shm_info32 *)uptr;
struct shm_info si;
int version = ipc_parse_version32(&second);
struct shminfo64 smi;
struct shminfo *usi32 = (struct shminfo *) uptr;
struct shminfo64_32 *usi64 = (struct shminfo64_32 *) uptr;
switch (second) {
case IPC_INFO:
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_shmctl(first, second|IPC_64, (struct shmid_ds *)&smi);
set_fs(old_fs);
if (version == IPC_64) {
if (!access_ok(VERIFY_WRITE, usi64, sizeof(*usi64))) {
err = -EFAULT;
break;
}
err2 = __put_user(smi.shmmax, &usi64->shmmax);
err2 |= __put_user(smi.shmmin, &usi64->shmmin);
err2 |= __put_user(smi.shmmni, &usi64->shmmni);
err2 |= __put_user(smi.shmseg, &usi64->shmseg);
err2 |= __put_user(smi.shmall, &usi64->shmall);
} else {
if (!access_ok(VERIFY_WRITE, usi32, sizeof(*usi32))) {
err = -EFAULT;
break;
}
err2 = __put_user(smi.shmmax, &usi32->shmmax);
err2 |= __put_user(smi.shmmin, &usi32->shmmin);
err2 |= __put_user(smi.shmmni, &usi32->shmmni);
err2 |= __put_user(smi.shmseg, &usi32->shmseg);
err2 |= __put_user(smi.shmall, &usi32->shmall);
}
if (err2)
err = -EFAULT;
break;
case IPC_RMID:
case SHM_LOCK:
case SHM_UNLOCK:
err = sys_shmctl(first, second, (struct shmid_ds *)uptr);
break;
case IPC_SET:
if (version == IPC_64) {
struct shmid64_ds32 *up64 = (struct shmid64_ds32 *)uptr;
err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
} else {
struct shmid_ds32 *up32 = (struct shmid_ds32 *)uptr;
err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
}
if (err)
break;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_shmctl(first, second, &s);
set_fs(old_fs);
break;
case IPC_STAT:
case SHM_STAT:
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_shmctl(first, second|IPC_64, (void *) &s64);
set_fs(old_fs);
if (err < 0)
break;
err2 = put_shmid64(&s64, uptr, version);
if (err2)
err = err2;
break;
case SHM_INFO:
old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_shmctl(first, second, (void *)&si);
set_fs(old_fs);
if (err < 0)
break;
if (!access_ok(VERIFY_WRITE, uip, sizeof(*uip))) {
err = -EFAULT;
break;
}
err2 = __put_user(si.used_ids, &uip->used_ids);
err2 |= __put_user(si.shm_tot, &uip->shm_tot);
err2 |= __put_user(si.shm_rss, &uip->shm_rss);
err2 |= __put_user(si.shm_swp, &uip->shm_swp);
err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
err2 |= __put_user(si.swap_successes, &uip->swap_successes);
if (err2)
err = -EFAULT;
break;
}
return err;
}
asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
return sys_semop(first, (struct sembuf *)AA(ptr), second);
case SEMGET:
return sys_semget(first, second, third);
case SEMCTL:
return semctl32(first, second, third, (void *)AA(ptr));
case MSGSND:
return do_sys32_msgsnd(first, second, third, (void *)AA(ptr));
case MSGRCV:
return do_sys32_msgrcv(first, second, fifth, third, version, (void *)AA(ptr));
case MSGGET:
return sys_msgget((key_t) first, second);
case MSGCTL:
return msgctl32(first, second, (void *)AA(ptr));
case SHMAT:
return shmat32(first, second, third, version, (void *)AA(ptr));
break;
case SHMDT:
return sys_shmdt((char *)AA(ptr));
case SHMGET:
return sys_shmget(first, second, third);
case SHMCTL:
return shmctl32(first, second, (void *)AA(ptr));
default:
return -EINVAL;
}
return -EINVAL;
}
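For illustration only (this trace is not part of the patch), a 32-bit caller's msgctl(id, IPC_STAT, buf) using the new layout travels through the demultiplexer roughly like this:

err = sys32_ipc(MSGCTL, id, IPC_STAT | IPC_64, 0, buf32, 0);
	/* -> msgctl32(id, IPC_STAT | IPC_64, uptr):
	 *    ipc_parse_version32() strips IPC_64 and remembers it,
	 *    sys_msgctl(id, IPC_STAT | IPC_64, &m64) fills a kernel
	 *    msqid64_ds under KERNEL_DS, and the result is copied back
	 *    to user space through the msqid64_ds32 layout. */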
@@ -1118,422 +1118,6 @@ sys32_setrlimit(unsigned int resource, struct rlimit32 *rlim)
return ret;
}
/*
* sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32-bit emulation.
*
* This is really horribly ugly.
*/
struct msgbuf32 { s32 mtype; char mtext[1]; };
struct ipc_perm32
{
key_t key;
__kernel_uid_t32 uid;
__kernel_gid_t32 gid;
__kernel_uid_t32 cuid;
__kernel_gid_t32 cgid;
__kernel_mode_t32 mode;
unsigned short seq;
};
struct semid_ds32 {
struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
__kernel_time_t32 sem_otime; /* last semop time */
__kernel_time_t32 sem_ctime; /* last change time */
u32 sem_base; /* ptr to first semaphore in array */
u32 sem_pending; /* pending operations to be processed */
u32 sem_pending_last; /* last pending operation */
u32 undo; /* undo requests on this array */
unsigned short sem_nsems; /* no. of semaphores in array */
};
struct msqid_ds32
{
struct ipc_perm32 msg_perm;
u32 msg_first;
u32 msg_last;
__kernel_time_t32 msg_stime;
__kernel_time_t32 msg_rtime;
__kernel_time_t32 msg_ctime;
u32 wwait;
u32 rwait;
unsigned short msg_cbytes;
unsigned short msg_qnum;
unsigned short msg_qbytes;
__kernel_ipc_pid_t32 msg_lspid;
__kernel_ipc_pid_t32 msg_lrpid;
};
struct shmid_ds32 {
struct ipc_perm32 shm_perm;
int shm_segsz;
__kernel_time_t32 shm_atime;
__kernel_time_t32 shm_dtime;
__kernel_time_t32 shm_ctime;
__kernel_ipc_pid_t32 shm_cpid;
__kernel_ipc_pid_t32 shm_lpid;
unsigned short shm_nattch;
};
#define IPCOP_MASK(__x) (1UL << (__x))
static int
do_sys32_semctl(int first, int second, int third, void *uptr)
{
union semun fourth;
u32 pad;
int err;
struct semid64_ds s;
struct semid_ds32 *usp;
mm_segment_t old_fs;
if (!uptr)
return -EINVAL;
err = -EFAULT;
if (get_user (pad, (u32 *)uptr))
return err;
if(third == SETVAL)
fourth.val = (int)pad;
else
fourth.__pad = (void *)A(pad);
switch (third) {
case IPC_INFO:
case IPC_RMID:
case IPC_SET:
case SEM_INFO:
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case GETALL:
case SETVAL:
case SETALL:
err = sys_semctl (first, second, third, fourth);
break;
case IPC_STAT:
case SEM_STAT:
usp = (struct semid_ds32 *)A(pad);
fourth.__pad = &s;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_semctl (first, second, third, fourth);
set_fs (old_fs);
if (verify_area(VERIFY_WRITE, usp, sizeof(struct semid_ds32)) ||
__put_user(s.sem_perm.key, &usp->sem_perm.key) ||
__put_user(s.sem_perm.uid, &usp->sem_perm.uid) ||
__put_user(s.sem_perm.gid, &usp->sem_perm.gid) ||
__put_user(s.sem_perm.cuid, &usp->sem_perm.cuid) ||
__put_user (s.sem_perm.cgid, &usp->sem_perm.cgid) ||
__put_user (s.sem_perm.mode, &usp->sem_perm.mode) ||
__put_user (s.sem_perm.seq, &usp->sem_perm.seq) ||
__put_user (s.sem_otime, &usp->sem_otime) ||
__put_user (s.sem_ctime, &usp->sem_ctime) ||
__put_user (s.sem_nsems, &usp->sem_nsems))
return -EFAULT;
break;
}
return err;
}
static int
do_sys32_msgsnd (int first, int second, int third, void *uptr)
{
struct msgbuf *p = kmalloc (second + sizeof (struct msgbuf)
+ 4, GFP_USER);
struct msgbuf32 *up = (struct msgbuf32 *)uptr;
mm_segment_t old_fs;
int err;
if (!p)
return -ENOMEM;
err = verify_area(VERIFY_READ, up, sizeof(struct msgbuf32));
if (err)
goto out;
err = __get_user (p->mtype, &up->mtype);
err |= __copy_from_user (p->mtext, &up->mtext, second);
if (err)
goto out;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_msgsnd (first, p, second, third);
set_fs (old_fs);
out:
kfree (p);
return err;
}
static int
do_sys32_msgrcv (int first, int second, int msgtyp, int third,
int version, void *uptr)
{
struct msgbuf32 *up;
struct msgbuf *p;
mm_segment_t old_fs;
int err;
if (!version) {
struct ipc_kludge *uipck = (struct ipc_kludge *)uptr;
struct ipc_kludge ipck;
err = -EINVAL;
if (!uptr)
goto out;
err = -EFAULT;
if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge)))
goto out;
uptr = (void *)A(ipck.msgp);
msgtyp = ipck.msgtyp;
}
err = -ENOMEM;
p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
if (!p)
goto out;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_msgrcv (first, p, second + 4, msgtyp, third);
set_fs (old_fs);
if (err < 0)
goto free_then_out;
up = (struct msgbuf32 *)uptr;
if (verify_area(VERIFY_WRITE, up, sizeof(struct msgbuf32)) ||
__put_user (p->mtype, &up->mtype) ||
__copy_to_user (&up->mtext, p->mtext, err))
err = -EFAULT;
free_then_out:
kfree (p);
out:
return err;
}
static int
do_sys32_msgctl (int first, int second, void *uptr)
{
int err = -EINVAL;
struct msqid_ds m;
struct msqid64_ds m64;
struct msqid_ds32 *up = (struct msqid_ds32 *)uptr;
mm_segment_t old_fs;
switch (second) {
case IPC_INFO:
case IPC_RMID:
case MSG_INFO:
err = sys_msgctl (first, second, (struct msqid_ds *)uptr);
break;
case IPC_SET:
err = verify_area(VERIFY_READ, up, sizeof(struct msqid_ds32));
if (err)
break;
err = __get_user (m.msg_perm.uid, &up->msg_perm.uid);
err |= __get_user (m.msg_perm.gid, &up->msg_perm.gid);
err |= __get_user (m.msg_perm.mode, &up->msg_perm.mode);
err |= __get_user (m.msg_qbytes, &up->msg_qbytes);
if (err)
break;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_msgctl (first, second, &m);
set_fs (old_fs);
break;
case IPC_STAT:
case MSG_STAT:
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_msgctl (first, second, (void *) &m64);
set_fs (old_fs);
if (verify_area(VERIFY_WRITE, up, sizeof(struct msqid_ds32)) ||
__put_user (m64.msg_perm.key, &up->msg_perm.key) ||
__put_user(m64.msg_perm.uid, &up->msg_perm.uid) ||
__put_user(m64.msg_perm.gid, &up->msg_perm.gid) ||
__put_user(m64.msg_perm.cuid, &up->msg_perm.cuid) ||
__put_user(m64.msg_perm.cgid, &up->msg_perm.cgid) ||
__put_user(m64.msg_perm.mode, &up->msg_perm.mode) ||
__put_user(m64.msg_perm.seq, &up->msg_perm.seq) ||
__put_user(m64.msg_stime, &up->msg_stime) ||
__put_user(m64.msg_rtime, &up->msg_rtime) ||
__put_user(m64.msg_ctime, &up->msg_ctime) ||
__put_user(m64.msg_cbytes, &up->msg_cbytes) ||
__put_user(m64.msg_qnum, &up->msg_qnum) ||
__put_user(m64.msg_qbytes, &up->msg_qbytes) ||
__put_user(m64.msg_lspid, &up->msg_lspid) ||
__put_user(m64.msg_lrpid, &up->msg_lrpid))
return -EFAULT;
break;
}
return err;
}
static int
do_sys32_shmat (int first, int second, int third, int version, void *uptr)
{
unsigned long raddr;
u32 *uaddr = (u32 *)A((u32)third);
int err = -EINVAL;
if (version == 1)
return err;
err = sys_shmat (first, uptr, second, &raddr);
if (err)
return err;
err = put_user (raddr, uaddr);
return err;
}
static int
do_sys32_shmctl (int first, int second, void *uptr)
{
int err = -EFAULT;
struct shmid_ds s;
struct shmid64_ds s64;
struct shmid_ds32 *up = (struct shmid_ds32 *)uptr;
mm_segment_t old_fs;
struct shm_info32 {
int used_ids;
u32 shm_tot, shm_rss, shm_swp;
u32 swap_attempts, swap_successes;
} *uip = (struct shm_info32 *)uptr;
struct shm_info si;
switch (second) {
case IPC_INFO:
case IPC_RMID:
case SHM_LOCK:
case SHM_UNLOCK:
err = sys_shmctl (first, second, (struct shmid_ds *)uptr);
break;
case IPC_SET:
err = verify_area(VERIFY_READ, up, sizeof(struct shmid_ds32));
if (err)
break;
err = __get_user (s.shm_perm.uid, &up->shm_perm.uid);
err |= __get_user (s.shm_perm.gid, &up->shm_perm.gid);
err |= __get_user (s.shm_perm.mode, &up->shm_perm.mode);
if (err)
break;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_shmctl (first, second, &s);
set_fs (old_fs);
break;
case IPC_STAT:
case SHM_STAT:
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_shmctl (first, second, (void *) &s64);
set_fs (old_fs);
if (err < 0)
break;
if (verify_area(VERIFY_WRITE, up, sizeof(struct shmid_ds32)) ||
__put_user (s64.shm_perm.key, &up->shm_perm.key) ||
__put_user (s64.shm_perm.uid, &up->shm_perm.uid) ||
__put_user (s64.shm_perm.gid, &up->shm_perm.gid) ||
__put_user (s64.shm_perm.cuid, &up->shm_perm.cuid) ||
__put_user (s64.shm_perm.cgid, &up->shm_perm.cgid) ||
__put_user (s64.shm_perm.mode, &up->shm_perm.mode) ||
__put_user (s64.shm_perm.seq, &up->shm_perm.seq) ||
__put_user (s64.shm_atime, &up->shm_atime) ||
__put_user (s64.shm_dtime, &up->shm_dtime) ||
__put_user (s64.shm_ctime, &up->shm_ctime) ||
__put_user (s64.shm_segsz, &up->shm_segsz) ||
__put_user (s64.shm_nattch, &up->shm_nattch) ||
__put_user (s64.shm_cpid, &up->shm_cpid) ||
__put_user (s64.shm_lpid, &up->shm_lpid))
return -EFAULT;
break;
case SHM_INFO:
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_shmctl (first, second, (void *)&si);
set_fs (old_fs);
if (err < 0)
break;
if (verify_area(VERIFY_WRITE, uip, sizeof(struct shm_info32)) ||
__put_user (si.used_ids, &uip->used_ids) ||
__put_user (si.shm_tot, &uip->shm_tot) ||
__put_user (si.shm_rss, &uip->shm_rss) ||
__put_user (si.shm_swp, &uip->shm_swp) ||
__put_user (si.swap_attempts, &uip->swap_attempts) ||
__put_user (si.swap_successes, &uip->swap_successes))
return -EFAULT;
break;
}
return err;
}
asmlinkage long
sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
{
int version, err;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
err = sys_semop (first, (struct sembuf *)AA(ptr),
second);
break;
case SEMGET:
err = sys_semget (first, second, third);
break;
case SEMCTL:
err = do_sys32_semctl (first, second, third,
(void *)AA(ptr));
break;
case MSGSND:
err = do_sys32_msgsnd (first, second, third,
(void *)AA(ptr));
break;
case MSGRCV:
err = do_sys32_msgrcv (first, second, fifth, third,
version, (void *)AA(ptr));
break;
case MSGGET:
err = sys_msgget ((key_t) first, second);
break;
case MSGCTL:
err = do_sys32_msgctl (first, second, (void *)AA(ptr));
break;
case SHMAT:
err = do_sys32_shmat (first, second, third,
version, (void *)AA(ptr));
break;
case SHMDT:
err = sys_shmdt ((char *)AA(ptr));
break;
case SHMGET:
err = sys_shmget (first, second, third);
break;
case SHMCTL:
err = do_sys32_shmctl (first, second, (void *)AA(ptr));
break;
default:
err = -EINVAL;
break;
}
return err;
}
/*
* sys_time() can be implemented in user-level using
* sys_gettimeofday(). IA64 did this but i386 Linux did not
......
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
static void set_bitmap(unsigned long *bitmap, short base, short extent, int new_value)
@@ -61,27 +62,19 @@ asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
return -EINVAL;
if (turn_on && !capable(CAP_SYS_RAWIO))
return -EPERM;
/*
* If it's the first ioperm() call in this thread's lifetime, set the
* IO bitmap up. ioperm() is much less timing critical than clone(),
* this is why we delay this operation until now:
*/
if (!t->ioperm) {
/*
* just in case ...
*/
memset(t->io_bitmap,0xff,(IO_BITMAP_SIZE+1)*4);
t->ioperm = 1;
/*
* this activates it in the TSS
*/
if (!t->io_bitmap_ptr) {
t->io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL);
if (!t->io_bitmap_ptr)
return -ENOMEM;
memset(t->io_bitmap_ptr,0xff,(IO_BITMAP_SIZE+1)*4);
tss->io_map_base = IO_BITMAP_OFFSET;
}
/*
* do it in the per-thread copy and in the TSS ...
*/
set_bitmap((unsigned long *) t->io_bitmap, from, num, !turn_on);
set_bitmap((unsigned long *) t->io_bitmap_ptr, from, num, !turn_on);
set_bitmap((unsigned long *) tss->io_bitmap, from, num, !turn_on);
return 0;
......
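A sketch of the consumer side of the lazy allocation (an assumption; the context-switch change is not shown in this hunk): tasks that never call ioperm() keep io_bitmap_ptr NULL, so the switch path only installs a bitmap for the rare tasks that do.

/* assumed context-switch handling, reusing names from the hunk above */
if (next->io_bitmap_ptr) {
	memcpy(tss->io_bitmap, next->io_bitmap_ptr, (IO_BITMAP_SIZE+1)*4);
	tss->io_map_base = IO_BITMAP_OFFSET;	/* bitmap live in this TSS */
} else
	tss->io_map_base = INVALID_IO_BITMAP_OFFSET;	/* all port access faults */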
@@ -19,10 +19,14 @@
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
(For earlier history, see arch/i386/kernel/mtrr.c)
September 2001 Dave Jones <davej@suse.de>
v2.00 September 2001 Dave Jones <davej@suse.de>
Initial rewrite for x86-64.
Removal of non-Intel style MTRR code.
v2.01 June 2002 Dave Jones <davej@suse.de>
Removal of redundant abstraction layer.
64-bit fixes.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -60,35 +64,19 @@
#include <asm/hardirq.h>
#include <linux/irq.h>
#define MTRR_VERSION "2.00 (20020207)"
#define MTRR_VERSION "2.01 (20020605)"
#define TRUE 1
#define FALSE 0
#define MTRRcap_MSR 0x0fe
#define MTRRdefType_MSR 0x2ff
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
#define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg))
#define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1)
#define NUM_FIXED_RANGES 88
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f
#ifdef CONFIG_SMP
#define MTRR_CHANGE_MASK_FIXED 0x01
#define MTRR_CHANGE_MASK_VARIABLE 0x02
#define MTRR_CHANGE_MASK_DEFTYPE 0x04
#endif
typedef u8 mtrr_type;
@@ -97,49 +85,43 @@ typedef u8 mtrr_type;
#ifdef CONFIG_SMP
#define set_mtrr(reg,base,size,type) set_mtrr_smp (reg, base, size, type)
#else
#define set_mtrr(reg,base,size,type) (*set_mtrr_up) (reg, base, size, type, \
TRUE)
#define set_mtrr(reg,base,size,type) set_mtrr_up (reg, base, size, type, TRUE)
#endif
#if defined(CONFIG_PROC_FS) || defined(CONFIG_DEVFS_FS)
#define USERSPACE_INTERFACE
#endif
#ifndef USERSPACE_INTERFACE
#define compute_ascii() while (0)
#endif
#ifdef USERSPACE_INTERFACE
static char *ascii_buffer;
static unsigned int ascii_buf_bytes;
#endif
static unsigned int *usage_table;
static DECLARE_MUTEX (main_lock);
/* Private functions */
#ifdef USERSPACE_INTERFACE
static void compute_ascii (void);
#else
#define compute_ascii() while (0)
#endif
static unsigned int *usage_table;
static DECLARE_MUTEX (mtrr_lock);
struct set_mtrr_context {
unsigned long flags;
unsigned long deftype_lo;
unsigned long deftype_hi;
unsigned long cr4val;
u32 deftype_lo;
u32 deftype_hi;
u64 flags;
u64 cr4val;
};
/* Put the processor into a state where MTRRs can be safely set */
static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
{
unsigned long cr0;
u64 cr0;
/* Disable interrupts locally */
__save_flags(ctxt->flags);
__cli();
/* Save value of CR4 and clear Page Global Enable (bit 7) */
if (cpu_has_ge) {
if (cpu_has_pge) {
ctxt->cr4val = read_cr4();
write_cr4(ctxt->cr4val & ~(1UL << 7));
}
@@ -152,8 +134,8 @@ static void set_mtrr_prepare (struct set_mtrr_context *ctxt)
wbinvd();
/* Disable MTRRs, and set the default type to uncached */
rdmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
wrmsr(MTRRdefType_MSR, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
rdmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
wrmsr(MSR_MTRRdefType, ctxt->deftype_lo & 0xf300UL, ctxt->deftype_hi);
}
@@ -164,7 +146,7 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt)
wbinvd();
/* Restore MTRRdefType */
wrmsr(MTRRdefType_MSR, ctxt->deftype_lo, ctxt->deftype_hi);
wrmsr(MSR_MTRRdefType, ctxt->deftype_lo, ctxt->deftype_hi);
/* Enable caches */
write_cr0(read_cr0() & 0xbfffffff);
@@ -181,9 +163,9 @@ static void set_mtrr_done (struct set_mtrr_context *ctxt)
/* This function returns the number of variable MTRRs */
static unsigned int get_num_var_ranges (void)
{
unsigned long config, dummy;
u32 config, dummy;
rdmsr (MTRRcap_MSR, config, dummy);
rdmsr (MSR_MTRRcap, config, dummy);
return (config & 0xff);
}
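/*
 * For reference (architectural MTRRcap layout, not part of the patch):
 *   bits 7:0  VCNT - number of variable ranges ("config & 0xff" above)
 *   bit  8    FIX  - fixed-range MTRRs supported
 *   bit  10   WC   - write-combining supported (tested in have_wrcomb())
 */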
@@ -191,21 +173,21 @@ static unsigned int get_num_var_ranges (void)
/* Returns non-zero if we have the write-combining memory type */
static int have_wrcomb (void)
{
unsigned long config, dummy;
u32 config, dummy;
rdmsr (MTRRcap_MSR, config, dummy);
rdmsr (MSR_MTRRcap, config, dummy);
return (config & (1 << 10));
}
static u32 size_or_mask, size_and_mask;
static u64 size_or_mask, size_and_mask;
static void get_mtrr (unsigned int reg, unsigned long *base,
unsigned long *size, mtrr_type * type)
static void get_mtrr (unsigned int reg, u64 *base, u32 *size, mtrr_type * type)
{
unsigned long mask_lo, mask_hi, base_lo, base_hi;
u32 mask_lo, mask_hi, base_lo, base_hi;
u64 newsize;
rdmsr (MTRRphysMask_MSR (reg), mask_lo, mask_hi);
rdmsr (MSR_MTRRphysMask(reg), mask_lo, mask_hi);
if ((mask_lo & 0x800) == 0) {
/* Invalid (i.e. free) range */
*base = 0;
@@ -214,32 +196,29 @@ static void get_mtrr (unsigned int reg, unsigned long *base,
return;
}
rdmsr (MTRRphysBase_MSR (reg), base_lo, base_hi);
rdmsr (MSR_MTRRphysBase(reg), base_lo, base_hi);
/* Work out the shifted address mask. */
mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT)
| mask_lo >> PAGE_SHIFT;
/* This works correctly if size is a power of two, i.e. a
contiguous range. */
*size = -mask_lo;
newsize = (u64) mask_hi << 32 | (mask_lo & ~0x800);
newsize = ~newsize+1;
*size = (u32) newsize >> PAGE_SHIFT;
*base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
*type = base_lo & 0xff;
}
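/*
 * Worked example (illustrative): for a 256 MB range the stored physical
 * mask has zeros below bit 28 once the valid bit is cleared, so
 * ~mask + 1 leaves 0x10000000 in the low word and ">> PAGE_SHIFT"
 * yields the size in 4 KB pages.
 */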
static void set_mtrr_up (unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type, int do_safe)
/* [SUMMARY] Set variable MTRR register on the local CPU.
<reg> The register to set.
<base> The base address of the region.
<size> The size of the region. If this is 0 the region is disabled.
<type> The type of the region.
<do_safe> If TRUE, do the change safely. If FALSE, safety measures should
be done externally.
[RETURNS] Nothing.
*/
/*
* Set variable MTRR register on the local CPU.
* <reg> The register to set.
* <base> The base address of the region.
* <size> The size of the region. If this is 0 the region is disabled.
* <type> The type of the region.
* <do_safe> If TRUE, do the change safely. If FALSE, safety measures should
* be done externally.
*/
static void set_mtrr_up (unsigned int reg, u64 base,
u32 size, mtrr_type type, int do_safe)
{
struct set_mtrr_context ctxt;
@@ -249,12 +228,12 @@ static void set_mtrr_up (unsigned int reg, unsigned long base,
if (size == 0) {
/* The invalid bit is kept in the mask, so we simply clear the
relevant mask register to disable a range. */
wrmsr (MTRRphysMask_MSR (reg), 0, 0);
wrmsr (MSR_MTRRphysMask(reg), 0, 0);
} else {
wrmsr (MTRRphysBase_MSR (reg), base << PAGE_SHIFT | type,
wrmsr (MSR_MTRRphysBase(reg), base << PAGE_SHIFT | type,
(base & size_and_mask) >> (32 - PAGE_SHIFT));
wrmsr (MTRRphysMask_MSR (reg), -size << PAGE_SHIFT | 0x800,
(-size & size_and_mask) >> (32 - PAGE_SHIFT));
wrmsr (MSR_MTRRphysMask(reg), (-size-1) << PAGE_SHIFT | 0x800,
((-size-1) & size_and_mask) >> (32 - PAGE_SHIFT));
}
if (do_safe)
set_mtrr_done (&ctxt);
@@ -264,41 +243,40 @@ static void set_mtrr_up (unsigned int reg, unsigned long base,
#ifdef CONFIG_SMP
struct mtrr_var_range {
unsigned long base_lo;
unsigned long base_hi;
unsigned long mask_lo;
unsigned long mask_hi;
u32 base_lo;
u32 base_hi;
u32 mask_lo;
u32 mask_hi;
};
/* Get the MSR pair relating to a var range */
static void __init get_mtrr_var_range (unsigned int index,
struct mtrr_var_range *vr)
{
rdmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
rdmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
rdmsr (MSR_MTRRphysBase(index), vr->base_lo, vr->base_hi);
rdmsr (MSR_MTRRphysMask(index), vr->mask_lo, vr->mask_hi);
}
/* Set the MSR pair relating to a var range. Returns TRUE if
changes are made */
static int __init
set_mtrr_var_range_testing (unsigned int index, struct mtrr_var_range *vr)
static int __init set_mtrr_var_range_testing (unsigned int index,
struct mtrr_var_range *vr)
{
unsigned int lo, hi;
u32 lo, hi;
int changed = FALSE;
rdmsr (MTRRphysBase_MSR (index), lo, hi);
if ((vr->base_lo & 0xfffff0ffUL) != (lo & 0xfffff0ffUL)
|| (vr->base_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr (MTRRphysBase_MSR (index), vr->base_lo, vr->base_hi);
rdmsr (MSR_MTRRphysBase(index), lo, hi);
if ((vr->base_lo & 0xfffff0ff) != (lo & 0xfffff0ff)
|| (vr->base_hi & 0x000fffff) != (hi & 0x000fffff)) {
wrmsr (MSR_MTRRphysBase(index), vr->base_lo, vr->base_hi);
changed = TRUE;
}
rdmsr (MTRRphysMask_MSR (index), lo, hi);
if ((vr->mask_lo & 0xfffff800UL) != (lo & 0xfffff800UL)
|| (vr->mask_hi & 0xfUL) != (hi & 0xfUL)) {
wrmsr (MTRRphysMask_MSR (index), vr->mask_lo, vr->mask_hi);
rdmsr (MSR_MTRRphysMask(index), lo, hi);
if ((vr->mask_lo & 0xfffff800) != (lo & 0xfffff800)
|| (vr->mask_hi & 0x000fffff) != (hi & 0x000fffff)) {
wrmsr (MSR_MTRRphysMask(index), vr->mask_lo, vr->mask_hi);
changed = TRUE;
}
return changed;
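/*
 * Note (editorial): the masks compare only architecturally meaningful
 * bits -- 0xfffff0ff keeps the type field and drops reserved bits 11:8
 * of MTRRphysBase, 0xfffff800 keeps the valid bit and address bits of
 * MTRRphysMask, and 0x000fffff in the high word covers physical address
 * bits 51:32 (the 52-bit architectural limit).
 */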
@@ -307,45 +285,50 @@ set_mtrr_var_range_testing (unsigned int index, struct mtrr_var_range *vr)
static void __init get_fixed_ranges (mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
u32 *p = (u32 *) frs;
int i;
rdmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
rdmsr (MSR_MTRRfix64K_00000, p[0], p[1]);
for (i = 0; i < 2; i++)
rdmsr (MTRRfix16K_80000_MSR + i, p[2 + i * 2], p[3 + i * 2]);
rdmsr (MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
for (i = 0; i < 8; i++)
rdmsr (MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
rdmsr (MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
}
static int __init set_fixed_ranges_testing (mtrr_type * frs)
{
unsigned long *p = (unsigned long *) frs;
u32 *p = (u32 *) frs;
int changed = FALSE;
int i;
unsigned long lo, hi;
u32 lo, hi;
rdmsr (MTRRfix64K_00000_MSR, lo, hi);
printk (KERN_INFO "mtrr: rdmsr 64K_00000\n");
rdmsr (MSR_MTRRfix64K_00000, lo, hi);
if (p[0] != lo || p[1] != hi) {
wrmsr (MTRRfix64K_00000_MSR, p[0], p[1]);
printk (KERN_INFO "mtrr: Writing %x:%x to 64K MSR. lohi were %x:%x\n", p[0], p[1], lo, hi);
wrmsr (MSR_MTRRfix64K_00000, p[0], p[1]);
changed = TRUE;
}
printk (KERN_INFO "mtrr: rdmsr 16K_80000\n");
for (i = 0; i < 2; i++) {
rdmsr (MTRRfix16K_80000_MSR + i, lo, hi);
rdmsr (MSR_MTRRfix16K_80000 + i, lo, hi);
if (p[2 + i * 2] != lo || p[3 + i * 2] != hi) {
wrmsr (MTRRfix16K_80000_MSR + i, p[2 + i * 2],
p[3 + i * 2]);
printk (KERN_INFO "mtrr: Writing %x:%x to 16K MSR%d. lohi were %x:%x\n", p[2 + i * 2], p[3 + i * 2], i, lo, hi );
wrmsr (MSR_MTRRfix16K_80000 + i, p[2 + i * 2], p[3 + i * 2]);
changed = TRUE;
}
}
printk (KERN_INFO "mtrr: rdmsr 4K_C0000\n");
for (i = 0; i < 8; i++) {
rdmsr (MTRRfix4K_C0000_MSR + i, lo, hi);
rdmsr (MSR_MTRRfix4K_C0000 + i, lo, hi);
printk (KERN_INFO "mtrr: MTRRfix4K_C0000+%d = %x:%x\n", i, lo, hi);
if (p[6 + i * 2] != lo || p[7 + i * 2] != hi) {
wrmsr (MTRRfix4K_C0000_MSR + i, p[6 + i * 2],
p[7 + i * 2]);
printk (KERN_INFO "mtrr: Writing %x:%x to 4K MSR%d. lohi were %x:%x\n", p[6 + i * 2], p[7 + i * 2], i, lo, hi);
wrmsr (MSR_MTRRfix4K_C0000 + i, p[6 + i * 2], p[7 + i * 2]);
changed = TRUE;
}
}
@@ -357,8 +340,8 @@ struct mtrr_state {
unsigned int num_var_ranges;
struct mtrr_var_range *var_ranges;
mtrr_type fixed_ranges[NUM_FIXED_RANGES];
unsigned char enabled;
mtrr_type def_type;
unsigned char enabled;
};
@@ -367,9 +350,9 @@ static void __init get_mtrr_state (struct mtrr_state *state)
{
unsigned int nvrs, i;
struct mtrr_var_range *vrs;
unsigned long lo, dummy;
u32 lo, dummy;
nvrs = state->num_var_ranges = get_num_var_ranges ();
nvrs = state->num_var_ranges = get_num_var_ranges();
vrs = state->var_ranges
= kmalloc (nvrs * sizeof (struct mtrr_var_range), GFP_KERNEL);
if (vrs == NULL)
@@ -379,7 +362,7 @@ static void __init get_mtrr_state (struct mtrr_state *state)
get_mtrr_var_range (i, &vrs[i]);
get_fixed_ranges (state->fixed_ranges);
rdmsr (MTRRdefType_MSR, lo, dummy);
rdmsr (MSR_MTRRdefType, lo, dummy);
state->def_type = (lo & 0xff);
state->enabled = (lo & 0xc00) >> 10;
}
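/*
 * For reference (architectural MTRRdefType layout, not part of the patch):
 *   bits 7:0 - default memory type, bit 10 - fixed-range enable (FE),
 *   bit 11 - MTRR enable (E); hence "(lo & 0xc00) >> 10" above.
 */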
@@ -393,17 +376,18 @@ static void __init finalize_mtrr_state (struct mtrr_state *state)
}
static unsigned long __init set_mtrr_state (struct mtrr_state *state,
/*
* Set the MTRR state for this CPU.
* <state> The MTRR state information to read.
* <ctxt> Some relevant CPU context.
* [NOTE] The CPU must already be in a safe state for MTRR changes.
* [RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
static u64 __init set_mtrr_state (struct mtrr_state *state,
struct set_mtrr_context *ctxt)
/* [SUMMARY] Set the MTRR state for this CPU.
<state> The MTRR state information to read.
<ctxt> Some relevant CPU context.
[NOTE] The CPU must already be in a safe state for MTRR changes.
[RETURNS] 0 if no changes made, else a mask indicating what was changed.
*/
{
unsigned int i;
unsigned long change_mask = 0;
u64 change_mask = 0;
for (i = 0; i < state->num_var_ranges; i++)
if (set_mtrr_var_range_testing (i, &state->var_ranges[i]))
@@ -428,16 +412,16 @@ static volatile int wait_barrier_execute = FALSE;
static volatile int wait_barrier_cache_enable = FALSE;
struct set_mtrr_data {
unsigned long smp_base;
unsigned long smp_size;
u64 smp_base;
u32 smp_size;
unsigned int smp_reg;
mtrr_type smp_type;
};
/*
* Synchronisation handler. Executed by "other" CPUs.
*/
static void ipi_handler (void *info)
/* [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
[RETURNS] Nothing.
*/
{
struct set_mtrr_data *data = info;
struct set_mtrr_context ctxt;
@@ -449,7 +433,7 @@ static void ipi_handler (void *info)
barrier ();
/* The master has cleared me to execute */
(*set_mtrr_up) (data->smp_reg, data->smp_base, data->smp_size,
set_mtrr_up (data->smp_reg, data->smp_base, data->smp_size,
data->smp_type, FALSE);
/* Notify master CPU that I've executed the function */
@@ -462,8 +446,7 @@ static void ipi_handler (void *info)
}
static void set_mtrr_smp (unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type)
static void set_mtrr_smp (unsigned int reg, u64 base, u32 size, mtrr_type type)
{
struct set_mtrr_data data;
struct set_mtrr_context ctxt;
@@ -490,7 +473,7 @@ static void set_mtrr_smp (unsigned int reg, unsigned long base,
/* Set up for completion wait and then release other CPUs to change MTRRs */
atomic_set (&undone_count, smp_num_cpus - 1);
wait_barrier_execute = FALSE;
(*set_mtrr_up) (reg, base, size, type, FALSE);
set_mtrr_up (reg, base, size, type, FALSE);
/* Now wait for other CPUs to complete the function */
while (atomic_read (&undone_count) > 0)
@@ -505,7 +488,7 @@ static void set_mtrr_smp (unsigned int reg, unsigned long base,
/* Some BIOS's are fucked and don't set all MTRRs the same! */
static void __init mtrr_state_warn (unsigned long mask)
static void __init mtrr_state_warn (u32 mask)
{
if (!mask)
return;
@@ -521,7 +504,7 @@ static void __init mtrr_state_warn (unsigned long mask)
#endif /* CONFIG_SMP */
static char inline * attrib_to_str (int x)
static inline char * attrib_to_str (int x)
{
return (x <= 6) ? mtrr_strings[x] : "?";
}
@@ -551,21 +534,20 @@ static void __init init_table (void)
}
static int generic_get_free_region (unsigned long base,
unsigned long size)
/* [SUMMARY] Get a free MTRR.
<base> The starting (base) address of the region.
<size> The size (in bytes) of the region.
[RETURNS] The index of the region on success, else -1 on error.
/*
* Get a free MTRR.
* returns the index of the region on success, else -1 on error.
*/
static int get_free_region(void)
{
int i, max;
mtrr_type ltype;
unsigned long lbase, lsize;
u64 lbase;
u32 lsize;
max = get_num_var_ranges ();
for (i = 0; i < max; ++i) {
(*get_mtrr) (i, &lbase, &lsize, &ltype);
get_mtrr (i, &lbase, &lsize, &ltype);
if (lsize == 0)
return i;
}
@@ -573,22 +555,19 @@ static int generic_get_free_region (unsigned long base,
}
static int (*get_free_region) (unsigned long base,
unsigned long size) = generic_get_free_region;
/**
* mtrr_add_page - Add a memory type region
* @base: Physical base address of region in pages (4 KB)
* @size: Physical size of region in pages (4 KB)
* @type: Type of MTRR desired
* @increment: If this is true do usage counting on the region
* Returns the MTRR register on success, else a negative number
* indicating the error code.
*
* Memory type region registers control the caching on newer Intel and
* non Intel processors. This function allows drivers to request an
* MTRR is added. The details and hardware specifics of each processor's
* implementation are hidden from the caller, but nevertheless the
* caller should expect to need to provide a power of two size on an
* equivalent power of two boundary.
* Memory type region registers control the caching on newer
* processors. This function allows drivers to request an MTRR is added.
* The caller should expect to need to provide a power of two size on
* an equivalent power of two boundary.
*
* If the region cannot be added either because all regions are in use
* or the CPU cannot support it a negative value is returned. On success
@@ -596,42 +575,28 @@ static int (*get_free_region) (unsigned long base,
* as a cookie only.
*
* On a multiprocessor machine the changes are made to all processors.
* This is required on x86 by the Intel processors.
*
* The available types are
*
* %MTRR_TYPE_UNCACHABLE - No caching
*
* %MTRR_TYPE_WRBACK - Write data back in bursts whenever
*
* %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
*
* %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
*
* BUGS: Needs a quiet flag for the cases where drivers do not mind
* failures and do not wish system log messages to be sent.
*/
int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment)
int mtrr_add_page (u64 base, u32 size, unsigned int type, char increment)
{
/* [SUMMARY] Add an MTRR entry.
<base> The starting (base, in pages) address of the region.
<size> The size of the region. (in pages)
<type> The type of the new region.
<increment> If true and the region already exists, the usage count will be
incremented.
[RETURNS] The MTRR register on success, else a negative number indicating
the error code.
[NOTE] This routine uses a spinlock.
*/
int i, max;
mtrr_type ltype;
unsigned long lbase, lsize, last;
u64 lbase, last;
u32 lsize;
if (base + size < 0x100) {
printk (KERN_WARNING
"mtrr: cannot set region below 1 MiB (0x%lx000,0x%lx000)\n",
"mtrr: cannot set region below 1 MiB (0x%lx000,0x%x000)\n",
base, size);
return -EINVAL;
}
@@ -644,7 +609,7 @@ int mtrr_add_page (unsigned long base, unsigned long size,
if (lbase != last) {
printk (KERN_WARNING
"mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n",
"mtrr: base(0x%lx000) is not aligned on a size(0x%x000) boundary\n",
base, size);
return -EINVAL;
}
@@ -655,7 +620,7 @@ int mtrr_add_page (unsigned long base, unsigned long size,
}
/* If the type is WC, check that this processor supports it */
if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb ()) {
if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
printk (KERN_WARNING
"mtrr: your processor doesn't support write-combining\n");
return -ENOSYS;
@@ -669,9 +634,9 @@ int mtrr_add_page (unsigned long base, unsigned long size,
increment = increment ? 1 : 0;
max = get_num_var_ranges ();
/* Search for existing MTRR */
down (&main_lock);
down (&mtrr_lock);
for (i = 0; i < max; ++i) {
(*get_mtrr) (i, &lbase, &lsize, &ltype);
get_mtrr (i, &lbase, &lsize, &ltype);
if (base >= lbase + lsize)
continue;
if ((base < lbase) && (base + size <= lbase))
@@ -679,41 +644,41 @@ int mtrr_add_page (unsigned long base, unsigned long size,
/* At this point we know there is some kind of overlap/enclosure */
if ((base < lbase) || (base + size > lbase + lsize)) {
up (&main_lock);
up (&mtrr_lock);
printk (KERN_WARNING
"mtrr: 0x%lx000,0x%lx000 overlaps existing"
" 0x%lx000,0x%lx000\n", base, size, lbase,
lsize);
"mtrr: 0x%lx000,0x%x000 overlaps existing"
" 0x%lx000,0x%x000\n", base, size, lbase, lsize);
return -EINVAL;
}
/* New region is enclosed by an existing region */
if (ltype != type) {
if (type == MTRR_TYPE_UNCACHABLE)
continue;
up (&main_lock);
up (&mtrr_lock);
printk
("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
base, size, attrib_to_str (ltype),
("mtrr: type mismatch for %lx000,%x000 old: %s new: %s\n",
base, size,
attrib_to_str (ltype),
attrib_to_str (type));
return -EINVAL;
}
if (increment)
++usage_table[i];
compute_ascii ();
up (&main_lock);
up (&mtrr_lock);
return i;
}
/* Search for an empty MTRR */
i = (*get_free_region) (base, size);
i = get_free_region();
if (i < 0) {
up (&main_lock);
up (&mtrr_lock);
printk ("mtrr: no more MTRRs available\n");
return i;
}
set_mtrr (i, base, size, type);
usage_table[i] = 1;
compute_ascii ();
up (&main_lock);
up (&mtrr_lock);
return i;
}
@@ -724,13 +689,13 @@ int mtrr_add_page (unsigned long base, unsigned long size,
* @size: Physical size of region
* @type: Type of MTRR desired
* @increment: If this is true do usage counting on the region
* Return the MTRR register on success, else a negative number
* indicating the error code.
*
* Memory type region registers control the caching on newer Intel and
* non Intel processors. This function allows drivers to request an
* MTRR is added. The details and hardware specifics of each processor's
* implementation are hidden from the caller, but nevertheless the
* caller should expect to need to provide a power of two size on an
* equivalent power of two boundary.
* Memory type region registers control the caching on newer processors.
* This function allows drivers to request an MTRR is added.
* The caller should expect to need to provide a power of two size on
* an equivalent power of two boundary.
*
* If the region cannot be added either because all regions are in use
* or the CPU cannot support it a negative value is returned. On success
@@ -743,33 +708,19 @@ int mtrr_add_page (unsigned long base, unsigned long size,
* The available types are
*
* %MTRR_TYPE_UNCACHABLE - No caching
*
* %MTRR_TYPE_WRBACK - Write data back in bursts whenever
*
* %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
*
* %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
*
* BUGS: Needs a quiet flag for the cases where drivers do not mind
* failures and do not wish system log messages to be sent.
*/
int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
char increment)
int mtrr_add (u64 base, u32 size, unsigned int type, char increment)
{
/* [SUMMARY] Add an MTRR entry.
<base> The starting (base) address of the region.
<size> The size (in bytes) of the region.
<type> The type of the new region.
<increment> If true and the region already exists, the usage count will be
incremented.
[RETURNS] The MTRR register on success, else a negative number indicating
the error code.
*/
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
printk ("mtrr: size and base must be multiples of 4 kiB\n");
printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base);
return -EINVAL;
}
return mtrr_add_page (base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
@@ -792,55 +743,46 @@ int mtrr_add (unsigned long base, unsigned long size, unsigned int type,
* code.
*/
int mtrr_del_page (int reg, unsigned long base, unsigned long size)
/* [SUMMARY] Delete MTRR/decrement usage count.
<reg> The register. If this is less than 0 then <<base>> and <<size>> must
be supplied.
<base> The base address of the region. This is ignored if <<reg>> is >= 0.
<size> The size of the region. This is ignored if <<reg>> is >= 0.
[RETURNS] The register on success, else a negative number indicating
the error code.
[NOTE] This routine uses a spinlock.
*/
int mtrr_del_page (int reg, u64 base, u32 size)
{
int i, max;
mtrr_type ltype;
unsigned long lbase, lsize;
u64 lbase;
u32 lsize;
max = get_num_var_ranges ();
down (&main_lock);
down (&mtrr_lock);
if (reg < 0) {
/* Search for existing MTRR */
for (i = 0; i < max; ++i) {
(*get_mtrr) (i, &lbase, &lsize, &ltype);
get_mtrr (i, &lbase, &lsize, &ltype);
if (lbase == base && lsize == size) {
reg = i;
break;
}
}
if (reg < 0) {
up (&main_lock);
printk ("mtrr: no MTRR for %lx000,%lx000 found\n", base,
size);
up (&mtrr_lock);
printk ("mtrr: no MTRR for %lx000,%x000 found\n", base, size);
return -EINVAL;
}
}
if (reg >= max) {
up (&main_lock);
up (&mtrr_lock);
printk ("mtrr: register: %d too big\n", reg);
return -EINVAL;
}
(*get_mtrr) (reg, &lbase, &lsize, &ltype);
get_mtrr (reg, &lbase, &lsize, &ltype);
if (lsize < 1) {
up (&main_lock);
up (&mtrr_lock);
printk ("mtrr: MTRR %d not used\n", reg);
return -EINVAL;
}
if (usage_table[reg] < 1) {
up (&main_lock);
up (&mtrr_lock);
printk ("mtrr: reg: %d has count=0\n", reg);
return -EINVAL;
}
@@ -848,7 +790,7 @@ int mtrr_del_page (int reg, unsigned long base, unsigned long size)
if (--usage_table[reg] < 1)
set_mtrr (reg, 0, 0, 0);
compute_ascii ();
up (&main_lock);
up (&mtrr_lock);
return reg;
}
@@ -868,19 +810,11 @@ int mtrr_del_page (int reg, unsigned long base, unsigned long size)
* code.
*/
int mtrr_del (int reg, unsigned long base, unsigned long size)
/* [SUMMARY] Delete MTRR/decrement usage count.
<reg> The register. If this is less than 0 then <<base>> and <<size>> must
be supplied.
<base> The base address of the region. This is ignored if <<reg>> is >= 0.
<size> The size of the region. This is ignored if <<reg>> is >= 0.
[RETURNS] The register on success, else a negative number indicating
the error code.
*/
int mtrr_del (int reg, u64 base, u32 size)
{
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
printk ("mtrr: size and base must be multiples of 4 kiB\n");
printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base);
return -EINVAL;
}
return mtrr_del_page (reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
@@ -889,8 +823,8 @@ int mtrr_del (int reg, unsigned long base, unsigned long size)
#ifdef USERSPACE_INTERFACE
static int mtrr_file_add (unsigned long base, unsigned long size,
unsigned int type, char increment, struct file *file, int page)
static int mtrr_file_add (u64 base, u32 size, unsigned int type,
struct file *file, int page)
{
int reg, max;
unsigned int *fcount = file->private_data;
@@ -910,7 +844,7 @@ static int mtrr_file_add (unsigned long base, unsigned long size,
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
printk
("mtrr: size and base must be multiples of 4 kiB\n");
printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base);
return -EINVAL;
}
base >>= PAGE_SHIFT;
......@@ -925,7 +859,7 @@ static int mtrr_file_add (unsigned long base, unsigned long size,
}
static int mtrr_file_del (unsigned long base, unsigned long size,
static int mtrr_file_del (u64 base, u32 size,
struct file *file, int page)
{
int reg;
......@@ -935,7 +869,7 @@ static int mtrr_file_del (unsigned long base, unsigned long size,
if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
printk
("mtrr: size and base must be multiples of 4 kiB\n");
printk ("mtrr: size: 0x%lx base: 0x%lx\n", size, base);
printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base);
return -EINVAL;
}
base >>= PAGE_SHIFT;
......@@ -977,9 +911,9 @@ static ssize_t mtrr_write (struct file *file, const char *buf,
"disable=%d"
*/
{
int i, err;
unsigned long reg;
unsigned long long base, size;
int i, err, reg;
u64 base;
u32 size;
char *ptr;
char line[LINE_SIZE];
......@@ -1027,7 +961,7 @@ static ssize_t mtrr_write (struct file *file, const char *buf,
if ((base & 0xfff) || (size & 0xfff)) {
printk ("mtrr: size and base must be multiples of 4 kiB\n");
printk ("mtrr: size: 0x%Lx base: 0x%Lx\n", size, base);
printk ("mtrr: size: 0x%x base: 0x%lx\n", size, base);
return -EINVAL;
}
......@@ -1046,9 +980,7 @@ static ssize_t mtrr_write (struct file *file, const char *buf,
continue;
base >>= PAGE_SHIFT;
size >>= PAGE_SHIFT;
err =
mtrr_add_page ((unsigned long) base, (unsigned long) size,
i, 1);
err = mtrr_add_page ((u64) base, size, i, 1);
if (err < 0)
return err;
return len;
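From userspace this parser is reached by writing one such line to /proc/mtrr; a hedged sketch (the region values are invented):

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                /* Matches the "base=%Lx size=%Lx type=%s" format parsed above. */
                const char line[] = "base=0xd8000000 size=0x2000000 type=write-combining\n";
                int fd = open("/proc/mtrr", O_WRONLY);
                if (fd < 0)
                        return 1;
                if (write(fd, line, sizeof(line) - 1) < 0)
                        perror("write");
                close(fd);
                return 0;
        }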
......@@ -1076,7 +1008,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
return -EFAULT;
err =
mtrr_file_add (sentry.base, sentry.size, sentry.type, 1,
mtrr_file_add (sentry.base, sentry.size, sentry.type,
file, 0);
if (err < 0)
return err;
......@@ -1117,7 +1049,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
return -EFAULT;
if (gentry.regnum >= get_num_var_ranges ())
return -EINVAL;
(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type);
/* Hide entries that go above 4GB */
if (gentry.base + gentry.size > 0x100000
......@@ -1139,7 +1071,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
if (copy_from_user (&sentry, (void *) arg, sizeof sentry))
return -EFAULT;
err =
mtrr_file_add (sentry.base, sentry.size, sentry.type, 1,
mtrr_file_add (sentry.base, sentry.size, sentry.type,
file, 1);
if (err < 0)
return err;
......@@ -1180,7 +1112,7 @@ static int mtrr_ioctl (struct inode *inode, struct file *file,
return -EFAULT;
if (gentry.regnum >= get_num_var_ranges ())
return -EINVAL;
(*get_mtrr) (gentry.regnum, &gentry.base, &gentry.size, &type);
get_mtrr (gentry.regnum, &gentry.base, &gentry.size, &type);
gentry.type = type;
if (copy_to_user ((void *) arg, &gentry, sizeof gentry))
......@@ -1199,7 +1131,6 @@ static int mtrr_close (struct inode *ino, struct file *file)
if (fcount == NULL)
return 0;
lock_kernel ();
max = get_num_var_ranges ();
for (i = 0; i < max; ++i) {
while (fcount[i] > 0) {
......@@ -1208,7 +1139,6 @@ static int mtrr_close (struct inode *ino, struct file *file)
--fcount[i];
}
}
unlock_kernel ();
kfree (fcount);
file->private_data = NULL;
return 0;
......@@ -1234,12 +1164,13 @@ static void compute_ascii (void)
char factor;
int i, max;
mtrr_type type;
unsigned long base, size;
u64 base;
u32 size;
ascii_buf_bytes = 0;
max = get_num_var_ranges ();
for (i = 0; i < max; i++) {
(*get_mtrr) (i, &base, &size, &type);
get_mtrr (i, &base, &size, &type);
if (size == 0)
usage_table[i] = 0;
else {
......@@ -1253,11 +1184,10 @@ static void compute_ascii (void)
}
sprintf
(ascii_buffer + ascii_buf_bytes,
"reg%02i: base=0x%05lx000 (%4liMB), size=%4li%cB: %s, count=%d\n",
"reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n",
i, base, base >> (20 - PAGE_SHIFT), size, factor,
attrib_to_str (type), usage_table[i]);
ascii_buf_bytes +=
strlen (ascii_buffer + ascii_buf_bytes);
ascii_buf_bytes += strlen (ascii_buffer + ascii_buf_bytes);
}
}
devfs_set_file_size (devfs_handle, ascii_buf_bytes);
......@@ -1283,22 +1213,16 @@ static void __init mtrr_setup (void)
if ((cpuid_eax (0x80000000) >= 0x80000008)) {
u32 phys_addr;
phys_addr = cpuid_eax (0x80000008) & 0xff;
size_or_mask =
~((1 << (phys_addr - PAGE_SHIFT)) - 1);
size_and_mask = ~size_or_mask & 0xfff00000;
} else {
/* FIXME: This is to make it work on Athlon during debugging. */
size_or_mask = 0xff000000; /* 36 bits */
size_and_mask = 0x00f00000;
size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
size_and_mask = ~size_or_mask & 0xfffffffffff00000;
}
printk ("mtrr: detected mtrr type: x86-64\n");
}
}
#ifdef CONFIG_SMP
static volatile unsigned long smp_changes_mask __initdata = 0;
static volatile u32 smp_changes_mask __initdata = 0;
static struct mtrr_state smp_mtrr_state __initdata = { 0, 0 };
void __init mtrr_init_boot_cpu (void)
......@@ -1310,7 +1234,8 @@ void __init mtrr_init_boot_cpu (void)
void __init mtrr_init_secondary_cpu (void)
{
unsigned long mask, count;
u64 mask;
int count;
struct set_mtrr_context ctxt;
/* Note that this is not ideal, since the cache is only flushed/disabled
......@@ -1357,4 +1282,3 @@ int __init mtrr_init (void)
init_table ();
return 0;
}
......@@ -39,6 +39,7 @@
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
......@@ -320,9 +321,6 @@ void show_regs(struct pt_regs * regs)
printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}
#define __STR(x) #x
#define __STR2(x) __STR(x)
extern void load_gs_index(unsigned);
/*
......@@ -330,7 +328,13 @@ extern void load_gs_index(unsigned);
*/
void exit_thread(void)
{
/* nothing to do ... */
struct task_struct *me = current;
if (me->thread.io_bitmap_ptr) {
kfree(me->thread.io_bitmap_ptr);
me->thread.io_bitmap_ptr = NULL;
(init_tss + smp_processor_id())->io_map_base =
INVALID_IO_BITMAP_OFFSET;
}
}
void flush_thread(void)
......@@ -392,6 +396,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
unlazy_fpu(current);
p->thread.i387 = current->thread.i387;
if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
p->thread.io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE+1)*4, GFP_KERNEL);
if (!p->thread.io_bitmap_ptr)
return -ENOMEM;
memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
(IO_BITMAP_SIZE+1)*4);
}
return 0;
}
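copy_thread() only duplicates a bitmap the parent already has; per the commit message, allocation itself is lazy and happens on a task's first ioperm() call. A sketch of that path (not part of this hunk; locking and the actual bit manipulation are elided):

        /* Hypothetical shape of the lazy allocation in sys_ioperm(). */
        asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
        {
                struct thread_struct *t = &current->thread;

                if (!t->io_bitmap_ptr) {
                        t->io_bitmap_ptr = kmalloc((IO_BITMAP_SIZE + 1) * 4,
                                                   GFP_KERNEL);
                        if (!t->io_bitmap_ptr)
                                return -ENOMEM;
                        memset(t->io_bitmap_ptr, 0xff,
                               (IO_BITMAP_SIZE + 1) * 4); /* all ports denied */
                }
                /* ... set/clear bits and update the TSS as before ... */
                return 0;
        }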
......@@ -491,21 +503,14 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
/*
* Handle the IO bitmap
*/
if (unlikely(prev->ioperm || next->ioperm)) {
if (next->ioperm) {
if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
if (next->io_bitmap_ptr) {
/*
* 4 cachelines copy ... not good, but not that
* bad either. Anyone got something better?
* This only affects processes which use ioperm().
* [Putting the TSSs into 4k-tlb mapped regions
* and playing VM tricks to switch the IO bitmap
* is not really acceptable.]
* On x86-64 we could put multiple bitmaps into
* the GDT and just switch offsets
* This would require ugly special cases on overflow
* though -AK
*/
memcpy(tss->io_bitmap, next->io_bitmap,
memcpy(tss->io_bitmap, next->io_bitmap_ptr,
IO_BITMAP_SIZE*sizeof(u32));
tss->io_map_base = IO_BITMAP_OFFSET;
} else {
......
......@@ -91,6 +91,9 @@ void pda_init(int cpu)
pda->me = pda;
pda->cpudata_offset = 0;
pda->active_mm = &init_mm;
pda->mmu_state = 0;
asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
wrmsrl(MSR_GS_BASE, cpu_pda + cpu);
}
......
......@@ -84,7 +84,6 @@ struct rt_sigframe
char *pretcode;
struct ucontext uc;
struct siginfo info;
struct _fpstate fpstate;
};
static int
......@@ -186,8 +185,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs regs)
*/
static int
setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate,
struct pt_regs *regs, unsigned long mask)
setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, unsigned long mask)
{
int tmp, err = 0;
struct task_struct *me = current;
......@@ -221,20 +219,17 @@ setup_sigcontext(struct sigcontext *sc, struct _fpstate *fpstate,
err |= __put_user(mask, &sc->oldmask);
err |= __put_user(me->thread.cr2, &sc->cr2);
tmp = save_i387(fpstate);
if (tmp < 0)
err = 1;
else
err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
return err;
}
/*
* Determine which stack to use..
*/
static inline struct rt_sigframe *
get_sigframe(struct k_sigaction *ka, struct pt_regs * regs)
#define round_down(p, r) ((void *) ((unsigned long)((p) - (r) + 1) & ~((r)-1)))
static void *
get_stack(struct k_sigaction *ka, struct pt_regs *regs, unsigned long size)
{
unsigned long rsp;
......@@ -247,22 +242,34 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs * regs)
rsp = current->sas_ss_sp + current->sas_ss_size;
}
rsp = (rsp - sizeof(struct _fpstate)) & ~(15UL);
rsp -= offsetof(struct rt_sigframe, fpstate);
return (struct rt_sigframe *) rsp;
return round_down(rsp - size, 16);
}
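This is where the commit's 16-byte signal frame alignment happens: round_down() yields a 16-byte aligned address at or below the requested one, and the caller below subtracts a further 8 so that rsp at handler entry is congruent to 8 mod 16, exactly as after a call (the fake return address, pretcode, sits at the top of the frame). A standalone arithmetic sketch (sizes and the starting rsp are invented):

        #include <stdio.h>

        /* Arithmetic-only version of the kernel's round_down() above. */
        #define round_down(p, r) (((p) - (r) + 1) & ~((unsigned long)(r) - 1))

        int main(void)
        {
                unsigned long rsp   = 0x7fffffffe123UL;            /* invented user rsp */
                unsigned long fp    = round_down(rsp - 0x200, 16); /* fpstate slot */
                unsigned long frame = round_down(fp - 0x100, 16) - 8;

                /* fp % 16 == 0 and frame % 16 == 8, matching the ABI's
                 * expectation for rsp on function entry. */
                printf("fp%%16=%lu frame%%16=%lu\n", fp % 16, frame % 16);
                return 0;
        }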
static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
struct rt_sigframe *frame;
struct rt_sigframe *frame = NULL;
struct _fpstate *fp = NULL;
int err = 0;
frame = get_sigframe(ka, regs);
if (current->used_math) {
fp = get_stack(ka, regs, sizeof(struct _fpstate));
frame = round_down((char *)fp - sizeof(struct rt_sigframe), 16) - 8;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate))) {
goto give_sigsegv;
}
if (save_i387(fp) < 0)
err |= -1;
}
if (!frame)
frame = get_stack(ka, regs, sizeof(struct rt_sigframe)) - 8;
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
goto give_sigsegv;
}
if (ka->sa.sa_flags & SA_SIGINFO) {
err |= copy_siginfo_to_user(&frame->info, info);
......@@ -278,14 +285,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(sas_ss_flags(regs->rsp),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpstate,
regs, set->sig[0]);
err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]);
err |= __put_user(fp, &frame->uc.uc_mcontext.fpstate);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
if (err) {
goto give_sigsegv;
}
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
/* x86-64 should always use SA_RESTORER. */
......@@ -297,7 +300,6 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
}
if (err) {
printk("fault 3\n");
goto give_sigsegv;
}
......@@ -305,7 +307,6 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
printk("%d old rip %lx old rsp %lx old rax %lx\n", current->pid,regs->rip,regs->rsp,regs->rax);
#endif
/* Set up registers for signal handler */
{
struct exec_domain *ed = current_thread_info()->exec_domain;
......@@ -320,9 +321,10 @@ static void setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
next argument after the signal number on the stack. */
regs->rsi = (unsigned long)&frame->info;
regs->rdx = (unsigned long)&frame->uc;
regs->rsp = (unsigned long) frame;
regs->rip = (unsigned long) ka->sa.sa_handler;
regs->rsp = (unsigned long)frame;
set_fs(USER_DS);
regs->eflags &= ~TF_MASK;
......
......@@ -25,8 +25,6 @@
/* The 'big kernel lock' */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
struct tlb_state cpu_tlbstate[NR_CPUS] = {[0 ... NR_CPUS-1] = { &init_mm, 0 }};
/*
* the following functions deal with sending IPIs between CPUs.
*
......@@ -147,9 +145,9 @@ static spinlock_t tlbstate_lock = SPIN_LOCK_UNLOCKED;
*/
static void inline leave_mm (unsigned long cpu)
{
if (cpu_tlbstate[cpu].state == TLBSTATE_OK)
if (read_pda(mmu_state) == TLBSTATE_OK)
BUG();
clear_bit(cpu, &cpu_tlbstate[cpu].active_mm->cpu_vm_mask);
clear_bit(cpu, &read_pda(active_mm)->cpu_vm_mask);
__flush_tlb();
}
......@@ -164,18 +162,18 @@ static void inline leave_mm (unsigned long cpu)
* the other cpus, but smp_invalidate_interrupt ignores flush ipis
* for the wrong mm, and in the worst case we perform a superfluous
* tlb flush.
* 1a2) set cpu_tlbstate to TLBSTATE_OK
* 1a2) set cpu mmu_state to TLBSTATE_OK
* Now the smp_invalidate_interrupt won't call leave_mm if cpu0
* was in lazy tlb mode.
* 1a3) update cpu_tlbstate[].active_mm
* 1a3) update cpu active_mm
* Now cpu0 accepts tlb flushes for the new mm.
* 1a4) set_bit(cpu, &new_mm->cpu_vm_mask);
* Now the other cpus will send tlb flush ipis.
* 1a4) change cr3.
* 1b) thread switch without mm change
* cpu_tlbstate[].active_mm is correct, cpu0 already handles
* cpu active_mm is correct, cpu0 already handles
* flush ipis.
* 1b1) set cpu_tlbstate to TLBSTATE_OK
* 1b1) set cpu mmu_state to TLBSTATE_OK
* 1b2) test_and_set the cpu bit in cpu_vm_mask.
* Atomically set the bit [other cpus will start sending flush ipis],
* and test the bit.
......@@ -188,7 +186,7 @@ static void inline leave_mm (unsigned long cpu)
* runs in kernel space, the cpu could load tlb entries for user space
* pages.
*
* The good news is that cpu_tlbstate is local to each cpu, no
* The good news is that cpu mmu_state is local to each cpu, no
* write/read ordering problems.
*/
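Condensed into C, the rule this comment builds up to looks roughly like the following (a sketch of the IPI handler's decision, using the read_pda() accessors this patch introduces):

        /* Sketch: flush-IPI handling under the lazy-TLB scheme. */
        static void flush_ipi_decision(struct mm_struct *flush_mm,
                                       unsigned long flush_va)
        {
                if (flush_mm != read_pda(active_mm))
                        return;                 /* IPI raced with an mm switch */
                if (read_pda(mmu_state) != TLBSTATE_OK) {
                        leave_mm(smp_processor_id()); /* lazy: detach instead */
                        return;
                }
                if (flush_va == FLUSH_ALL)
                        local_flush_tlb();
                else
                        __flush_tlb_one(flush_va);
        }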
......@@ -216,8 +214,8 @@ asmlinkage void smp_invalidate_interrupt (void)
* BUG();
*/
if (flush_mm == cpu_tlbstate[cpu].active_mm) {
if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
if (flush_mm == read_pda(active_mm)) {
if (read_pda(mmu_state) == TLBSTATE_OK) {
if (flush_va == FLUSH_ALL)
local_flush_tlb();
else
......@@ -335,7 +333,7 @@ static inline void do_flush_tlb_all_local(void)
unsigned long cpu = smp_processor_id();
__flush_tlb_all();
if (cpu_tlbstate[cpu].state == TLBSTATE_LAZY)
if (read_pda(mmu_state) == TLBSTATE_LAZY)
leave_mm(cpu);
}
......
......@@ -47,7 +47,7 @@
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define NO_VSYSCALL 1
//#define NO_VSYSCALL 1
#ifdef NO_VSYSCALL
#include <asm/unistd.h>
......
......@@ -189,3 +189,5 @@ EXPORT_SYMBOL_NOVERS(do_softirq_thunk);
void out_of_line_bug(void);
EXPORT_SYMBOL(out_of_line_bug);
EXPORT_SYMBOL(init_level4_pgt);
......@@ -12,7 +12,7 @@ obj-y = csum-partial.o csum-copy.o csum-wrappers.o delay.o \
thunk.o io.o clear_page.o copy_page.o
obj-y += memcpy.o
obj-y += memmove.o
#obj-y += memset.o
obj-y += memset.o
obj-y += copy_user.o
export-objs := io.o csum-wrappers.o csum-partial.o
......
/* Copyright 2002 Andi Kleen, SuSE Labs */
// #define FIX_ALIGNMENT 1
/* Copyright 2002 Andi Kleen */
/*
* ISO C memset - set a memory block to a byte value.
......@@ -11,51 +9,51 @@
*
* rax original destination
*/
.globl ____memset
.globl __memset
.globl memset
.p2align
____memset:
movq %rdi,%r10 /* save destination for return address */
movq %rdx,%r11 /* save count */
memset:
__memset:
movq %rdi,%r10
movq %rdx,%r11
/* expand byte value */
movzbl %sil,%ecx /* zero extend char value */
movabs $0x0101010101010101,%rax /* expansion pattern */
mul %rcx /* expand with rax, clobbers rdx */
movzbl %sil,%ecx
movabs $0x0101010101010101,%rax
mul %rcx /* with rax, clobbers rdx */
#ifdef FIX_ALIGNMENT
/* align dst */
movl %edi,%r9d
andl $7,%r9d /* test unaligned bits */
andl $7,%r9d
jnz bad_alignment
after_bad_alignment:
#endif
movq %r11,%rcx /* restore count */
shrq $6,%rcx /* divide by 64 */
jz handle_tail /* block smaller than 64 bytes? */
movl $64,%r8d /* CSE loop block size */
movq %r11,%rcx
movl $64,%r8d
shrq $6,%rcx
jz handle_tail
loop_64:
movnti %rax,0*8(%rdi)
movnti %rax,1*8(%rdi)
movnti %rax,2*8(%rdi)
movnti %rax,3*8(%rdi)
movnti %rax,4*8(%rdi)
movnti %rax,5*8(%rdi)
movnti %rax,6*8(%rdi)
movnti %rax,7*8(%rdi) /* clear 64 byte blocks */
addq %r8,%rdi /* increase pointer by 64 bytes */
loop loop_64 /* decrement rcx and if not zero loop */
movnti %rax,(%rdi)
movnti %rax,8(%rdi)
movnti %rax,16(%rdi)
movnti %rax,24(%rdi)
movnti %rax,32(%rdi)
movnti %rax,40(%rdi)
movnti %rax,48(%rdi)
movnti %rax,56(%rdi)
addq %r8,%rdi
loop loop_64
/* Handle tail in loops. The loops should be faster than
   hard-to-predict jump tables. */
handle_tail:
movl %r11d,%ecx
andl $63,%ecx
shrl $3,%ecx
andl $63&(~7),%ecx
jz handle_7
shrl $3,%ecx
loop_8:
movnti %rax,(%rdi) /* long words */
movnti %rax,(%rdi)
addq $8,%rdi
loop loop_8
......@@ -64,22 +62,20 @@ handle_7:
andl $7,%ecx
jz ende
loop_1:
movb %al,(%rdi) /* bytes */
incq %rdi
movb %al,(%rdi)
addq $1,%rdi
loop loop_1
ende:
movq %r10,%rax
ret
#ifdef FIX_ALIGNMENT
bad_alignment:
andq $-8,%r11 /* shorter than 8 bytes */
jz handle_7 /* if yes handle it in the tail code */
movnti %rax,(%rdi) /* unaligned store of 8 bytes */
cmpq $7,%r11
jbe handle_7
movnti %rax,(%rdi) /* unaligned store */
movq $8,%r8
subq %r9,%r8 /* compute alignment (8-misalignment) */
addq %r8,%rdi /* fix destination */
subq %r8,%r11 /* fix count */
subq %r9,%r8
addq %r8,%rdi
subq %r8,%r11
jmp after_bad_alignment
#endif
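The movzbl/movabs/mul prologue above is the standard byte-smearing trick; in C it is simply (equivalent sketch):

        /* Replicate the fill byte into all 8 lanes of a 64-bit word:
         * 0x0101010101010101 * 0xab == 0xabababababababab. */
        static inline unsigned long expand_byte(unsigned char c)
        {
                return 0x0101010101010101UL * c;
        }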
......@@ -16,11 +16,22 @@
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/thread_info.h>
extern void fpu_init(void);
extern void init_fpu(void);
int save_i387(struct _fpstate *buf);
static inline int need_signal_i387(struct task_struct *me)
{
if (!me->used_math)
return 0;
me->used_math = 0;
if (!test_thread_flag(TIF_USEDFPU))
return 0;
return 1;
}
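The helper is presumably consumed by the FPU save path when building a signal frame; a hypothetical wrapper showing the intended pattern (maybe_save_fpu is invented, not from this patch):

        /* Hypothetical: save FPU state into the signal frame only when needed. */
        static int maybe_save_fpu(struct _fpstate *fp)
        {
                if (!need_signal_i387(current))
                        return 0;       /* no live FPU state to save */
                return save_i387(fp);
        }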
/*
* FPU lazy state save handling...
*/
......
......@@ -18,7 +18,9 @@ typedef int __kernel_clock_t32;
typedef int __kernel_pid_t32;
typedef unsigned short __kernel_ipc_pid_t32;
typedef unsigned short __kernel_uid_t32;
typedef unsigned __kernel_uid32_t32;
typedef unsigned short __kernel_gid_t32;
typedef unsigned __kernel_gid32_t32;
typedef unsigned short __kernel_dev_t32;
typedef unsigned int __kernel_ino_t32;
typedef unsigned short __kernel_mode_t32;
......
#ifndef __i386_IPC_H__
#define __i386_IPC_H__
/*
* These are used to wrap system calls on x86.
*
* See arch/i386/kernel/sys_i386.c for ugly details..
*
* (on x86-64 only used for 32bit emulation)
*/
struct ipc_kludge {
struct msgbuf *msgp;
long msgtyp;
};
#define SEMOP 1
#define SEMGET 2
#define SEMCTL 3
#define MSGSND 11
#define MSGRCV 12
#define MSGGET 13
#define MSGCTL 14
#define SHMAT 21
#define SHMDT 22
#define SHMGET 23
#define SHMCTL 24
/* Used by the DIPC package, try and avoid reusing it */
#define DIPC 25
#define IPCCALL(version,op) ((version)<<16 | (op))
/* dummy */
#endif
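These are the opcodes the new ipc32.c demultiplexes for 32-bit emulation; structurally, sys32_ipc is a switch over them. A sketch (the 32-bit structure conversion, which is the bulk of the real file, is omitted, and sem_ctl32 is a hypothetical helper):

        asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
        {
                switch (call & 0xffff) { /* IPCCALL() packs a version in the high bits */
                case SEMOP:
                        return sys_semop(first, (struct sembuf *)(u64)ptr, second);
                case SEMGET:
                        return sys_semget(first, second, third);
                case SEMCTL:
                        return sem_ctl32(first, second, third, ptr);
                default:
                        return -ENOSYS; /* MSG and SHM cases are analogous */
                }
        }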
......@@ -19,8 +19,8 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
if(cpu_tlbstate[cpu].state == TLBSTATE_OK)
cpu_tlbstate[cpu].state = TLBSTATE_LAZY;
if (read_pda(mmu_state) == TLBSTATE_OK)
write_pda(mmu_state, TLBSTATE_LAZY);
}
#else
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
......@@ -35,8 +35,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* stop flush ipis for the previous mm */
clear_bit(cpu, &prev->cpu_vm_mask);
#ifdef CONFIG_SMP
cpu_tlbstate[cpu].state = TLBSTATE_OK;
cpu_tlbstate[cpu].active_mm = next;
write_pda(mmu_state, TLBSTATE_OK);
write_pda(active_mm, next);
#endif
set_bit(cpu, &next->cpu_vm_mask);
/* Re-load page tables */
......@@ -48,8 +48,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
#ifdef CONFIG_SMP
else {
cpu_tlbstate[cpu].state = TLBSTATE_OK;
if(cpu_tlbstate[cpu].active_mm != next)
write_pda(mmu_state, TLBSTATE_OK);
if (read_pda(active_mm) != next)
out_of_line_bug();
if(!test_and_set_bit(cpu, &next->cpu_vm_mask)) {
/* We were in lazy tlb mode and leave_mm disabled
......
......@@ -95,6 +95,7 @@
#define MSR_IA32_PERFCTR0 0xc1
#define MSR_IA32_PERFCTR1 0xc2
#define MSR_MTRRcap 0x0fe
#define MSR_IA32_BBL_CR_CTL 0x119
#define MSR_IA32_MCG_CAP 0x179
......@@ -110,6 +111,19 @@
#define MSR_IA32_LASTINTFROMIP 0x1dd
#define MSR_IA32_LASTINTTOIP 0x1de
#define MSR_MTRRfix64K_00000 0x250
#define MSR_MTRRfix16K_80000 0x258
#define MSR_MTRRfix16K_A0000 0x259
#define MSR_MTRRfix4K_C0000 0x268
#define MSR_MTRRfix4K_C8000 0x269
#define MSR_MTRRfix4K_D0000 0x26a
#define MSR_MTRRfix4K_D8000 0x26b
#define MSR_MTRRfix4K_E0000 0x26c
#define MSR_MTRRfix4K_E8000 0x26d
#define MSR_MTRRfix4K_F0000 0x26e
#define MSR_MTRRfix4K_F8000 0x26f
#define MSR_MTRRdefType 0x2ff
#define MSR_IA32_MC0_CTL 0x400
#define MSR_IA32_MC0_STATUS 0x401
#define MSR_IA32_MC0_ADDR 0x402
......@@ -171,11 +185,4 @@
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
#define MSR_IA32_THERM_CONTROL 0x19a
#define MSR_IA32_THERM_INTERRUPT 0x19b
#define MSR_IA32_THERM_STATUS 0x19c
#define MSR_IA32_MISC_ENABLE 0x1a0
#endif
......@@ -30,16 +30,16 @@
struct mtrr_sentry
{
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
__u64 base; /* Base address */
__u32 size; /* Size of region */
unsigned int type; /* Type of region */
};
struct mtrr_gentry
{
__u64 base; /* Base address */
__u32 size; /* Size of region */
unsigned int regnum; /* Register number */
unsigned long base; /* Base address */
unsigned long size; /* Size of region */
unsigned int type; /* Type of region */
};
......@@ -81,46 +81,38 @@ static char *mtrr_strings[MTRR_NUM_TYPES] =
#ifdef __KERNEL__
/* The following functions are for use by other drivers */
# ifdef CONFIG_MTRR
extern int mtrr_add (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_add_page (unsigned long base, unsigned long size,
unsigned int type, char increment);
extern int mtrr_del (int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page (int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
# else
static __inline__ int mtrr_add (unsigned long base, unsigned long size,
#ifdef CONFIG_MTRR
extern int mtrr_add (__u64 base, __u32 size, unsigned int type, char increment);
extern int mtrr_add_page (__u64 base, __u32 size, unsigned int type, char increment);
extern int mtrr_del (int reg, __u64 base, __u32 size);
extern int mtrr_del_page (int reg, __u64 base, __u32 size);
#else
static __inline__ int mtrr_add (__u64 base, __u32 size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_add_page (unsigned long base, unsigned long size,
static __inline__ int mtrr_add_page (__u64 base, __u32 size,
unsigned int type, char increment)
{
return -ENODEV;
}
static __inline__ int mtrr_del (int reg, unsigned long base,
unsigned long size)
static __inline__ int mtrr_del (int reg, __u64 base, __u32 size)
{
return -ENODEV;
}
static __inline__ int mtrr_del_page (int reg, unsigned long base,
unsigned long size)
static __inline__ int mtrr_del_page (int reg, __u64 base, __u32 size)
{
return -ENODEV;
}
static __inline__ void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi) {;}
# endif
#endif
/* The following functions are for initialisation: don't use them! */
extern int mtrr_init (void);
# if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
#if defined(CONFIG_SMP) && defined(CONFIG_MTRR)
extern void mtrr_init_boot_cpu (void);
extern void mtrr_init_secondary_cpu (void);
# endif
#endif
#endif
......
......@@ -22,6 +22,8 @@ struct x8664_pda {
unsigned int __local_bh_count;
unsigned int __nmi_count; /* arch dependent */
struct task_struct * __ksoftirqd_task; /* waitqueue is too large */
struct mm_struct *active_mm;
int mmu_state;
} ____cacheline_aligned;
#define PDA_STACKOFFSET (5*8)
......
......@@ -45,21 +45,12 @@ struct cpuinfo_x86 {
__u8 x86_vendor; /* CPU vendor */
__u8 x86_model;
__u8 x86_mask;
/* We know that wp_works_ok = 1, hlt_works_ok = 1, hard_math = 1,
etc... */
char wp_works_ok; /* It doesn't on 386's */
char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
char hard_math;
char rfu;
int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
__u32 x86_capability[NCAPINTS];
char x86_vendor_id[16];
char x86_model_id[64];
int x86_cache_size; /* in KB - valid for CPUS which support this
call */
int fdiv_bug;
int f00f_bug;
int coma_bug;
unsigned long loops_per_jiffy;
} ____cacheline_aligned;
......@@ -323,7 +314,7 @@ struct thread_struct {
/* IO permissions. the bitmap could be moved into the GDT, that would make
switch faster for a limited number of ioperm using tasks. -AK */
int ioperm;
u32 io_bitmap[IO_BITMAP_SIZE+1];
u32 *io_bitmap_ptr;
};
#define INIT_THREAD { \
......
......@@ -15,7 +15,7 @@ extern int printk(const char * fmt, ...)
typedef struct {
volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
#if SPINLOCK_DEBUG
unsigned magic;
#endif
} spinlock_t;
......@@ -39,7 +39,7 @@ typedef struct {
* We make no fairness assumptions. They have a cost.
*/
#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
#define spin_is_locked(x) (*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define spin_lock_string \
......@@ -62,7 +62,7 @@ typedef struct {
static inline int _raw_spin_trylock(spinlock_t *lock)
{
char oldval;
signed char oldval;
__asm__ __volatile__(
"xchgb %b0,%1"
:"=q" (oldval), "=m" (lock->lock)
......
......@@ -40,18 +40,9 @@ extern void *__memcpy(void *to, const void *from, size_t len);
__ret = __builtin_memcpy((dst),(src),__len); \
__ret; })
#if 0
#define __HAVE_ARCH_MEMSET
extern void *__memset(void *mem, int val, size_t len);
#define memset(dst,val,len) \
({ size_t __len = (len); \
void *__ret; \
if (__builtin_constant_p(len) && __len >= 64) \
__ret = __memset((dst),(val),__len); \
else \
__ret = __builtin_memset((dst),(val),__len); \
__ret; })
#endif
#define memset __builtin_memset
#define __HAVE_ARCH_MEMMOVE
void * memmove(void * dest,const void *src,size_t count);
......
#ifndef SUSPEND_H
#define SUSPEND_H 1
/* dummy for now */
#endif
......@@ -13,7 +13,10 @@
#define LOCK_PREFIX ""
#endif
#define prepare_to_switch() do {} while(0)
#define prepare_arch_schedule(prev) do { } while(0)
#define finish_arch_schedule(prev) do { } while(0)
#define prepare_arch_switch(rq) do { } while(0)
#define finish_arch_switch(rq) spin_unlock_irq(&(rq)->lock)
#define __STR(x) #x
#define STR(x) __STR(x)
......@@ -41,7 +44,7 @@
__POP(rax) __POP(r15) __POP(r14) __POP(r13) __POP(r12) __POP(r11) __POP(r10) \
__POP(r9) __POP(r8)
#define switch_to(prev,next) \
#define switch_to(prev,next,last) \
asm volatile(SAVE_CONTEXT \
"movq %%rsp,%[prevrsp]\n\t" \
"movq %[nextrsp],%%rsp\n\t" \
......
......@@ -48,6 +48,4 @@ static inline cycles_t get_cycles (void)
extern unsigned int cpu_khz;
#define ARCH_HAS_JIFFIES_64
#endif
......@@ -106,15 +106,6 @@ static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long st
#define TLBSTATE_OK 1
#define TLBSTATE_LAZY 2
struct tlb_state
{
struct mm_struct *active_mm;
int state;
char __cacheline_padding[24];
};
extern struct tlb_state cpu_tlbstate[NR_CPUS];
#endif
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
......