Commit fbf60bdb authored by Linus Torvalds

Import 2.1.112

parent e19d7734
......@@ -13,12 +13,12 @@
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/smp_lock.h>
#include <asm/gentrap.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <asm/sysinfo.h>
#include <asm/smp_lock.h>
static void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
......
/*
* bios32.c - Low-Level PCI Access
*
* $Id: bios32.c,v 1.40 1998/07/16 21:16:03 mj Exp $
* $Id: bios32.c,v 1.42 1998/07/26 09:33:07 mj Exp $
*
* Copyright 1993, 1994 Drew Eckhardt
* Visionary Computing
......@@ -86,7 +86,6 @@
#include "irq.h"
#undef DEBUG
#define DEBUG
#ifdef DEBUG
#define DBG(x...) printk(x)
......@@ -914,29 +913,31 @@ __initfunc(void pcibios_fixup_ghosts(struct pci_bus *b))
* Although several sources claim that the host bridges should have
* header type 1 and be assigned a bus number as for PCI2PCI bridges,
* the reality doesn't pass this test and the bus number is usually
* hard-wired to 1.
* set by BIOS to the first free value.
*/
__initfunc(void pcibios_fixup_peer_bridges(void))
{
struct pci_dev *dev;
int cnt = 0;
for(dev=pci_root.devices; dev; dev=dev->sibling)
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
DBG("PCI: Host bridge at %02x\n", dev->devfn);
if (cnt) {
struct pci_bus *b = kmalloc(sizeof(struct pci_bus), GFP_KERNEL);
struct pci_bus *b = &pci_root;
int i;
do {
int n = b->subordinate+1;
u16 l;
for(i=0; i<256; i += 8)
if (!pcibios_read_config_word(n, i, PCI_VENDOR_ID, &l) &&
l != 0x0000 && l != 0xffff) {
DBG("Found device at %02x:%02x\n", n, i);
printk("PCI: Discovered primary peer bus %02x\n", n);
b = kmalloc(sizeof(*b), GFP_KERNEL);
memset(b, 0, sizeof(*b));
b->parent = &pci_root;
b->next = pci_root.next;
pci_root.next = b;
b->self = dev;
b->number = b->secondary = cnt;
b->number = b->secondary = n;
b->subordinate = 0xff;
b->subordinate = pci_scan_bus(b);
break;
}
cnt++;
}
} while (i < 256);
}
/*
......
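[Editor's note] In outline, the rewritten pcibios_fixup_peer_bridges() above stops counting host-bridge devices and instead probes candidate bus numbers directly: a peer bus exists if any slot on it answers a vendor-ID config read. A minimal sketch of that probe; the helper name bus_has_devices is illustrative, not part of the patch:

/* Illustrative sketch only (not in the patch): a bus "exists" if
 * function 0 of any slot answers PCI_VENDOR_ID with a real value;
 * empty slots read back as 0x0000 or 0xffff. */
static int __init bus_has_devices(unsigned char bus)
{
	int devfn;
	u16 vendor;

	for (devfn = 0; devfn < 256; devfn += 8)	/* function 0 of each slot */
		if (!pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor) &&
		    vendor != 0x0000 && vendor != 0xffff)
			return 1;
	return 0;
}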
......@@ -20,7 +20,6 @@
extern void dump_thread(struct pt_regs *, struct user *);
extern int dump_fpu(elf_fpregset_t *);
extern void __lock_kernel(void);
#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
extern struct drive_info_struct drive_info;
......@@ -68,11 +67,8 @@ EXPORT_SYMBOL(strlen_user);
#ifdef __SMP__
EXPORT_SYMBOL(cpu_data);
EXPORT_SYMBOL_NOVERS(kernel_flag);
EXPORT_SYMBOL_NOVERS(active_kernel_processor);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_invalidate_needed);
EXPORT_SYMBOL_NOVERS(__lock_kernel);
EXPORT_SYMBOL(lk_lockmsg);
EXPORT_SYMBOL(__cpu_logical_map);
EXPORT_SYMBOL(smp_num_cpus);
......
......@@ -138,6 +138,7 @@
#define MTRR_NEED_STRINGS
#include <asm/mtrr.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/uaccess.h>
#include <asm/io.h>
......@@ -146,9 +147,7 @@
#include <asm/pgtable.h>
#include <asm/segment.h>
#include <asm/bitops.h>
#include <asm/smp_lock.h>
#include <asm/atomic.h>
#include <linux/smp.h>
#define MTRR_VERSION "1.22 (19980611)"
......
......@@ -100,8 +100,15 @@ extern void update_one_process( struct task_struct *p,
*/
/* Kernel spinlock */
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
/*
* Why isn't this somewhere standard ??
*
* Maybe because this procedure is horribly buggy, and does
* not deserve to live. Think about signedness issues for five
* seconds to see why. - Linus
*/
extern __inline int max(int a,int b)
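[Editor's note] To see the signedness issue the comment alludes to: when a caller mixes signed and unsigned arguments, the usual arithmetic conversions silently turn a value above INT_MAX into a negative int. A minimal user-space illustration, not part of the patch:

#include <stdio.h>

static int max(int a, int b) { return a > b ? a : b; }

int main(void)
{
	unsigned int big = 3000000000u;		/* > INT_MAX */
	/* big converts to a negative int, so max() wrongly picks 0 */
	printf("max(0, big) = %d\n", max(0, big));
	return 0;
}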
......@@ -135,8 +142,6 @@ unsigned long apic_retval; /* Just debugging the assembler.. */
static volatile unsigned char smp_cpu_in_msg[NR_CPUS]; /* True if this processor is sending an IPI */
volatile unsigned long kernel_flag=0; /* Kernel spinlock */
volatile unsigned char active_kernel_processor = NO_PROC_ID; /* Processor holding kernel spinlock */
volatile unsigned long kernel_counter=0; /* Number of times the processor holds the lock */
volatile unsigned long syscall_count=0; /* Number of times the processor holds the syscall lock */
......@@ -990,7 +995,6 @@ __initfunc(void smp_boot_cpus(void))
cpu_present_map |= (1 << hard_smp_processor_id());
cpu_number_map[boot_cpu_id] = 0;
active_kernel_processor=boot_cpu_id;
/*
* If we don't conform to the Intel MPS standard, get out
......@@ -1364,12 +1368,6 @@ void smp_flush_tlb(void)
{
unsigned long flags;
#if 0
if(smp_activated && smp_processor_id()!=active_kernel_processor) {
printk("CPU #%d:Attempted flush tlb IPI when not AKP(=%d)\n",smp_processor_id(),active_kernel_processor);
*(char *)0=0;
}
#endif
/* printk("SMI-");*/
/*
......@@ -1412,7 +1410,7 @@ void smp_send_reschedule(int cpu)
__save_flags(flags);
__cli();
smp_message_pass(cpu, MSG_RESCHEDULE, 0L, 2);
smp_message_pass(cpu, MSG_RESCHEDULE, 0L, 0);
__restore_flags(flags);
}
......
......@@ -6,6 +6,6 @@
$(CC) -D__ASSEMBLY__ $(AFLAGS) -traditional -c $< -o $*.o
L_TARGET = lib.a
L_OBJS = checksum.o semaphore.o locks.o delay.o usercopy.o getuser.o putuser.o
L_OBJS = checksum.o semaphore.o delay.o usercopy.o getuser.o putuser.o
include $(TOPDIR)/Rules.make
/* locks.S: Wheee... I'm coding Intel assembly...
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#include <linux/linkage.h>
/* Caller does atomic increment on current->lock_depth,
* if it was found to originally be zero then we get here,
* %eax contains caller's PC and %edx holds this CPU ID.
*/
ENTRY(__lock_kernel)
1:
lock
btsl $0, SYMBOL_NAME(kernel_flag)
jnc 3f
sti
2:
btl %edx, SYMBOL_NAME(smp_invalidate_needed)
jnc 0f
lock
btrl %edx, SYMBOL_NAME(smp_invalidate_needed)
jnc 0f
pushl %eax
movl %cr3, %eax
movl %eax, %cr3
popl %eax
0:
btl $0, SYMBOL_NAME(kernel_flag)
jc 2b
cli
jmp 1b
3:
movb %dl, SYMBOL_NAME(active_kernel_processor)
ret
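[Editor's note] For readers who do not speak Intel assembly, a hedged C-level rendering of the routine above; the helper name and bit operations are descriptive, not real kernel symbols:

/* Illustrative only: spin for bit 0 of kernel_flag; while waiting,
 * service any pending TLB-invalidate request for this CPU by
 * reloading %cr3, so we cannot deadlock against a flush IPI. */
void __lock_kernel_sketch(int cpu)			/* cpu arrives in %edx */
{
	while (test_and_set_bit(0, &kernel_flag)) {	/* lock btsl */
		sti();					/* spin with interrupts enabled */
		do {
			if (test_and_clear_bit(cpu, &smp_invalidate_needed))
				local_flush_tlb();	/* movl %cr3,%eax; movl %eax,%cr3 */
		} while (test_bit(0, &kernel_flag));	/* 2: wait for the flag to clear */
		cli();					/* retry the btsl with interrupts off */
	}
	active_kernel_processor = cpu;			/* 3: movb %dl, ... */
}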
......@@ -2694,7 +2694,7 @@ void putconsxy(int currcons, char *p)
u16 vcs_scr_readw(int currcons, u16 *org)
{
if (org == pos && softcursor_original != -1)
if (org == (u16 *)pos && softcursor_original != -1)
return softcursor_original;
return scr_readw(org);
}
......@@ -2702,7 +2702,7 @@ u16 vcs_scr_readw(int currcons, u16 *org)
void vcs_scr_writew(int currcons, u16 val, u16 *org)
{
scr_writew(val, org);
if (org == pos) {
if (org == (u16 *)pos) {
softcursor_original = -1;
add_softcursor(currcons);
}
......
......@@ -500,7 +500,7 @@ register_framebuffer(struct fb_info *fb_info)
if (first) {
first = 0;
take_over_console(&fb_con, 12, MAX_NR_CONSOLES-1, 1);
take_over_console(&fb_con, 0, MAX_NR_CONSOLES-1, 1);
}
return 0;
......
......@@ -37,7 +37,6 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/malloc.h>
/* #include <asm/smp_lock.h> */
/* kernel_thread */
#define __KERNEL_SYSCALLS__
......
......@@ -20,8 +20,9 @@
#include <linux/sysrq.h>
#include <linux/kbd_kern.h>
#include <linux/quotaops.h>
#include <linux/smp_lock.h>
#include <asm/ptrace.h>
#include <asm/smp_lock.h>
#ifdef CONFIG_APM
#include <linux/apm_bios.h>
......
/*
* $Id: oldproc.c,v 1.13 1998/05/07 20:49:50 davem Exp $
* $Id: oldproc.c,v 1.16 1998/07/19 17:50:18 davem Exp $
*
* Backward-compatible procfs interface for PCI.
*
......@@ -255,6 +255,7 @@ struct pci_dev_info dev_info[] = {
DEVICE( DATABOOK, DATABOOK_87144, "DB87144"),
DEVICE( PLX, PLX_9080, "PCI9080 I2O"),
DEVICE( MADGE, MADGE_MK2, "Smart 16/4 BM Mk2 Ringnode"),
DEVICE( MADGE, MADGE_C155S, "Collage 155 Server"),
DEVICE( 3COM, 3COM_3C339, "3C339 TokenRing"),
DEVICE( 3COM, 3COM_3C590, "3C590 10bT"),
DEVICE( 3COM, 3COM_3C595TX, "3C595 100bTX"),
......@@ -433,6 +434,7 @@ struct pci_dev_info dev_info[] = {
DEVICE( SATSAGEM, SATSAGEM_TELSATTURBO,"Telsat Turbo DVB"),
DEVICE( HUGHES, HUGHES_DIRECPC, "DirecPC"),
DEVICE( ENSONIQ, ENSONIQ_AUDIOPCI,"AudioPCI"),
DEVICE( ALTEON, ALTEON_ACENIC,"AceNIC"),
DEVICE( PICTUREL, PICTUREL_PCIVST,"PCIVST"),
DEVICE( NVIDIA_SGS, NVIDIA_SGS_RIVA128, "Riva 128"),
DEVICE( CBOARDS, CBOARDS_DAS1602_16,"DAS1602/16"),
......@@ -746,6 +748,7 @@ static const char *pci_strvendor(unsigned int vendor)
case PCI_VENDOR_ID_COMPEX: return "Compex";
case PCI_VENDOR_ID_RP: return "Comtrol";
case PCI_VENDOR_ID_CYCLADES: return "Cyclades";
case PCI_VENDOR_ID_ESSENTIAL: return "Essential Communications";
case PCI_VENDOR_ID_O2: return "O2 Micro";
case PCI_VENDOR_ID_3DFX: return "3Dfx";
case PCI_VENDOR_ID_SIGMADES: return "Sigma Designs";
......@@ -756,6 +759,7 @@ static const char *pci_strvendor(unsigned int vendor)
case PCI_VENDOR_ID_SATSAGEM: return "SatSagem";
case PCI_VENDOR_ID_HUGHES: return "Hughes";
case PCI_VENDOR_ID_ENSONIQ: return "Ensoniq";
case PCI_VENDOR_ID_ALTEON: return "Alteon";
case PCI_VENDOR_ID_PICTUREL: return "Picture Elements";
case PCI_VENDOR_ID_NVIDIA_SGS: return "NVidia/SGS Thomson";
case PCI_VENDOR_ID_CBOARDS: return "ComputerBoards";
......
......@@ -274,6 +274,7 @@ static struct dev_info device_list[] =
{"NRC","MBR-7","*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"NRC","MBR-7.4","*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"NAKAMICH","MJ-4.8S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"NAKAMICH","MJ-5.16S","*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER","CD-ROM DRM-600","*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER","CD-ROM DRM-602X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
{"PIONEER","CD-ROM DRM-604X","*", BLIST_FORCELUN | BLIST_SINGLELUN},
......
......@@ -21,7 +21,7 @@
#include <linux/blk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/smp_lock.h>
#include <linux/smp_lock.h>
#define __KERNEL_SYSCALLS__
......
......@@ -23,7 +23,7 @@
#include <linux/blk.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/smp_lock.h>
#include <linux/smp_lock.h>
#define __KERNEL_SYSCALLS__
......
......@@ -37,7 +37,6 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/smp_lock.h>
#include <asm/system.h>
#include <asm/io.h>
......
......@@ -50,8 +50,8 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
#include <asm/smp_lock.h>
#include <asm/shmiq.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
......
......@@ -31,8 +31,8 @@
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <asm/smp_lock.h>
#include <asm/usioctl.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
......
......@@ -674,6 +674,7 @@ static int do_open(const char * filename, int flags, int mode, int fd)
if (f->f_mode & FMODE_WRITE)
put_write_access(inode);
cleanup_dentry:
f->f_dentry = NULL;
dput(dentry);
cleanup_file:
put_filp(f);
......
......@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
#ifndef __ALPHA_SMPLOCK_H
#define __ALPHA_SMPLOCK_H
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while (0)
#else
#include <asm/system.h>
#include <asm/current.h>
#include <asm/bitops.h>
#include <asm/hardirq.h>
#define kernel_lock_held() \
(klock_info.kernel_flag && (klock_info.akp == smp_processor_id()))
/* Release global kernel lock and global interrupt lock */
#define release_kernel_lock(task, cpu, depth) \
do { \
if ((depth = (task)->lock_depth) != 0) { \
__cli(); \
(task)->lock_depth = 0; \
klock_info.akp = NO_PROC_ID; \
klock_info.kernel_flag = 0; \
mb(); \
} \
release_irqlock(cpu); \
__sti(); \
} while (0)
#if 1
#define DEBUG_KERNEL_LOCK
#else
#undef DEBUG_KERNEL_LOCK
#endif
#ifdef DEBUG_KERNEL_LOCK
extern void ___lock_kernel(klock_info_t *klip, int cpu, long ipl);
#else /* DEBUG_KERNEL_LOCK */
static inline void ___lock_kernel(klock_info_t *klip, int cpu, long ipl)
{
long regx;
__asm__ __volatile__(
"1: ldl_l %1,%0;"
" blbs %1,6f;"
" or %1,1,%1;"
" stl_c %1,%0;"
" beq %1,6f;"
"4: mb\n"
".section .text2,\"ax\"\n"
"6: mov %4,$16;"
" call_pal %3;"
"7: ldl %1,%0;"
" blbs %1,7b;"
" bis $31,7,$16;"
" call_pal %3;"
" br 1b\n"
".previous"
: "=m,=m" (__dummy_lock(klip)), "=&r,=&r" (regx)
: "0,0" (__dummy_lock(klip)), "i,i" (PAL_swpipl), "i,r" (ipl)
: "$0", "$1", "$16", "$22", "$23", "$24", "$25", "memory"
);
}
#endif /* DEBUG_KERNEL_LOCK */
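[Editor's note] A hedged C-level rendering of the Alpha LL/SC loop above (illustrative only; swpipl() stands in for the PAL_swpipl call):

/* Illustrative only: take bit 0 of klip->kernel_flag with an
 * ldl_l/stl_c pair; while the lock is busy, drop back to the
 * caller's IPL so interrupts are not blocked for the whole wait. */
static void ___lock_kernel_sketch(klock_info_t *klip, int cpu, long ipl)
{
	for (;;) {
		if (!test_and_set_bit(0, &klip->kernel_flag))
			return;			/* "4: mb" - lock acquired */
		swpipl(ipl);			/* 6: lower IPL while spinning */
		while (klip->kernel_flag & 1)	/* 7: ldl; blbs 7b */
			;
		swpipl(7);			/* raise IPL to 7, then retry */
	}
}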
#define reacquire_kernel_lock(task, cpu, depth) \
do { \
if (depth) { \
long ipl; \
klock_info_t *klip = &klock_info; \
__save_and_cli(ipl); \
___lock_kernel(klip, cpu, ipl); \
klip->akp = cpu; \
(task)->lock_depth = depth; \
__restore_flags(ipl); \
} \
} while (0)
/* The following acquire and release the master kernel global lock,
* the idea is that the usage of this mechanism becomes less and less
* as time goes on, to the point where they are no longer needed at all
* and can thus disappear.
*/
#define lock_kernel() \
if (current->lock_depth > 0) { \
++current->lock_depth; \
} else { \
long ipl; \
int cpu = smp_processor_id(); \
klock_info_t *klip = &klock_info; \
__save_and_cli(ipl); \
___lock_kernel(klip, cpu, ipl); \
klip->akp = cpu; \
current->lock_depth = 1; \
__restore_flags(ipl); \
}
/* Release kernel global lock. */
#define unlock_kernel() \
if (current->lock_depth > 1) { \
--current->lock_depth; \
} else { \
long ipl; \
__save_and_cli(ipl); \
klock_info.akp = NO_PROC_ID; \
klock_info.kernel_flag = KLOCK_CLEAR; \
mb(); \
current->lock_depth = 0; \
__restore_flags(ipl); \
}
#endif /* __SMP__ */
#endif /* __ALPHA_SMPLOCK_H */
......@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
#ifndef __I386_SMPLOCK_H
#define __I386_SMPLOCK_H
#define __STR(x) #x
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#error SMP not supported
#endif /* __SMP__ */
#endif /* __I386_SMPLOCK_H */
......@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
......@@ -7,6 +7,7 @@
#include <asm/i82489.h>
#include <asm/bitops.h>
#include <asm/fixmap.h>
#include <linux/tasks.h>
#include <linux/ptrace.h>
......@@ -161,9 +162,8 @@ extern unsigned long cpu_present_map;
extern volatile int cpu_number_map[NR_CPUS];
extern volatile unsigned long smp_invalidate_needed;
extern void smp_flush_tlb(void);
extern volatile unsigned long kernel_flag, kernel_counter;
extern volatile unsigned long cpu_callin_map[NR_CPUS];
extern volatile unsigned char active_kernel_processor;
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
extern unsigned long ipi_count;
......
#ifndef __I386_SMPLOCK_H
#define __I386_SMPLOCK_H
#define __STR(x) #x
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#include <asm/hardirq.h>
/* Release global kernel lock and global interrupt lock */
#define release_kernel_lock(task, cpu, depth) \
do { \
if ((depth = (task)->lock_depth) != 0) { \
__cli(); \
(task)->lock_depth = 0; \
active_kernel_processor = NO_PROC_ID; \
clear_bit(0,&kernel_flag); \
} \
release_irqlock(cpu); \
__sti(); \
} while (0)
/* Re-acquire the kernel lock */
#define reacquire_kernel_lock(task, cpu, depth) \
do { if (depth) __asm__ __volatile__( \
"cli\n\t" \
"call __lock_kernel\n\t" \
"movl %2,%0\n\t" \
"sti" \
: "=m" (task->lock_depth) \
: "d" (cpu), "c" (depth)); \
} while (0)
extern const char lk_lockmsg[];
/* Locking the kernel */
extern __inline__ void lock_kernel(void)
{
int cpu = smp_processor_id();
if (local_irq_count[cpu]) {
__label__ l1;
l1: printk(lk_lockmsg, &&l1);
}
if (cpu == global_irq_holder) {
__label__ l2;
l2: printk("Ugh at %p\n", &&l2);
sti();
}
__asm__ __volatile__("
pushfl
cli
cmpl $0, %0
jne 0f
call __lock_kernel
0: incl %0
popfl
" :
: "m" (current->lock_depth), "d" (cpu)
: "memory");
}
extern __inline__ void unlock_kernel(void)
{
__asm__ __volatile__("
pushfl
cli
decl %0
jnz 1f
movb %1, " __STR(active_kernel_processor) "
lock
btrl $0, " __STR(kernel_flag) "
1:
popfl
" : /* no outputs */
: "m" (current->lock_depth), "i" (NO_PROC_ID)
: "ax", "memory");
}
#endif /* __SMP__ */
#endif /* __I386_SMPLOCK_H */
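[Editor's note] In C terms, the two inline-asm bodies above come down to roughly the following (a sketch, not a drop-in replacement; pushfl/popfl become an EFLAGS save/restore):

/* Hedged C rendering of lock_kernel()/unlock_kernel() above */
void lock_kernel_sketch(void)
{
	unsigned long flags;

	__save_flags(flags);			/* pushfl */
	__cli();				/* cli */
	if (current->lock_depth == 0)		/* cmpl $0 / jne 0f */
		__lock_kernel();		/* the spin loop in locks.S */
	current->lock_depth++;			/* 0: incl */
	__restore_flags(flags);			/* popfl */
}

void unlock_kernel_sketch(void)
{
	unsigned long flags;

	__save_flags(flags);
	__cli();
	if (--current->lock_depth == 0) {	/* decl / jnz 1f */
		active_kernel_processor = NO_PROC_ID;
		clear_bit(0, &kernel_flag);	/* lock btrl $0 */
	}
	__restore_flags(flags);			/* 1: popfl */
}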
......@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
#ifndef __M68K_SMPLOCK_H
#define __M68K_SMPLOCK_H
/*
* We don't do SMP so this is again one of these silly dummy files
* to keep the kernel source looking nice ;-(.
*/
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#endif
......@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996 Ralf Baechle
*
* Linux/MIPS SMP support. Just a dummy to make building possible.
*/
#ifndef __ASM_MIPS_SMPLOCK_H
#define __ASM_MIPS_SMPLOCK_H
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#error "We do not support SMP on MIPS yet"
#endif /* __SMP__ */
#endif /* __ASM_MIPS_SMPLOCK_H */
......@@ -43,6 +43,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
#ifndef __PPC_SMPLOCK_H
#define __PPC_SMPLOCK_H
#ifndef __SMP__
#define lock_kernel() do { } while (0)
#define unlock_kernel() do { } while (0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else /* __SMP__ */
/* Release global kernel lock and global interrupt lock */
#define release_kernel_lock(task, cpu, depth) \
do { \
if((depth = (task)->lock_depth) != 0) { \
__cli(); \
(task)->lock_depth = 0; \
klock_info.akp = NO_PROC_ID; \
klock_info.kernel_flag = 0; \
} \
release_irqlock(cpu); \
__sti(); \
} while(0)
extern void reacquire_kernel_lock(struct task_struct *, int,int);
/* The following acquire and release the master kernel global lock,
* the idea is that the usage of this mechanism becomes less and less
* as time goes on, to the point where they are no longer needed at all
* and can thus disappear.
*/
extern void __lock_kernel(struct task_struct *);
extern void __unlock_kernel(struct task_struct *);
extern __inline__ void lock_kernel(void)
{
__lock_kernel(current);
}
/* Release kernel global lock. */
extern __inline__ void unlock_kernel(void)
{
__unlock_kernel(current);
}
#endif /* __SMP__ */
#endif /* __PPC_SMPLOCK_H */
......@@ -45,6 +45,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
/* smp_lock.h: 32-bit Sparc SMP master lock primitives.
*
* Copyright (C) 1996,1997 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef __SPARC_SMPLOCK_H
#define __SPARC_SMPLOCK_H
#include <asm/smp.h>
#include <asm/ptrace.h>
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#include <asm/hardirq.h>
/* Release global kernel lock and global interrupt lock */
#define release_kernel_lock(task, cpu, depth) \
do { \
if((depth = (task)->lock_depth) != 0) { \
__cli(); \
(task)->lock_depth = 0; \
klock_info.akp = NO_PROC_ID; \
klock_info.kernel_flag = 0; \
} \
release_irqlock(cpu); \
__sti(); \
} while(0)
/* Do not fuck with this without consulting arch/sparc/lib/locks.S first! */
#define reacquire_kernel_lock(task, cpu, depth) \
do { \
if(depth) { \
register struct klock_info *klip asm("g1"); \
register int proc asm("g5"); \
klip = &klock_info; \
proc = cpu; \
__asm__ __volatile__("mov %%o7, %%g4\n\t" \
"call ___lock_reacquire_kernel\n\t" \
" mov %2, %%g2" \
: /* No outputs. */ \
: "r" (klip), "r" (proc), "r" (depth) \
: "g2", "g3", "g4", "g7", "memory", "cc"); \
} \
} while(0)
/* The following acquire and release the master kernel global lock,
* the idea is that the usage of this mechanism becomes less and less
* as time goes on, to the point where they are no longer needed at all
* and can thus disappear.
*/
/* Do not fuck with this without consulting arch/sparc/lib/locks.S first! */
extern __inline__ void lock_kernel(void)
{
register struct klock_info *klip asm("g1");
register int proc asm("g5");
klip = &klock_info;
proc = smp_processor_id();
__asm__ __volatile__("
mov %%o7, %%g4
call ___lock_kernel
ld [%%g6 + %0], %%g2
" : : "i" (AOFF_task_lock_depth), "r" (klip), "r" (proc)
: "g2", "g3", "g4", "g7", "memory", "cc");
}
/* Release kernel global lock. */
extern __inline__ void unlock_kernel(void)
{
register struct klock_info *klip asm("g1");
klip = &klock_info;
__asm__ __volatile__("
mov %%o7, %%g4
call ___unlock_kernel
ld [%%g6 + %0], %%g2
" : : "i" (AOFF_task_lock_depth), "r" (klip)
: "g2", "g3", "g4", "memory", "cc");
}
#endif /* !(__SPARC_SMPLOCK_H) */
#endif /* (__SMP__) */
......@@ -51,6 +51,7 @@ typedef struct siginfo {
/* SIGCHLD */
struct {
pid_t _pid; /* which child */
uid_t _uid; /* sender's uid */
int _status; /* exit code */
clock_t _utime;
clock_t _stime;
......
/* smp_lock.h: Locking and unlocking the kernel on the 64-bit Sparc.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*/
#ifndef __SPARC64_SMPLOCK_H
#define __SPARC64_SMPLOCK_H
#include <asm/smp.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#include <asm/hardirq.h>
/* Release global kernel lock and global interrupt lock */
#define release_kernel_lock(task, cpu, depth) \
do { \
if((depth = (task)->lock_depth) != 0) { \
__cli(); \
(task)->lock_depth = 0; \
klock_info.akp = NO_PROC_ID; \
membar("#LoadStore | #StoreStore"); \
klock_info.kernel_flag = 0; \
} \
release_irqlock(cpu); \
__sti(); \
} while(0)
/* Do not fuck with this without consulting arch/sparc64/lib/locks.S first! */
#define reacquire_kernel_lock(task, cpu, depth) \
do { \
if(depth) { \
register struct klock_info *klip asm("g1"); \
klip = &klock_info; \
__asm__ __volatile__("mov %%o7, %%g5\n\t" \
"call ___lock_reacquire_kernel\n\t" \
" mov %1, %%g2" \
: /* No outputs. */ \
: "r" (klip), "r" (depth) \
: "g2", "g3", "g5", "memory", "cc"); \
} \
} while(0)
/* The following acquire and release the master kernel global lock,
* the idea is that the usage of this mechanism becomes less and less
* as time goes on, to the point where they are no longer needed at all
* and can thus disappear.
*/
/* Do not fuck with this without consulting arch/sparc64/lib/locks.S first! */
extern __inline__ void lock_kernel(void)
{
register struct klock_info *klip asm("g1");
klip = &klock_info;
__asm__ __volatile__("
mov %%o7, %%g5
call ___lock_kernel
lduw [%%g6 + %0], %%g2
" : : "i" (AOFF_task_lock_depth), "r" (klip)
: "g2", "g3", "g5", "memory", "cc");
}
/* Release kernel global lock. */
extern __inline__ void unlock_kernel(void)
{
register struct klock_info *klip asm("g1");
klip = &klock_info;
__asm__ __volatile__("
mov %%o7, %%g5
call ___unlock_kernel
lduw [%%g6 + %0], %%g2
" : : "i" (AOFF_task_lock_depth), "r" (klip)
: "g2", "g3", "g5", "memory", "cc");
}
#endif /* (__SMP__) */
#endif /* !(__SPARC64_SMPLOCK_H) */
......@@ -9,7 +9,15 @@
*
* Usage:
* For functions:
* you can surround the whole function declaration
*
* You should add __init immediately before the function name, like:
*
* static void __init initme(int x, int y)
* {
* extern int z; z = x * y;
* }
*
* Deprecated: you can surround the whole function declaration
* just before function body into __initfunc() macro, like:
*
* __initfunc (static void initme(int x, int y))
......@@ -17,17 +25,20 @@
* extern int z; z = x * y;
* }
*
* if the function has a prototype somewhere, you can also add
* If the function has a prototype somewhere, you can also add
* __init between closing brace of the prototype and semicolon:
*
* extern int initialize_foobar_device(int, int, int) __init;
*
* For initialized data:
* you should insert __initdata between the variable name and equal
* You should insert __initdata between the variable name and equal
* sign followed by value, e.g.:
*
* static int init_variable __initdata = 0;
* static char linux_logo[] __initdata = { 0x32, 0x36, ... };
*
* For initialized data not at file scope, i.e. within a function,
* you should use __initlocaldata instead, due to a bug in GCC 2.7.
*/
/*
......@@ -48,4 +59,10 @@
#define __INITDATA
#endif
#if __GNUC__ >= 2 && __GNUC_MINOR__ >= 8
#define __initlocaldata __initdata
#else
#define __initlocaldata
#endif
#endif
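[Editor's note] A short hypothetical example of the function-local case the comment above describes, using __initlocaldata inside an __init function:

/* Hypothetical illustration, not from the patch */
static void __init greet_once(void)
{
	static char msg[] __initlocaldata = "hello from init\n";
	printk(msg);
}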
......@@ -7,6 +7,7 @@
*/
#ifdef __SMP__
#include <asm/smp.h>
/*
......
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H
#include <asm/smp_lock.h>
#ifndef __SMP__
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu) do { } while(0)
#define reacquire_kernel_lock(task) do { } while(0)
#else
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
struct task_struct *tsk = current;
int lock_depth;
lock_depth = tsk->lock_depth;
tsk->lock_depth = lock_depth+1;
if (!lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
struct task_struct *tsk = current;
int lock_depth;
lock_depth = tsk->lock_depth-1;
tsk->lock_depth = lock_depth;
if (!lock_depth)
spin_unlock(&kernel_flag);
}
#endif /* __SMP__ */
#endif
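[Editor's note] The lock_depth counter is what makes the new spinlock-based lock_kernel() safe to nest; a hedged usage sketch:

/* Illustrative only: nesting is cheap, only the 0 <-> 1 depth
 * transitions actually touch the kernel_flag spinlock. */
void outer(void)
{
	lock_kernel();		/* depth 0 -> 1: spin_lock(&kernel_flag) */
	inner();
	unlock_kernel();	/* depth 1 -> 0: spin_unlock(&kernel_flag) */
}

void inner(void)
{
	lock_kernel();		/* depth 1 -> 2: just bumps the counter */
	/* ... work under the big kernel lock ... */
	unlock_kernel();	/* depth 2 -> 1 */
}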
......@@ -46,12 +46,10 @@ static void release(struct task_struct * p)
nr_tasks--;
add_free_taskslot(p->tarray_ptr);
{
unsigned long flags;
write_lock_irqsave(&tasklist_lock, flags);
write_lock_irq(&tasklist_lock);
unhash_pid(p);
REMOVE_LINKS(p);
write_unlock_irqrestore(&tasklist_lock, flags);
write_unlock_irq(&tasklist_lock);
}
release_thread(p);
current->cmin_flt += p->min_flt + p->cmin_flt;
......@@ -456,12 +454,11 @@ asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct
__put_user(p->exit_code, stat_addr);
retval = p->pid;
if (p->p_opptr != p->p_pptr) {
/* Note this grabs tasklist_lock
* as a writer... (twice!)
*/
write_lock_irq(&tasklist_lock);
REMOVE_LINKS(p);
p->p_pptr = p->p_opptr;
SET_LINKS(p);
write_unlock_irq(&tasklist_lock);
notify_parent(p, SIGCHLD);
} else
release(p);
......
......@@ -14,7 +14,8 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/unistd.h>
#include <asm/smp_lock.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
/*
......
......@@ -447,16 +447,15 @@ int del_timer(struct timer_list * timer)
*/
asmlinkage void schedule(void)
{
int lock_depth;
struct task_struct * prev, * next;
unsigned long timeout;
int this_cpu;
prev = current;
this_cpu = smp_processor_id();
this_cpu = prev->processor;
if (in_interrupt())
goto scheduling_in_interrupt;
release_kernel_lock(prev, this_cpu, lock_depth);
release_kernel_lock(prev, this_cpu);
if (bh_active & bh_mask)
do_bottom_half();
......@@ -464,6 +463,7 @@ asmlinkage void schedule(void)
spin_lock_irq(&runqueue_lock);
/* move an exhausted RR process to be last.. */
prev->need_resched = 0;
if (!prev->counter && prev->policy == SCHED_RR) {
prev->counter = prev->priority;
move_last_runqueue(prev);
......@@ -561,8 +561,7 @@ asmlinkage void schedule(void)
* switched into it (from an even more "previous"
* prev)
*/
prev->need_resched = 0;
reacquire_kernel_lock(prev, smp_processor_id(), lock_depth);
reacquire_kernel_lock(prev);
return;
scheduling_in_interrupt:
......
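[Editor's note] Around the context switch, the new two-argument release pairs with the one-argument reacquire; a hedged outline of the hand-off in schedule():

/* Illustrative outline only: prev drops the BKL before switching,
 * and re-takes it when it is eventually scheduled back in. */
void schedule_outline(void)
{
	struct task_struct *prev = current;
	int this_cpu = prev->processor;

	release_kernel_lock(prev, this_cpu);	/* unlock if prev->lock_depth != 0 */
	/* ... choose next and switch_to() it ... */
	reacquire_kernel_lock(prev);		/* prev resumes: relock if it held it */
}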
......@@ -109,7 +109,6 @@
#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/smp_lock.h>
#include <asm/spinlock.h>
/* If there is a different PAGE_SIZE around, and it works with this allocator,
......