Commit e3adef99 authored by Linus Torvalds

Move x86 big-kernel-lock implementation into <linux/smp_lock.h>,
since it was generic.

Remove all architecture-specific <asm/smplock.h> files.

parent 29ceefc7
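Every per-architecture header deleted below implements the same nesting protocol around current->lock_depth: the counter starts at -1, only the transition to 0 actually takes the kernel_flag spinlock, and only the drop back below 0 releases it. A rough usage sketch of the interface that survives in <linux/smp_lock.h> (example_ioctl() and do_something() are made-up names for illustration, not part of this commit):

#include <linux/smp_lock.h>

/* Illustration only: the BKL nests per task, so a helper may take it
 * again; only the outermost lock/unlock pair touches kernel_flag. */
static int do_something(void)
{
        lock_kernel();          /* nested: depth 0 -> 1, no spinlock op */
        /* ... work that historically relied on the BKL ... */
        unlock_kernel();        /* depth 1 -> 0, lock still held */
        return 0;
}

static int example_ioctl(void)
{
        int ret;

        lock_kernel();          /* depth -1 -> 0: spin_lock(&kernel_flag) */
        ret = do_something();   /* nests safely */
        unlock_kernel();        /* depth 0 -> -1: spin_unlock(&kernel_flag) */
        return ret;
}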
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
static __inline__ void release_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
spin_unlock(&kernel_flag);
}
/*
* Re-acquire the kernel lock
*/
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
if (unlikely(task->lock_depth >= 0))
spin_lock(&kernel_flag);
}
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif
}
static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
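To make the depth accounting above concrete, a worked trace for one level of nesting (annotation added here, not part of the original header):

/*
 *   call               lock_depth after   kernel_flag
 *   (initial state)    -1                 not held by this task
 *   lock_kernel()       0                 spin_lock() taken
 *   lock_kernel()       1                 no spinlock operation
 *   unlock_kernel()     0                 still held
 *   unlock_kernel()    -1                 spin_unlock() released
 */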
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#else
#define kernel_locked() spin_is_locked(&kernel_flag)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static inline void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif
}
static inline void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
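The CONFIG_PREEMPT branch of lock_kernel() above tests lock_depth == -1 before incrementing, instead of using the !++current->lock_depth form. A plausible reading, given the release_kernel_lock() path in these headers: if the increment landed first and the task were preempted before spin_lock(), schedule() would see lock_depth >= 0 and spin_unlock() a kernel_flag this task never took. Restated as an annotated sketch (my comments, not new kernel code):

#include <linux/sched.h>
#include <linux/spinlock.h>

extern spinlock_t kernel_flag;

static inline void lock_kernel_preempt_sketch(void)
{
        if (current->lock_depth == -1)   /* outermost acquisition only */
                spin_lock(&kernel_flag); /* under CONFIG_PREEMPT this also
                                            disables preemption */
        ++current->lock_depth;           /* counter only becomes >= 0 once
                                            the lock is actually held */
}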
#ifndef __ASM_CRIS_SMPLOCK_H
#define __ASM_CRIS_SMPLOCK_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/config.h>
#ifndef CONFIG_SMP
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#error "We do not support SMP on CRIS"
#endif
#endif
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* i386 SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
extern spinlock_t kernel_flag;
#define kernel_locked() (current->lock_depth >= 0)
#define get_kernel_lock() spin_lock(&kernel_flag)
#define put_kernel_lock() spin_unlock(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
put_kernel_lock(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
get_kernel_lock(); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
int depth = current->lock_depth+1;
if (!depth)
get_kernel_lock();
current->lock_depth = depth;
}
static __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
if (--current->lock_depth < 0)
put_kernel_lock();
}
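The i386 variant above, which this commit promotes into <linux/smp_lock.h>, also adds a sanity check: unlock_kernel() BUG()s if the depth counter would underflow. A hypothetical illustration (unbalanced_example() is a made-up name):

#include <linux/smp_lock.h>

static void unbalanced_example(void)
{
        lock_kernel();          /* depth -1 -> 0, kernel_flag taken */
        unlock_kernel();        /* depth 0 -> -1, kernel_flag released */
        unlock_kernel();        /* depth is already -1: BUG() fires */
}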
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/hardirq.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
# define kernel_locked() spin_is_locked(&kernel_flag)
#else
# define kernel_locked() (1)
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void
lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void
unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/* $Id: smplock.h,v 1.2 1999/10/09 00:01:43 ralf Exp $
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#ifndef _ASM_SMPLOCK_H
#define _ASM_SMPLOCK_H
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
static __inline__ void release_kernel_lock(struct task_struct *task, int cpu)
{
if (task->lock_depth >= 0)
spin_unlock(&kernel_flag);
release_irqlock(cpu);
local_irq_enable();
}
/*
* Re-acquire the kernel lock
*/
static __inline__ void reacquire_kernel_lock(struct task_struct *task)
{
if (task->lock_depth >= 0)
spin_lock(&kernel_flag);
}
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#endif /* _ASM_SMPLOCK_H */
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* BK Id: %F% %I% %G% %U% %#%
*/
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#ifdef __KERNEL__
#ifndef __ASM_SMPLOCK_H__
#define __ASM_SMPLOCK_H__
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
#elif defined(CONFIG_PREEMPT)
#define kernel_locked() preempt_count()
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#endif /* CONFIG_PREEMPT */
}
static __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#endif /* __ASM_SMPLOCK_H__ */
#endif /* __KERNEL__ */
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
static __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* include/asm-s390/smplock.h
*
* S390 version
*
* Derived from "include/asm-i386/smplock.h"
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* include/asm-s390/smplock.h
*
* S390 version
*
* Derived from "include/asm-i386/smplock.h"
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() spin_is_locked(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
#ifndef __ASM_SH_SMPLOCK_H
#define __ASM_SH_SMPLOCK_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/config.h>
#ifndef CONFIG_SMP
#define lock_kernel() do { } while(0)
#define unlock_kernel() do { } while(0)
#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
#else
#error "We do not support SMP on SH"
#endif /* CONFIG_SMP */
#endif /* __ASM_SH_SMPLOCK_H */
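The CRIS and SH stubs are the uniprocessor case: with !CONFIG_SMP the BKL operations compile away entirely. After this commit the same effect comes from the !CONFIG_SMP && !CONFIG_PREEMPT branch of <linux/smp_lock.h> itself, which is why the per-architecture stubs can be removed. A sketch of what a caller reduces to on such a build (hypothetical function name):

#include <linux/smp_lock.h>

static int up_example(void)
{
        lock_kernel();          /* expands to do { } while (0) */
        /* ... nothing to serialize against on a non-preemptible UP kernel ... */
        unlock_kernel();        /* expands to do { } while (0) */
        return 0;
}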
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#define kernel_locked() \
(spin_is_locked(&kernel_flag) &&\
(current->lock_depth >= 0))
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
local_irq_enable(); \
} \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
#define lock_kernel() \
do { \
if (!++current->lock_depth) \
spin_lock(&kernel_flag); \
} while(0)
#define unlock_kernel() \
do { \
if (--current->lock_depth < 0) \
spin_unlock(&kernel_flag); \
} while(0)
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() \
(spin_is_locked(&kernel_flag) &&\
(current->lock_depth >= 0))
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#else
#define kernel_locked() 1
#endif
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
#define lock_kernel() \
do { \
if (!++current->lock_depth) \
spin_lock(&kernel_flag); \
} while(0)
#define unlock_kernel() \
do { \
if (--current->lock_depth < 0) \
spin_unlock(&kernel_flag); \
} while(0)
/*
* <asm/smplock.h>
*/
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
#define kernel_locked() spin_is_locked(&kernel_flag)
#define check_irq_holder(cpu) \
if (global_irq_holder == (cpu)) \
BUG();
#else
#ifdef CONFIG_PREEMPT
#define kernel_locked() preempt_get_count()
#define global_irq_holder 0
#define check_irq_holder(cpu) do {} while(0)
#else
#define kernel_locked() 1
#define check_irq_holder(cpu) \
if (global_irq_holder == (cpu)) \
BUG();
#endif
#endif
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
spin_unlock(&kernel_flag); \
check_irq_holder(cpu); \
} \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
#ifdef CONFIG_PREEMPT
if (current->lock_depth == -1)
spin_lock(&kernel_flag);
++current->lock_depth;
#else
#if 1
if (!++current->lock_depth)
spin_lock(&kernel_flag);
#else
__asm__ __volatile__(
"incl %1\n\t"
"jne 9f"
spin_lock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
#endif
#endif
}
extern __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
#if 1
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
#else
__asm__ __volatile__(
"decl %1\n\t"
"jns 9f\n\t"
spin_unlock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
#endif
}
@@ -13,7 +13,59 @@
#else
#include <asm/smplock.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <asm/current.h>
extern spinlock_t kernel_flag;
#define kernel_locked() (current->lock_depth >= 0)
#define get_kernel_lock() spin_lock(&kernel_flag)
#define put_kernel_lock() spin_unlock(&kernel_flag)
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
put_kernel_lock(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) \
get_kernel_lock(); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
static __inline__ void lock_kernel(void)
{
int depth = current->lock_depth+1;
if (!depth)
get_kernel_lock();
current->lock_depth = depth;
}
static __inline__ void unlock_kernel(void)
{
if (current->lock_depth < 0)
BUG();
if (--current->lock_depth < 0)
put_kernel_lock();
}
#endif /* CONFIG_SMP */
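The hunk above is the other half of the move: inside the CONFIG_SMP/CONFIG_PREEMPT branch of include/linux/smp_lock.h, the old #include <asm/smplock.h> fallback is replaced by the x86-style implementation, so every architecture now shares one copy. The release/reacquire helpers exist for the scheduler, which drops the BKL across a context switch and re-takes it when the task resumes; a simplified sketch (not the literal kernel/sched.c code):

#include <linux/sched.h>
#include <linux/smp_lock.h>

static void schedule_sketch(void)
{
        struct task_struct *prev = current;

        release_kernel_lock(prev);      /* drop kernel_flag if prev holds it */

        /* ... pick the next runnable task and context-switch to it ... */

        reacquire_kernel_lock(current); /* re-taken when this task runs again */
}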