Commit e9d9db6b authored Feb 12, 2002 by Linus Torvalds
Merge master.kernel.org:BK/linux-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux
parents 8875527c 4fe9df79
Showing 10 changed files with 166 additions and 157 deletions.
arch/i386/kernel/entry.S        +116  -99
arch/i386/kernel/ptrace.c         +2   -0
arch/i386/kernel/signal.c         +5   -1
include/asm-i386/bitops.h        +19   -0
include/asm-i386/mmu_context.h    +0  -18
include/asm-i386/system.h         +6   -7
include/linux/sched.h             +0   -1
include/linux/spinlock.h          +2   -3
kernel/exit.c                     +6   -1
kernel/sched.c                   +10  -27
arch/i386/kernel/entry.S

This diff is collapsed.
arch/i386/kernel/ptrace.c

@@ -455,9 +455,11 @@ void do_syscall_trace(struct pt_regs *regs, int entryexit)
 	   between a syscall stop and SIGTRAP delivery */
 	current->exit_code = SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
 					? 0x80 : 0);
+	preempt_disable();
 	current->state = TASK_STOPPED;
 	notify_parent(current, SIGCHLD);
 	schedule();
+	preempt_enable();
 	/*
 	 * this isn't the same as continuing with a signal, but it will do
 	 * for normal use.  strace only continues with a signal if the
arch/i386/kernel/signal.c

@@ -610,9 +610,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
 			/* Let the debugger run.  */
 			current->exit_code = signr;
+			preempt_disable();
 			current->state = TASK_STOPPED;
 			notify_parent(current, SIGCHLD);
 			schedule();
+			preempt_enable();
 			/* We're back.  Did the debugger cancel the sig?  */
 			if (!(signr = current->exit_code))

@@ -667,12 +669,14 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset)
 			case SIGSTOP: {
 				struct signal_struct *sig;
-				current->state = TASK_STOPPED;
 				current->exit_code = signr;
 				sig = current->p_pptr->sig;
+				preempt_disable();
+				current->state = TASK_STOPPED;
 				if (sig && !(sig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
 					notify_parent(current, SIGCHLD);
 				schedule();
+				preempt_enable();
 				continue;
 			}
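The preempt_disable()/preempt_enable() pairs added in ptrace.c and signal.c above all guard the same idiom: write current->state = TASK_STOPPED, notify the parent, then schedule() away. A plausible reading, inferred from the preempt_schedule() rework at the bottom of this commit rather than stated anywhere in it: the new preempt_schedule() forces current->state back to TASK_RUNNING, so an untimely kernel preemption between the state write and the schedule() call would silently undo the stop. A minimal sketch of the guarded idiom (stop_and_notify() is a hypothetical name; the kernel open-codes this sequence at each call site):

	static inline void stop_and_notify(int exit_code)
	{
		current->exit_code = exit_code;
		preempt_disable();		 /* a preemption after the next line would   */
		current->state = TASK_STOPPED;	 /* otherwise reset the state back to        */
		notify_parent(current, SIGCHLD); /* TASK_RUNNING via preempt_schedule()      */
		schedule();			 /* sleep until the tracer resumes this task */
		preempt_enable();
	}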
include/asm-i386/bitops.h

@@ -415,6 +415,25 @@ static __inline__ unsigned long __ffs(unsigned long word)
 #ifdef __KERNEL__

+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 140-bit bitmap where the first 100 bits are
+ * unlikely to be set. It's guaranteed that at least one of the 140
+ * bits is cleared.
+ */
+static inline int sched_find_first_bit(unsigned long *b)
+{
+	if (unlikely(b[0]))
+		return __ffs(b[0]);
+	if (unlikely(b[1]))
+		return __ffs(b[1]) + 32;
+	if (unlikely(b[2]))
+		return __ffs(b[2]) + 64;
+	if (b[3])
+		return __ffs(b[3]) + 96;
+	return __ffs(b[4]) + 128;
+}
+
 /**
  * ffs - find first bit set
  * @x: the word to search
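sched_find_first_bit() moves here verbatim from include/asm-i386/mmu_context.h (removed below), making it reachable through <asm/bitops.h>. The 140 bits correspond to the O(1) scheduler's 140 priority levels, the first 100 of which are real-time priorities that are usually empty, which is why the first three words get unlikely() and the fourth does not. A rough usage sketch with an assumed bitmap layout (the real one lives in kernel/sched.c's runqueues):

	#define MAX_PRIO	140		/* assumed: 100 RT + 40 timesharing levels */

	unsigned long bitmap[5];		/* 5 * 32 = 160 bits >= MAX_PRIO           */
	int idx;

	__set_bit(120, bitmap);			/* mark priority 120 as having runners     */
	idx = sched_find_first_bit(bitmap);	/* -> 120, the best non-empty priority     */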
include/asm-i386/mmu_context.h

@@ -6,24 +6,6 @@
 #include <asm/atomic.h>
 #include <asm/pgalloc.h>

-/*
- * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
- */
-static inline int sched_find_first_bit(unsigned long *b)
-{
-	if (unlikely(b[0]))
-		return __ffs(b[0]);
-	if (unlikely(b[1]))
-		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
-		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
-}
 /*
  * possibly do the LDT unload here?
  */
include/asm-i386/system.h

@@ -13,24 +13,23 @@ struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
 extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

 #define prepare_to_switch()	do { } while(0)
-#define switch_to(prev,next,last) do {					\
+#define switch_to(prev,next) do {					\
 	asm volatile("pushl %%esi\n\t"					\
 		     "pushl %%edi\n\t"					\
 		     "pushl %%ebp\n\t"					\
 		     "movl %%esp,%0\n\t"	/* save ESP */		\
-		     "movl %3,%%esp\n\t"	/* restore ESP */	\
+		     "movl %2,%%esp\n\t"	/* restore ESP */	\
 		     "movl $1f,%1\n\t"		/* save EIP */		\
-		     "pushl %4\n\t"		/* restore EIP */	\
+		     "pushl %3\n\t"		/* restore EIP */	\
 		     "jmp __switch_to\n"				\
 		     "1:\t"						\
 		     "popl %%ebp\n\t"					\
 		     "popl %%edi\n\t"					\
 		     "popl %%esi\n\t"					\
-		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
-		      "=b" (last)					\
+		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip)	\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
-		      "a" (prev), "d" (next),				\
-		      "b" (prev));					\
+		      "a" (prev), "d" (next));				\
 } while (0)

 #define _set_base(addr,base) do { unsigned long __pr; \
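Dropping the third switch_to() argument removes the "=b" (last) output and the "b" (prev) input, so the remaining asm operands renumber; that is all the %3 to %2 and %4 to %3 edits above are. My tally of the new numbering, worth double-checking against the constraint lists:

	/* operand numbering after "=b" (last) is removed:
	 *   %0  "=m" prev->thread.esp   (output)
	 *   %1  "=m" prev->thread.eip   (output)
	 *   %2   "m" next->thread.esp   (input, was %3)
	 *   %3   "m" next->thread.eip   (input, was %4)
	 *   %4   "a" prev               (input)
	 *   %5   "d" next               (input)
	 */

The matching call-site change, switch_to(prev, next, prev) becoming switch_to(prev, next), is in kernel/sched.c below.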
include/linux/sched.h

@@ -92,7 +92,6 @@ extern unsigned long nr_running(void)
 #define TASK_UNINTERRUPTIBLE	2
 #define TASK_ZOMBIE		4
 #define TASK_STOPPED		8
-#define PREEMPT_ACTIVE		0x4000000

 #define __set_task_state(tsk, state_value)		\
 	do { (tsk)->state = (state_value); } while (0)
include/linux/spinlock.h

@@ -177,9 +177,8 @@ do { \
 do { \
 	--current_thread_info()->preempt_count; \
 	barrier(); \
-	if (unlikely(!(current_thread_info()->preempt_count) && \
-		test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule(); \
 } while (0)

 #define spin_lock(lock) \
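The unlock path now tests only TIF_NEED_RESCHED; the preempt_count nesting check moves into preempt_schedule() itself (see the kernel/sched.c hunk below), which returns immediately when the caller is still inside a preempt-disabled region. The effect on nested critical sections, sketched under the assumption that the macro above is the body of preempt_enable():

	preempt_disable();	/* preempt_count 0 -> 1                       */
	preempt_disable();	/* preempt_count 1 -> 2                       */
	preempt_enable();	/* 2 -> 1: preempt_schedule() bails out early */
	preempt_enable();	/* 1 -> 0: a pending reschedule runs here     */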
kernel/exit.c

@@ -476,7 +476,12 @@ static void exit_notify(void)
 			write_lock_irq(&tasklist_lock);
 		}
 	}
-	write_unlock_irq(&tasklist_lock);
+	/*
+	 * No need to unlock IRQs, we'll schedule() immediately
+	 * anyway. In the preemption case this also makes it
+	 * impossible for the task to get runnable again.
+	 */
+	write_unlock(&tasklist_lock);
 }

 NORET_TYPE void do_exit(long code)
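The new comment carries the reasoning: exit_notify() runs on the way into the final schedule() of do_exit(), so re-enabling IRQs buys nothing, and with kernel preemption configured, keeping them off means no interrupt can preempt the dying task or make it runnable again after the unlock. Condensed shape of the tail of the exit path (a sketch, not the literal code):

	write_lock_irq(&tasklist_lock);	/* irqs go off here                    */
	/* ... reparent children, notify the parent ... */
	write_unlock(&tasklist_lock);	/* drop the lock, leave irqs disabled  */
	/* back in do_exit(): */
	schedule();			/* final switch away; never returns    */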
kernel/sched.c

@@ -435,17 +435,8 @@ static inline void context_switch(task_t *prev, task_t *next)
 		mmdrop(oldmm);
 	}

-	/*
-	 * Here we just switch the register state and the stack. There are
-	 * 3 processes affected by a context switch:
-	 *
-	 * prev ==> .... ==> (last => next)
-	 *
-	 * It's the 'much more previous' 'prev' that is on next's stack,
-	 * but prev is set to (the just run) 'last' process by switch_to().
-	 * This might sound slightly confusing but makes tons of sense.
-	 */
-	switch_to(prev, next, prev);
+	/* Here we just switch the register state and the stack. */
+	switch_to(prev, next);
 }

 unsigned long nr_running(void)

@@ -770,6 +761,7 @@ asmlinkage void schedule(void)
 	if (unlikely(in_interrupt()))
 		BUG();

+need_resched:
 	preempt_disable();
 	prev = current;
 	rq = this_rq();

@@ -778,15 +770,6 @@ asmlinkage void schedule(void)
 	prev->sleep_timestamp = jiffies;
 	spin_lock_irq(&rq->lock);

-#ifdef CONFIG_PREEMPT
-	/*
-	 * if entering from preempt_schedule, off a kernel preemption,
-	 * go straight to picking the next task.
-	 */
-	if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
-		goto pick_next_task;
-#endif
-
 	switch (prev->state) {
 	case TASK_INTERRUPTIBLE:
 		if (unlikely(signal_pending(prev))) {

@@ -798,7 +781,7 @@ asmlinkage void schedule(void)
 	case TASK_RUNNING:
 		;
 	}
-#if CONFIG_SMP || CONFIG_PREEMPT
+#if CONFIG_SMP
 pick_next_task:
 #endif
 	if (unlikely(!rq->nr_running)) {

@@ -847,6 +830,8 @@ asmlinkage void schedule(void)
 	reacquire_kernel_lock(current);
 	preempt_enable_no_resched();
+	if (test_thread_flag(TIF_NEED_RESCHED))
+		goto need_resched;
 	return;
 }

@@ -856,12 +841,10 @@ asmlinkage void schedule(void)
  */
 asmlinkage void preempt_schedule(void)
 {
-	do {
-		current_thread_info()->preempt_count += PREEMPT_ACTIVE;
-		schedule();
-		current_thread_info()->preempt_count -= PREEMPT_ACTIVE;
-		barrier();
-	} while (test_thread_flag(TIF_NEED_RESCHED));
+	if (unlikely(preempt_get_count()))
+		return;
+	current->state = TASK_RUNNING;
+	schedule();
 }
 #endif /* CONFIG_PREEMPT */
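Taken together, the kernel/sched.c hunks replace the PREEMPT_ACTIVE re-entry scheme with a plain retry loop: schedule() runs with preemption disabled, drops the count without rescheduling on the way out, and jumps back to the top if the need-resched flag was set again in the meantime, while preempt_schedule() shrinks to a guard plus a call. A condensed view of the new control flow, assembled from the hunks above rather than copied from the tree:

	asmlinkage void schedule(void)
	{
	need_resched:
		preempt_disable();		/* no recursive preemption mid-switch     */
		/* ... pick the next task, context_switch(prev, next) ... */
		preempt_enable_no_resched();	/* drop the count, do not reschedule yet  */
		if (test_thread_flag(TIF_NEED_RESCHED))
			goto need_resched;	/* something woke up while we switched    */
	}

	asmlinkage void preempt_schedule(void)
	{
		if (unlikely(preempt_get_count()))
			return;			/* still in a critical section; retried   */
						/* at the next preempt_enable()           */
		current->state = TASK_RUNNING;
		schedule();
	}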