Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
nexedi
linux
Commits
a68d9759
Commit
a68d9759
authored
May 16, 2002
by
Richard Gooch
Browse files
Options
Browse Files
Download
Plain Diff
Merge atnf.csiro.au:/workaholix1/kernel/v2.5/linus
into atnf.csiro.au:/workaholix1/kernel/v2.5/rgooch-2.5
parents
6a71fc38
4cc4c697
Changes
43
Hide whitespace changes
Inline
Side-by-side
Showing
43 changed files
with
342 additions
and
305 deletions
+342
-305
arch/alpha/kernel/process.c
arch/alpha/kernel/process.c
+9
-3
arch/alpha/kernel/smp.c
arch/alpha/kernel/smp.c
+4
-7
arch/arm/kernel/sys_arm.c
arch/arm/kernel/sys_arm.c
+9
-3
arch/cris/kernel/process.c
arch/cris/kernel/process.c
+9
-3
arch/i386/kernel/process.c
arch/i386/kernel/process.c
+11
-3
arch/i386/kernel/setup.c
arch/i386/kernel/setup.c
+0
-5
arch/i386/kernel/smpboot.c
arch/i386/kernel/smpboot.c
+4
-7
arch/ia64/ia32/ia32_entry.S
arch/ia64/ia32/ia32_entry.S
+2
-2
arch/ia64/kernel/entry.S
arch/ia64/kernel/entry.S
+2
-2
arch/ia64/kernel/smpboot.c
arch/ia64/kernel/smpboot.c
+4
-7
arch/m68k/kernel/process.c
arch/m68k/kernel/process.c
+9
-3
arch/mips/kernel/smp.c
arch/mips/kernel/smp.c
+2
-3
arch/mips/kernel/syscall.c
arch/mips/kernel/syscall.c
+6
-6
arch/mips64/kernel/syscall.c
arch/mips64/kernel/syscall.c
+6
-6
arch/mips64/sgi-ip27/ip27-init.c
arch/mips64/sgi-ip27/ip27-init.c
+1
-1
arch/parisc/kernel/entry.S
arch/parisc/kernel/entry.S
+1
-1
arch/parisc/kernel/process.c
arch/parisc/kernel/process.c
+6
-3
arch/ppc/kernel/process.c
arch/ppc/kernel/process.c
+9
-3
arch/ppc/kernel/smp.c
arch/ppc/kernel/smp.c
+2
-4
arch/ppc64/kernel/process.c
arch/ppc64/kernel/process.c
+9
-3
arch/ppc64/kernel/smp.c
arch/ppc64/kernel/smp.c
+2
-4
arch/s390/kernel/process.c
arch/s390/kernel/process.c
+9
-4
arch/s390/kernel/smp.c
arch/s390/kernel/smp.c
+4
-6
arch/s390x/kernel/process.c
arch/s390x/kernel/process.c
+9
-4
arch/s390x/kernel/smp.c
arch/s390x/kernel/smp.c
+4
-6
arch/sh/kernel/process.c
arch/sh/kernel/process.c
+9
-3
arch/sparc/kernel/entry.S
arch/sparc/kernel/entry.S
+5
-4
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/sun4d_smp.c
+1
-1
arch/sparc/kernel/sun4m_smp.c
arch/sparc/kernel/sun4m_smp.c
+1
-1
arch/sparc64/kernel/entry.S
arch/sparc64/kernel/entry.S
+1
-1
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/smp.c
+1
-1
arch/x86_64/ia32/sys_ia32.c
arch/x86_64/ia32/sys_ia32.c
+9
-3
arch/x86_64/kernel/entry.S
arch/x86_64/kernel/entry.S
+1
-1
arch/x86_64/kernel/process.c
arch/x86_64/kernel/process.c
+9
-3
arch/x86_64/kernel/smpboot.c
arch/x86_64/kernel/smpboot.c
+4
-7
include/asm-generic/tlb.h
include/asm-generic/tlb.h
+56
-60
include/asm-i386/pgalloc.h
include/asm-i386/pgalloc.h
+4
-0
include/linux/mm.h
include/linux/mm.h
+0
-2
include/linux/sched.h
include/linux/sched.h
+2
-2
kernel/fork.c
kernel/fork.c
+13
-20
lib/zlib_inflate/inflate.c
lib/zlib_inflate/inflate.c
+1
-1
mm/memory.c
mm/memory.c
+44
-65
mm/mmap.c
mm/mmap.c
+48
-31
No files found.
arch/alpha/kernel/process.c
View file @
a68d9759
...
...
@@ -260,16 +260,22 @@ int
alpha_clone
(
unsigned
long
clone_flags
,
unsigned
long
usp
,
struct
switch_stack
*
swstack
)
{
struct
task_struct
*
p
;
if
(
!
usp
)
usp
=
rdusp
();
return
do_fork
(
clone_flags
,
usp
,
(
struct
pt_regs
*
)
(
swstack
+
1
),
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
usp
,
(
struct
pt_regs
*
)
(
swstack
+
1
),
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
alpha_vfork
(
struct
switch_stack
*
swstack
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
rdusp
(),
(
struct
pt_regs
*
)
(
swstack
+
1
),
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
rdusp
(),
(
struct
pt_regs
*
)
(
swstack
+
1
),
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/alpha/kernel/smp.c
View file @
a68d9759
...
...
@@ -433,13 +433,13 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
return
0
;
}
static
int
__init
static
struct
task_struct
*
__init
fork_by_hand
(
void
)
{
/* Don't care about the contents of regs since we'll never
reschedule the forked task. */
struct
pt_regs
regs
;
return
do_fork
(
CLONE_VM
|
CLONE_
PID
,
0
,
&
regs
,
0
);
return
do_fork
(
CLONE_VM
|
CLONE_
IDLETASK
,
0
,
&
regs
,
0
);
}
/*
...
...
@@ -457,13 +457,10 @@ smp_boot_one_cpu(int cpuid, int cpunum)
the other task-y sort of data structures set up like we
wish. We can't use kernel_thread since we must avoid
rescheduling the child. */
if
(
fork_by_hand
()
<
0
)
idle
=
fork_by_hand
();
if
(
IS_ERR
(
idle
))
panic
(
"failed fork for CPU %d"
,
cpuid
);
idle
=
prev_task
(
&
init_task
);
if
(
!
idle
)
panic
(
"No idle process for CPU %d"
,
cpuid
);
init_idle
(
idle
,
cpuid
);
unhash_process
(
idle
);
...
...
arch/arm/kernel/sys_arm.c
View file @
a68d9759
...
...
@@ -238,7 +238,9 @@ asmlinkage int sys_ipc (uint call, int first, int second, int third, void *ptr,
*/
asmlinkage
int
sys_fork
(
struct
pt_regs
*
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
->
ARM_sp
,
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
->
ARM_sp
,
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/* Clone a task - this clones the calling program thread.
...
...
@@ -246,14 +248,18 @@ asmlinkage int sys_fork(struct pt_regs *regs)
*/
asmlinkage
int
sys_clone
(
unsigned
long
clone_flags
,
unsigned
long
newsp
,
struct
pt_regs
*
regs
)
{
struct
task_struct
*
p
;
if
(
!
newsp
)
newsp
=
regs
->
ARM_sp
;
return
do_fork
(
clone_flags
,
newsp
,
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys_vfork
(
struct
pt_regs
*
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
ARM_sp
,
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
ARM_sp
,
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/* sys_execve() executes a new program.
...
...
arch/cris/kernel/process.c
View file @
a68d9759
...
...
@@ -299,7 +299,9 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
asmlinkage
int
sys_fork
(
long
r10
,
long
r11
,
long
r12
,
long
r13
,
long
mof
,
long
srp
,
struct
pt_regs
*
regs
)
{
return
do_fork
(
SIGCHLD
,
rdusp
(),
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
rdusp
(),
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/* if newusp is 0, we just grab the old usp */
...
...
@@ -308,9 +310,11 @@ asmlinkage int sys_clone(unsigned long newusp, unsigned long flags,
long
r12
,
long
r13
,
long
mof
,
long
srp
,
struct
pt_regs
*
regs
)
{
struct
task_struct
*
p
;
if
(
!
newusp
)
newusp
=
rdusp
();
return
do_fork
(
flags
,
newusp
,
regs
,
0
);
p
=
do_fork
(
flags
&
~
CLONE_IDLETASK
,
newusp
,
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/* vfork is a system call in i386 because of register-pressure - maybe
...
...
@@ -320,7 +324,9 @@ asmlinkage int sys_clone(unsigned long newusp, unsigned long flags,
asmlinkage
int
sys_vfork
(
long
r10
,
long
r11
,
long
r12
,
long
r13
,
long
mof
,
long
srp
,
struct
pt_regs
*
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
rdusp
(),
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
rdusp
(),
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/i386/kernel/process.c
View file @
a68d9759
...
...
@@ -711,11 +711,15 @@ void __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
asmlinkage
int
sys_fork
(
struct
pt_regs
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
.
esp
,
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
.
esp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys_clone
(
struct
pt_regs
regs
)
{
struct
task_struct
*
p
;
unsigned
long
clone_flags
;
unsigned
long
newsp
;
...
...
@@ -723,7 +727,8 @@ asmlinkage int sys_clone(struct pt_regs regs)
newsp
=
regs
.
ecx
;
if
(
!
newsp
)
newsp
=
regs
.
esp
;
return
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
@@ -738,7 +743,10 @@ asmlinkage int sys_clone(struct pt_regs regs)
*/
asmlinkage
int
sys_vfork
(
struct
pt_regs
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
esp
,
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
esp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/i386/kernel/setup.c
View file @
a68d9759
...
...
@@ -2153,11 +2153,6 @@ static void __init init_intel(struct cpuinfo_x86 *c)
strcpy
(
c
->
x86_model_id
,
p
);
#ifdef CONFIG_SMP
/* PGE CPUID bug: Pentium4 supports PGE, but seems to have SMP bugs.. */
if
(
c
->
x86
==
15
)
clear_bit
(
X86_FEATURE_PGE
,
c
->
x86_capability
);
if
(
test_bit
(
X86_FEATURE_HT
,
c
->
x86_capability
))
{
extern
int
phys_proc_id
[
NR_CPUS
];
...
...
arch/i386/kernel/smpboot.c
View file @
a68d9759
...
...
@@ -529,14 +529,14 @@ extern struct {
unsigned
short
ss
;
}
stack_start
;
static
int
__init
fork_by_hand
(
void
)
static
struct
task_struct
*
__init
fork_by_hand
(
void
)
{
struct
pt_regs
regs
;
/*
* don't care about the eip and regs settings since
* we'll never reschedule the forked task.
*/
return
do_fork
(
CLONE_VM
|
CLONE_
PID
,
0
,
&
regs
,
0
);
return
do_fork
(
CLONE_VM
|
CLONE_
IDLETASK
,
0
,
&
regs
,
0
);
}
/* which physical APIC ID maps to which logical CPU number */
...
...
@@ -822,17 +822,14 @@ static void __init do_boot_cpu (int apicid)
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
if
(
fork_by_hand
()
<
0
)
idle
=
fork_by_hand
();
if
(
IS_ERR
(
idle
))
panic
(
"failed fork for CPU %d"
,
cpu
);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
idle
=
prev_task
(
&
init_task
);
if
(
!
idle
)
panic
(
"No idle process for CPU %d"
,
cpu
);
init_idle
(
idle
,
cpu
);
map_cpu_to_boot_apicid
(
cpu
,
apicid
);
...
...
arch/ia64/ia32/ia32_entry.S
View file @
a68d9759
...
...
@@ -41,7 +41,7 @@ ENTRY(ia32_clone)
mov
out3
=
16
//
stacksize
(
compensates
for
16
-
byte
scratch
area
)
adds
out2
=
IA64_SWITCH_STACK_SIZE
+
16
,
sp
//
out2
=
&
regs
zxt4
out0
=
in0
//
out0
=
clone_flags
br.call.sptk.many
rp
=
do_fork
br.call.sptk.many
rp
=
do_fork
_WITHOUT_CLONE_IDLETASK
//
FIXME
:
mask
out
CLONE_IDLETASK
from
flags
,
and
return
value
now
task_struct
*
.
.
ret0
:
.
restore
sp
adds
sp
=
IA64_SWITCH_STACK_SIZE
,
sp
//
pop
the
switch
stack
mov
ar
.
pfs
=
loc1
...
...
@@ -167,7 +167,7 @@ GLOBAL_ENTRY(sys32_fork)
mov
out1
=
0
mov
out3
=
0
adds
out2
=
IA64_SWITCH_STACK_SIZE
+
16
,
sp
//
out2
=
&
regs
br.call.sptk.few
rp
=
do_fork
br.call.sptk.few
rp
=
do_fork
_FIXME_RETURNS_TASK_STRUCT
.
ret5
:
mov
ar
.
pfs
=
loc1
.
restore
sp
adds
sp
=
IA64_SWITCH_STACK_SIZE
,
sp
//
pop
the
switch
stack
...
...
arch/ia64/kernel/entry.S
View file @
a68d9759
...
...
@@ -101,7 +101,7 @@ GLOBAL_ENTRY(sys_clone2)
mov
out3
=
in2
adds
out2
=
IA64_SWITCH_STACK_SIZE
+
16
,
sp
//
out2
=
&
regs
mov
out0
=
in0
//
out0
=
clone_flags
br.call.sptk.many
rp
=
do_fork
br.call.sptk.many
rp
=
do_fork
_WITHOUT_CLONE_IDLETASK
//
FIXME
:
mask
out
CLONE_IDLETASK
from
flags
,
and
now
returns
task_struct
*
.
.
ret1
:
.
restore
sp
adds
sp
=
IA64_SWITCH_STACK_SIZE
,
sp
//
pop
the
switch
stack
mov
ar
.
pfs
=
loc1
...
...
@@ -120,7 +120,7 @@ GLOBAL_ENTRY(sys_clone)
mov
out3
=
16
//
stacksize
(
compensates
for
16
-
byte
scratch
area
)
adds
out2
=
IA64_SWITCH_STACK_SIZE
+
16
,
sp
//
out2
=
&
regs
mov
out0
=
in0
//
out0
=
clone_flags
br.call.sptk.many
rp
=
do_fork
br.call.sptk.many
rp
=
do_fork
_WITHOUT_CLONE_IDLETASK
//
FIXME
:
mask
out
CLONE_IDLETASK
from
flags
,
and
now
return
task_struct
*
.
.
ret2
:
.
restore
sp
adds
sp
=
IA64_SWITCH_STACK_SIZE
,
sp
//
pop
the
switch
stack
mov
ar
.
pfs
=
loc1
...
...
arch/ia64/kernel/smpboot.c
View file @
a68d9759
...
...
@@ -391,14 +391,14 @@ start_secondary (void *unused)
return
cpu_idle
();
}
static
int
__init
static
struct
task_struct
*
__init
fork_by_hand
(
void
)
{
/*
* don't care about the eip and regs settings since
* we'll never reschedule the forked task.
*/
return
do_fork
(
CLONE_VM
|
CLONE_
PID
,
0
,
0
,
0
);
return
do_fork
(
CLONE_VM
|
CLONE_
IDLETASK
,
0
,
0
,
0
);
}
static
void
__init
...
...
@@ -412,17 +412,14 @@ do_boot_cpu (int sapicid)
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
if
(
fork_by_hand
()
<
0
)
idle
=
fork_by_hand
();
if
(
IS_ERR
(
idle
))
panic
(
"failed fork for CPU %d"
,
cpu
);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
idle
=
prev_task
(
&
init_task
);
if
(
!
idle
)
panic
(
"No idle process for CPU %d"
,
cpu
);
init_idle
(
idle
,
cpu
);
ia64_cpu_to_sapicid
[
cpu
]
=
sapicid
;
...
...
arch/m68k/kernel/process.c
View file @
a68d9759
...
...
@@ -177,25 +177,31 @@ void flush_thread(void)
asmlinkage
int
m68k_fork
(
struct
pt_regs
*
regs
)
{
return
do_fork
(
SIGCHLD
,
rdusp
(),
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
rdusp
(),
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
m68k_vfork
(
struct
pt_regs
*
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
rdusp
(),
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
rdusp
(),
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
m68k_clone
(
struct
pt_regs
*
regs
)
{
unsigned
long
clone_flags
;
unsigned
long
newsp
;
struct
task_struct
*
p
;
/* syscall2 puts clone_flags in d1 and usp in d2 */
clone_flags
=
regs
->
d1
;
newsp
=
regs
->
d2
;
if
(
!
newsp
)
newsp
=
rdusp
();
return
do_fork
(
clone_flags
,
newsp
,
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
copy_thread
(
int
nr
,
unsigned
long
clone_flags
,
unsigned
long
usp
,
...
...
arch/mips/kernel/smp.c
View file @
a68d9759
...
...
@@ -122,8 +122,7 @@ void __init smp_boot_cpus(void)
/* Spawn a new process normally. Grab a pointer to
its task struct so we can mess with it */
do_fork
(
CLONE_VM
|
CLONE_PID
,
0
,
&
regs
,
0
);
p
=
prev_task
(
&
init_task
);
p
=
do_fork
(
CLONE_VM
|
CLONE_IDLETASK
,
0
,
&
regs
,
0
);
/* Schedule the first task manually */
p
->
processor
=
i
;
...
...
@@ -151,7 +150,7 @@ void __init smp_boot_cpus(void)
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
kernel_thread(0, NULL, CLONE_
PID
);
kernel_thread(0, NULL, CLONE_
IDLETASK
);
p = prev_task(&init_task);
sprintf(p->comm, "%s%d", "Idle", i);
init_tasks[i] = p;
...
...
arch/mips/kernel/syscall.c
View file @
a68d9759
...
...
@@ -95,10 +95,10 @@ sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
save_static_function
(
sys_fork
);
static_unused
int
_sys_fork
(
struct
pt_regs
regs
)
{
int
res
;
struct
task_struct
*
p
;
res
=
do_fork
(
SIGCHLD
,
regs
.
regs
[
29
],
&
regs
,
0
);
return
res
;
p
=
do_fork
(
SIGCHLD
,
regs
.
regs
[
29
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
...
...
@@ -107,14 +107,14 @@ static_unused int _sys_clone(struct pt_regs regs)
{
unsigned
long
clone_flags
;
unsigned
long
newsp
;
int
res
;
struct
task_struct
*
p
;
clone_flags
=
regs
.
regs
[
4
];
newsp
=
regs
.
regs
[
5
];
if
(
!
newsp
)
newsp
=
regs
.
regs
[
29
];
res
=
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
return
res
;
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/mips64/kernel/syscall.c
View file @
a68d9759
...
...
@@ -77,26 +77,26 @@ sys_mmap(unsigned long addr, size_t len, unsigned long prot,
asmlinkage
int
sys_fork
(
abi64_no_regargs
,
struct
pt_regs
regs
)
{
int
res
;
struct
task_struct
*
p
;
save_static
(
&
regs
);
res
=
do_fork
(
SIGCHLD
,
regs
.
regs
[
29
],
&
regs
,
0
);
return
res
;
p
=
do_fork
(
SIGCHLD
,
regs
.
regs
[
29
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys_clone
(
abi64_no_regargs
,
struct
pt_regs
regs
)
{
unsigned
long
clone_flags
;
unsigned
long
newsp
;
int
res
;
struct
task_struct
*
p
;
save_static
(
&
regs
);
clone_flags
=
regs
.
regs
[
4
];
newsp
=
regs
.
regs
[
5
];
if
(
!
newsp
)
newsp
=
regs
.
regs
[
29
];
res
=
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
return
res
;
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/mips64/sgi-ip27/ip27-init.c
View file @
a68d9759
...
...
@@ -490,7 +490,7 @@ void allowboot(void)
* The following code is purely to make sure
* Linux can schedule processes on this slave.
*/
kernel_thread
(
0
,
NULL
,
CLONE_
PID
);
kernel_thread
(
0
,
NULL
,
CLONE_
IDLETASK
);
p
=
prev_task
(
&
init_task
);
sprintf
(
p
->
comm
,
"%s%d"
,
"Idle"
,
num_cpus
);
init_tasks
[
num_cpus
]
=
p
;
...
...
arch/parisc/kernel/entry.S
View file @
a68d9759
...
...
@@ -500,7 +500,7 @@ __kernel_thread:
ldo
CLONE_VM
(%
r0
),
%
r26
/*
Force
CLONE_VM
since
only
init_mm
*/
or
%
r26
,
%
r24
,
%
r26
/*
will
have
kernel
mappings
.
*/
copy
%
r0
,
%
r25
bl
do_fork
,
%
r2
bl
do_fork
_FIXME_NOW_RETURNS_TASK_STRUCT
,
%
r2
copy
%
r1
,
%
r24
/
*
Parent
Returns
here
*/
...
...
arch/parisc/kernel/process.c
View file @
a68d9759
...
...
@@ -159,14 +159,17 @@ int
sys_clone
(
unsigned
long
clone_flags
,
unsigned
long
usp
,
struct
pt_regs
*
regs
)
{
return
do_fork
(
clone_flags
,
usp
,
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
usp
,
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_vfork
(
struct
pt_regs
*
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
gr
[
30
],
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
gr
[
30
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
...
...
arch/ppc/kernel/process.c
View file @
a68d9759
...
...
@@ -437,22 +437,28 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
int
sys_clone
(
int
p1
,
int
p2
,
int
p3
,
int
p4
,
int
p5
,
int
p6
,
struct
pt_regs
*
regs
)
{
struct
task_struct
*
p
;
CHECK_FULL_REGS
(
regs
);
return
do_fork
(
p1
,
regs
->
gpr
[
1
],
regs
,
0
);
p
=
do_fork
(
p1
&
~
CLONE_IDLETASK
,
regs
->
gpr
[
1
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_fork
(
int
p1
,
int
p2
,
int
p3
,
int
p4
,
int
p5
,
int
p6
,
struct
pt_regs
*
regs
)
{
struct
task_struct
*
p
;
CHECK_FULL_REGS
(
regs
);
return
do_fork
(
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
p
=
do_fork
(
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_vfork
(
int
p1
,
int
p2
,
int
p3
,
int
p4
,
int
p5
,
int
p6
,
struct
pt_regs
*
regs
)
{
struct
task_struct
*
p
;
CHECK_FULL_REGS
(
regs
);
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_execve
(
unsigned
long
a0
,
unsigned
long
a1
,
unsigned
long
a2
,
...
...
arch/ppc/kernel/smp.c
View file @
a68d9759
...
...
@@ -343,11 +343,9 @@ void __init smp_boot_cpus(void)
/* create a process for the processor */
/* only regs.msr is actually used, and 0 is OK for it */
memset
(
&
regs
,
0
,
sizeof
(
struct
pt_regs
));
if
(
do_fork
(
CLONE_VM
|
CLONE_PID
,
0
,
&
regs
,
0
)
<
0
)
p
=
do_fork
(
CLONE_VM
|
CLONE_IDLETASK
,
0
,
&
regs
,
0
);
if
(
IS_ERR
(
p
))
panic
(
"failed fork for CPU %d"
,
i
);
p
=
prev_task
(
&
init_task
);
if
(
!
p
)
panic
(
"No idle task for CPU %d"
,
i
);
init_idle
(
p
,
i
);
unhash_process
(
p
);
...
...
arch/ppc64/kernel/process.c
View file @
a68d9759
...
...
@@ -256,19 +256,25 @@ void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
int
sys_clone
(
int
p1
,
int
p2
,
int
p3
,
int
p4
,
int
p5
,
int
p6
,
struct
pt_regs
*
regs
)
{
return
do_fork
(
p1
,
regs
->
gpr
[
1
],
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
p1
&
~
CLONE_IDLETASK
,
regs
->
gpr
[
1
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_fork
(
int
p1
,
int
p2
,
int
p3
,
int
p4
,
int
p5
,
int
p6
,
struct
pt_regs
*
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_vfork
(
int
p1
,
int
p2
,
int
p3
,
int
p4
,
int
p5
,
int
p6
,
struct
pt_regs
*
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
->
gpr
[
1
],
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
int
sys_execve
(
unsigned
long
a0
,
unsigned
long
a1
,
unsigned
long
a2
,
...
...
arch/ppc64/kernel/smp.c
View file @
a68d9759
...
...
@@ -640,11 +640,9 @@ void __init smp_boot_cpus(void)
memset
(
&
regs
,
0
,
sizeof
(
struct
pt_regs
));
if
(
do_fork
(
CLONE_VM
|
CLONE_PID
,
0
,
&
regs
,
0
)
<
0
)
p
=
do_fork
(
CLONE_VM
|
CLONE_IDLETASK
,
0
,
&
regs
,
0
);
if
(
IS_ERR
(
p
))
panic
(
"failed fork for CPU %d"
,
i
);
p
=
prev_task
(
&
init_task
);
if
(
!
p
)
panic
(
"No idle task for CPU %d"
,
i
);
init_idle
(
p
,
i
);
...
...
arch/s390/kernel/process.c
View file @
a68d9759
...
...
@@ -332,19 +332,23 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
asmlinkage
int
sys_fork
(
struct
pt_regs
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys_clone
(
struct
pt_regs
regs
)
{
unsigned
long
clone_flags
;
unsigned
long
newsp
;
struct
task_struct
*
p
;
clone_flags
=
regs
.
gprs
[
3
];
newsp
=
regs
.
orig_gpr2
;
if
(
!
newsp
)
newsp
=
regs
.
gprs
[
15
];
return
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
@@ -359,8 +363,9 @@ asmlinkage int sys_clone(struct pt_regs regs)
*/
asmlinkage
int
sys_vfork
(
struct
pt_regs
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/s390/kernel/smp.c
View file @
a68d9759
...
...
@@ -505,13 +505,13 @@ void __init initialize_secondary(void)
{
}
static
int
__init
fork_by_hand
(
void
)
static
struct
task_struct
*
__init
fork_by_hand
(
void
)
{
struct
pt_regs
regs
;
/* don't care about the psw and regs settings since we'll never
reschedule the forked task. */
memset
(
&
regs
,
0
,
sizeof
(
struct
pt_regs
));
return
do_fork
(
CLONE_VM
|
CLONE_
PID
,
0
,
&
regs
,
0
);
return
do_fork
(
CLONE_VM
|
CLONE_
IDLETASK
,
0
,
&
regs
,
0
);
}
static
void
__init
do_boot_cpu
(
int
cpu
)
...
...
@@ -521,16 +521,14 @@ static void __init do_boot_cpu(int cpu)
/* We can't use kernel_thread since we must _avoid_ to reschedule
the child. */
if
(
fork_by_hand
()
<
0
)
idle
=
fork_by_hand
();
if
(
IS_ERR
(
idle
))
panic
(
"failed fork for CPU %d"
,
cpu
);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
idle
=
prev_task
(
&
init_task
);
if
(
!
idle
)
panic
(
"No idle process for CPU %d"
,
cpu
);
idle
->
processor
=
cpu
;
idle
->
cpus_runnable
=
1
<<
cpu
;
/* we schedule the first task manually */
...
...
arch/s390x/kernel/process.c
View file @
a68d9759
...
...
@@ -331,19 +331,23 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
asmlinkage
int
sys_fork
(
struct
pt_regs
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys_clone
(
struct
pt_regs
regs
)
{
unsigned
long
clone_flags
;
unsigned
long
newsp
;
struct
task_struct
*
p
;
clone_flags
=
regs
.
gprs
[
3
];
newsp
=
regs
.
orig_gpr2
;
if
(
!
newsp
)
newsp
=
regs
.
gprs
[
15
];
return
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
@@ -358,8 +362,9 @@ asmlinkage int sys_clone(struct pt_regs regs)
*/
asmlinkage
int
sys_vfork
(
struct
pt_regs
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
gprs
[
15
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/s390x/kernel/smp.c
View file @
a68d9759
...
...
@@ -484,13 +484,13 @@ void __init initialize_secondary(void)
{
}
static
int
__init
fork_by_hand
(
void
)
static
struct
task_struct
*
__init
fork_by_hand
(
void
)
{
struct
pt_regs
regs
;
/* don't care about the psw and regs settings since we'll never
reschedule the forked task. */
memset
(
&
regs
,
0
,
sizeof
(
struct
pt_regs
));
return
do_fork
(
CLONE_VM
|
CLONE_
PID
,
0
,
&
regs
,
0
);
return
do_fork
(
CLONE_VM
|
CLONE_
IDLETASK
,
0
,
&
regs
,
0
);
}
static
void
__init
do_boot_cpu
(
int
cpu
)
...
...
@@ -500,16 +500,14 @@ static void __init do_boot_cpu(int cpu)
/* We can't use kernel_thread since we must _avoid_ to reschedule
the child. */
if
(
fork_by_hand
()
<
0
)
idle
=
fork_by_hand
();
if
(
IS_ERR
(
idle
))
panic
(
"failed fork for CPU %d"
,
cpu
);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
idle
=
prev_task
(
&
init_task
);
if
(
!
idle
)
panic
(
"No idle process for CPU %d"
,
cpu
);
idle
->
processor
=
cpu
;
idle
->
cpus_runnable
=
1
<<
cpu
;
/* we schedule the first task manually */
...
...
arch/sh/kernel/process.c
View file @
a68d9759
...
...
@@ -276,16 +276,20 @@ asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
unsigned
long
r6
,
unsigned
long
r7
,
struct
pt_regs
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
.
regs
[
15
],
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
.
regs
[
15
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys_clone
(
unsigned
long
clone_flags
,
unsigned
long
newsp
,
unsigned
long
r6
,
unsigned
long
r7
,
struct
pt_regs
regs
)
{
struct
task_struct
*
p
;
if
(
!
newsp
)
newsp
=
regs
.
regs
[
15
];
return
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
@@ -302,7 +306,9 @@ asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
unsigned
long
r6
,
unsigned
long
r7
,
struct
pt_regs
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
regs
[
15
],
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
regs
[
15
],
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/sparc/kernel/entry.S
View file @
a68d9759
...
...
@@ -1393,7 +1393,7 @@ flush_patch_two:
std
%
g4
,
[%
curptr
+
AOFF_task_thread
+
AOFF_thread_fork_kpsr
]
add
%
sp
,
REGWIN_SZ
,
%
o2
!
arg2
:
pt_regs
ptr
mov
0
,
%
o3
call
C_LABEL
(
do_fork
)
call
C_LABEL
(
do_fork
_FIXME_NOW_RETURNS_TASK_STRUCT
)
mov
%
l5
,
%
o7
/
*
Whee
,
kernel
threads
!
*/
...
...
@@ -1416,7 +1416,8 @@ flush_patch_three:
std
%
g4
,
[%
curptr
+
AOFF_task_thread
+
AOFF_thread_fork_kpsr
]
add
%
sp
,
REGWIN_SZ
,
%
o2
!
arg2
:
pt_regs
ptr
mov
0
,
%
o3
call
C_LABEL
(
do_fork
)
/
*
FIXME
:
remove
CLONE_IDLETASK
from
flags
first
*/
call
C_LABEL
(
do_fork_WITHOUT_CLONE_IDLETASK
)
mov
%
l5
,
%
o7
/
*
Whee
,
real
vfork
!
*/
...
...
@@ -1432,9 +1433,9 @@ flush_patch_four:
sethi
%
hi
(
0x4000
| 0x0100 |
SIGCHLD
),
%
o0
mov
%
fp
,
%
o1
or
%
o0
,
%
lo
(
0x4000
| 0x0100 |
SIGCHLD
),
%
o0
sethi
%
hi
(
C_LABEL
(
do_fork
)),
%
l1
sethi
%
hi
(
C_LABEL
(
do_fork
_FIXME_NOW_RETURNS_TASK_STRUCT
)),
%
l1
mov
0
,
%
o3
jmpl
%
l1
+
%
lo
(
C_LABEL
(
do_fork
)),
%
g0
jmpl
%
l1
+
%
lo
(
C_LABEL
(
do_fork
_FIXME_NOW_RETURNS_TASK_STRUCT
)),
%
g0
add
%
sp
,
REGWIN_SZ
,
%
o2
.
align
4
...
...
arch/sparc/kernel/sun4d_smp.c
View file @
a68d9759
...
...
@@ -214,7 +214,7 @@ void __init smp4d_boot_cpus(void)
int
no
;
/* Cook up an idler for this guy. */
kernel_thread
(
start_secondary
,
NULL
,
CLONE_
PID
);
kernel_thread
(
start_secondary
,
NULL
,
CLONE_
IDLETASK
);
cpucount
++
;
...
...
arch/sparc/kernel/sun4m_smp.c
View file @
a68d9759
...
...
@@ -187,7 +187,7 @@ void __init smp4m_boot_cpus(void)
int
timeout
;
/* Cook up an idler for this guy. */
kernel_thread
(
start_secondary
,
NULL
,
CLONE_
PID
);
kernel_thread
(
start_secondary
,
NULL
,
CLONE_
IDLETASK
);
cpucount
++
;
...
...
arch/sparc64/kernel/entry.S
View file @
a68d9759
...
...
@@ -1429,7 +1429,7 @@ sys_fork: clr %o1
sys_clone
:
flushw
movrz
%
o1
,
%
fp
,
%
o1
mov
0
,
%
o3
ba
,
pt
%
xcc
,
do_fork
ba
,
pt
%
xcc
,
do_fork
_FIXME_NOW_RETURNS_TASK_STRUCT
add
%
sp
,
STACK_BIAS
+
REGWIN_SZ
,
%
o2
ret_from_syscall
:
/
*
Clear
SPARC_FLAG_NEWCHILD
,
switch_to
leaves
thread
.
flags
in
...
...
arch/sparc64/kernel/smp.c
View file @
a68d9759
...
...
@@ -268,7 +268,7 @@ void __init smp_boot_cpus(void)
int
no
;
prom_printf
(
"Starting CPU %d... "
,
i
);
kernel_thread
(
NULL
,
NULL
,
CLONE_
PID
);
kernel_thread
(
NULL
,
NULL
,
CLONE_
IDLETASK
);
cpucount
++
;
p
=
prev_task
(
&
init_task
);
...
...
arch/x86_64/ia32/sys_ia32.c
View file @
a68d9759
...
...
@@ -2683,14 +2683,18 @@ int sys32_execve(char *name, u32 argv, u32 envp, struct pt_regs regs)
asmlinkage
int
sys32_fork
(
struct
pt_regs
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
.
rsp
,
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
.
rsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
int
sys32_clone
(
unsigned
int
clone_flags
,
unsigned
int
newsp
,
struct
pt_regs
regs
)
{
struct
task_struct
*
p
;
if
(
!
newsp
)
newsp
=
regs
.
rsp
;
return
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
@@ -2705,7 +2709,9 @@ asmlinkage int sys32_clone(unsigned int clone_flags, unsigned int newsp, struct
*/
asmlinkage
int
sys32_vfork
(
struct
pt_regs
regs
)
{
return
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
rsp
,
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
CLONE_VFORK
|
CLONE_VM
|
SIGCHLD
,
regs
.
rsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
arch/x86_64/kernel/entry.S
View file @
a68d9759
...
...
@@ -570,7 +570,7 @@ ENTRY(kernel_thread)
movq
%
rsp
,
%
rdx
#
clone
now
call
do_fork
call
do_fork
_FIXME_NOW_RETURNS_TASK_STRUCT
#
save
retval
on
the
stack
so
it
's popped before `ret`
movq
%
rax
,
RAX
(%
rsp
)
...
...
arch/x86_64/kernel/process.c
View file @
a68d9759
...
...
@@ -608,14 +608,18 @@ void set_personality_64bit(void)
asmlinkage
long
sys_fork
(
struct
pt_regs
regs
)
{
return
do_fork
(
SIGCHLD
,
regs
.
rsp
,
&
regs
,
0
);
struct
task_struct
*
p
;
p
=
do_fork
(
SIGCHLD
,
regs
.
rsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
asmlinkage
long
sys_clone
(
unsigned
long
clone_flags
,
unsigned
long
newsp
,
struct
pt_regs
regs
)
{
struct
task_struct
*
p
;
if
(
!
newsp
)
newsp
=
regs
.
rsp
;
return
do_fork
(
clone_flags
,
newsp
,
&
regs
,
0
);
p
=
do_fork
(
clone_flags
&
~
CLONE_IDLETASK
,
newsp
,
&
regs
,
0
);
return
IS_ERR
(
p
)
?
PTR_ERR
(
p
)
:
p
->
pid
;
}
/*
...
...
@@ -630,7 +634,9 @@ asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, struct
*/
/*
 * x86-64 vfork() entry point.
 *
 * CLONE_VFORK blocks the parent until the child releases the shared
 * mm (on exec or exit).  Returns the child's pid, or a negative
 * errno decoded from do_fork()'s ERR_PTR return.
 */
asmlinkage long sys_vfork(struct pt_regs regs)
{
	struct task_struct *p;

	p = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.rsp, &regs, 0);
	return IS_ERR(p) ? PTR_ERR(p) : p->pid;
}
/*
...
...
arch/x86_64/kernel/smpboot.c
View file @
a68d9759
...
...
@@ -476,14 +476,14 @@ void __init initialize_secondary(void)
extern
volatile
unsigned
long
init_rsp
;
extern
void
(
*
initial_code
)(
void
);
/*
 * Hand-craft the idle task for a secondary CPU.
 *
 * CLONE_IDLETASK (kernel-only) makes the child get pid 0.  Returns
 * the new task_struct, or an ERR_PTR on failure — the caller checks
 * with IS_ERR().
 */
static struct task_struct * __init fork_by_hand(void)
{
	struct pt_regs regs;

	/*
	 * don't care about the rip and regs settings since
	 * we'll never reschedule the forked task.
	 */
	return do_fork(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0);
}
#if APIC_DEBUG
...
...
@@ -538,17 +538,14 @@ static void __init do_boot_cpu (int apicid)
* We can't use kernel_thread since we must avoid to
* reschedule the child.
*/
if
(
fork_by_hand
()
<
0
)
idle
=
fork_by_hand
();
if
(
IS_ERR
(
idle
))
panic
(
"failed fork for CPU %d"
,
cpu
);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
idle
=
prev_task
(
&
init_task
);
if
(
!
idle
)
panic
(
"No idle process for CPU %d"
,
cpu
);
init_idle
(
idle
,
cpu
);
x86_cpu_to_apicid
[
cpu
]
=
apicid
;
...
...
include/asm-generic/tlb.h
View file @
a68d9759
...
...
@@ -16,7 +16,6 @@
#include <linux/config.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_SMP
/* aim for something that fits in the L1 cache */
#define FREE_PTE_NR 508
...
...
@@ -26,90 +25,87 @@
* shootdown.
*/
typedef
struct
free_pte_ctx
{
struct
vm_area_struct
*
vma
;
struct
mm_struct
*
mm
;
unsigned
long
nr
;
/* set to ~0UL means fast mode */
unsigned
long
start_addr
,
end_addr
;
pte_t
pt
es
[
FREE_PTE_NR
];
unsigned
long
freed
;
struct
page
*
pag
es
[
FREE_PTE_NR
];
}
mmu_gather_t
;
/* Users of the generic TLB shootdown code must declare this storage space. */
extern
mmu_gather_t
mmu_gathers
[
NR_CPUS
];
/* Do me later */
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
/* tlb_gather_mmu
* Return a pointer to an initialized mmu_gather_t.
*/
static
inline
mmu_gather_t
*
tlb_gather_mmu
(
struct
vm_area_struct
*
vma
)
static
inline
mmu_gather_t
*
tlb_gather_mmu
(
struct
mm_struct
*
mm
)
{
mmu_gather_t
*
tlb
=
&
mmu_gathers
[
smp_processor_id
()];
struct
mm_struct
*
mm
=
vma
->
vm_mm
;
unsigned
long
nr
;
tlb
->
vma
=
vma
;
/* Use fast mode if there is only one user of this mm (this process) */
tlb
->
nr
=
(
atomic_read
(
&
(
mm
)
->
mm_users
)
==
1
)
?
~
0UL
:
0UL
;
tlb
->
mm
=
mm
;
tlb
->
freed
=
0
;
/* Use fast mode if this MM only exists on this CPU */
nr
=
~
0UL
;
#ifdef CONFIG_SMP
if
(
mm
->
cpu_vm_mask
!=
(
1
<<
smp_processor_id
()))
nr
=
0UL
;
#endif
tlb
->
nr
=
nr
;
return
tlb
;
}
/* void tlb_remove_page(mmu_gather_t *tlb, pte_t *ptep, unsigned long addr)
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs.
*/
#define tlb_remove_page(ctxp, pte, addr) do {\
/* Handle the common case fast, first. */
\
if ((ctxp)->nr == ~0UL) {\
__free_pte(*(pte));\
pte_clear((pte));\
break;\
}\
if (!(ctxp)->nr) \
(ctxp)->start_addr = (addr);\
(ctxp)->ptes[(ctxp)->nr++] = ptep_get_and_clear(pte);\
(ctxp)->end_addr = (addr) + PAGE_SIZE;\
if ((ctxp)->nr >= FREE_PTE_NR)\
tlb_finish_mmu((ctxp), 0, 0);\
} while (0)
/*
 * Flush the TLB for the gathered mm and release any batched pages.
 *
 * In fast mode (tlb->nr == ~0UL) pages were freed eagerly, so only
 * the TLB flush is needed.  Otherwise the batch counter is reset
 * before the pages are handed to free_page_and_swap_cache().
 * @start/@end are accepted for interface symmetry; this generic
 * version flushes the whole mm.
 */
static inline void tlb_flush_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
	unsigned long nr;

	flush_tlb_mm(tlb->mm);
	nr = tlb->nr;
	if (nr != ~0UL) {
		unsigned long i;

		tlb->nr = 0;
		for (i = 0; i < nr; i++)
			free_page_and_swap_cache(tlb->pages[i]);
	}
}
/* tlb_finish_mmu
* Called at the end of the shootdown operation to free up any resources
* that were required. The page table lock is still held at this point.
*/
static inline void tlb_finish_mmu(mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
	int freed = tlb->freed;
	struct mm_struct *mm = tlb->mm;
	int rss = mm->rss;

	/* Account the freed pages against the mm's rss, clamping at 0. */
	if (rss < freed)
		freed = rss;
	mm->rss = rss - freed;

	/* Flush the TLB and release any pages still batched. */
	tlb_flush_mmu(tlb, start, end);
}
#else
/* The uniprocessor functions are quite simple and are inline macros in an
* attempt to get gcc to generate optimal code since this code is run on each
* page in a process at exit.
/* void tlb_remove_page(mmu_gather_t *tlb, pte_t *ptep, unsigned long addr)
* Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
* handling the additional races in SMP caused by other CPUs caching valid
* mappings in their TLBs.
*/
typedef
struct
vm_area_struct
mmu_gather_t
;
#define tlb_gather_mmu(vma) (vma)
#define tlb_finish_mmu(tlb, start, end) flush_tlb_range(tlb, start, end)
#define tlb_remove_page(tlb, ptep, addr) do {\
pte_t __pte = *(ptep);\
pte_clear(ptep);\
__free_pte(__pte);\
} while (0)
#endif
/*
 * Queue @page for freeing once the TLB has been flushed, so no CPU
 * can still be using a stale translation to it.
 */
static inline void tlb_remove_page(mmu_gather_t *tlb, struct page *page)
{
	/*
	 * Fast mode: this mm is only live on this CPU, so the page
	 * can be freed immediately.
	 */
	if (tlb->nr == ~0UL) {
		free_page_and_swap_cache(page);
		return;
	}
	tlb->pages[tlb->nr++] = page;
	/* Batch is full — flush now so the array never overflows. */
	if (tlb->nr >= FREE_PTE_NR)
		tlb_flush_mmu(tlb, 0, 0);
}
#endif
/* _ASM_GENERIC__TLB_H */
include/asm-i386/pgalloc.h
View file @
a68d9759
...
...
@@ -35,6 +35,9 @@ static inline void pte_free(struct page *pte)
__free_page
(
pte
);
}
#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
...
...
@@ -43,6 +46,7 @@ static inline void pte_free(struct page *pte)
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#define check_pgt_cache() do { } while (0)
...
...
include/linux/mm.h
View file @
a68d9759
...
...
@@ -311,8 +311,6 @@ extern mem_map_t * mem_map;
extern
void
show_free_areas
(
void
);
extern
void
show_free_areas_node
(
pg_data_t
*
pgdat
);
extern
void
clear_page_tables
(
struct
mm_struct
*
,
unsigned
long
,
int
);
extern
int
fail_writepage
(
struct
page
*
);
struct
page
*
shmem_nopage
(
struct
vm_area_struct
*
vma
,
unsigned
long
address
,
int
unused
);
struct
file
*
shmem_file_setup
(
char
*
name
,
loff_t
size
);
...
...
include/linux/sched.h
View file @
a68d9759
...
...
@@ -39,7 +39,7 @@ struct exec_domain;
#define CLONE_FS 0x00000200
/* set if fs info shared between processes */
#define CLONE_FILES 0x00000400
/* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800
/* set if signal handlers and blocked signals shared */
#define CLONE_
PID 0x00001000
/* set if pid shared
*/
#define CLONE_
IDLETASK 0x00001000
/* set if new pid should be 0 (kernel only)
*/
#define CLONE_PTRACE 0x00002000
/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000
/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000
/* set if we want to have the same parent as the cloner */
...
...
@@ -663,7 +663,7 @@ extern void daemonize(void);
extern
task_t
*
child_reaper
;
extern
int
do_execve
(
char
*
,
char
**
,
char
**
,
struct
pt_regs
*
);
extern
int
do_fork
(
unsigned
long
,
unsigned
long
,
struct
pt_regs
*
,
unsigned
long
);
extern
struct
task_struct
*
do_fork
(
unsigned
long
,
unsigned
long
,
struct
pt_regs
*
,
unsigned
long
);
extern
void
FASTCALL
(
add_wait_queue
(
wait_queue_head_t
*
q
,
wait_queue_t
*
wait
));
extern
void
FASTCALL
(
add_wait_queue_exclusive
(
wait_queue_head_t
*
q
,
wait_queue_t
*
wait
));
...
...
kernel/fork.c
View file @
a68d9759
...
...
@@ -136,8 +136,8 @@ static int get_pid(unsigned long flags)
struct
task_struct
*
p
;
int
pid
;
if
(
flags
&
CLONE_
PID
)
return
current
->
pid
;
if
(
flags
&
CLONE_
IDLETASK
)
return
0
;
spin_lock
(
&
lastpid_lock
);
if
((
++
last_pid
)
&
0xffff8000
)
{
...
...
@@ -608,27 +608,18 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
* For an example that's using stack_top, see
* arch/ia64/kernel/process.c.
*/
int
do_fork
(
unsigned
long
clone_flags
,
unsigned
long
stack_start
,
struct
pt_regs
*
regs
,
unsigned
long
stack_size
)
struct
task_struct
*
do_fork
(
unsigned
long
clone_flags
,
unsigned
long
stack_start
,
struct
pt_regs
*
regs
,
unsigned
long
stack_size
)
{
int
retval
;
unsigned
long
flags
;
struct
task_struct
*
p
;
struct
task_struct
*
p
=
NULL
;
struct
completion
vfork
;
if
((
clone_flags
&
(
CLONE_NEWNS
|
CLONE_FS
))
==
(
CLONE_NEWNS
|
CLONE_FS
))
return
-
EINVAL
;
retval
=
-
EPERM
;
/*
* CLONE_PID is only allowed for the initial SMP swapper
* calls
*/
if
(
clone_flags
&
CLONE_PID
)
{
if
(
current
->
pid
)
goto
fork_out
;
}
return
ERR_PTR
(
-
EINVAL
);
retval
=
-
ENOMEM
;
p
=
dup_task_struct
(
current
);
...
...
@@ -768,8 +759,7 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
*
* Let it rip!
*/
retval
=
p
->
pid
;
p
->
tgid
=
retval
;
p
->
tgid
=
p
->
pid
;
INIT_LIST_HEAD
(
&
p
->
thread_group
);
/* Need tasklist lock for parent etc handling! */
...
...
@@ -807,9 +797,12 @@ int do_fork(unsigned long clone_flags, unsigned long stack_start,
* COW overhead when the child exec()s afterwards.
*/
set_need_resched
();
retval
=
0
;
fork_out:
return
retval
;
if
(
retval
)
return
ERR_PTR
(
retval
);
return
p
;
bad_fork_cleanup_namespace:
exit_namespace
(
p
);
...
...
lib/zlib_inflate/inflate.c
View file @
a68d9759
...
...
@@ -110,7 +110,7 @@ int stream_size;
#undef NEEDBYTE
#undef NEXTBYTE
#define NEEDBYTE {if(z->avail_in==0)goto empty;r=
f
;}
#define NEEDBYTE {if(z->avail_in==0)goto empty;r=
trv
;}
#define NEXTBYTE (z->avail_in--,z->total_in++,*z->next_in++)
int
ZEXPORT
zlib_inflate
(
z
,
f
)
...
...
mm/memory.c
View file @
a68d9759
...
...
@@ -71,29 +71,11 @@ static inline void copy_cow_page(struct page * from, struct page * to, unsigned
mem_map_t
*
mem_map
;
/*
* Called by TLB shootdown
*/
void
__free_pte
(
pte_t
pte
)
{
struct
page
*
page
;
unsigned
long
pfn
=
pte_pfn
(
pte
);
if
(
!
pfn_valid
(
pfn
))
return
;
page
=
pfn_to_page
(
pfn
);
if
(
PageReserved
(
page
))
return
;
if
(
pte_dirty
(
pte
))
set_page_dirty
(
page
);
free_page_and_swap_cache
(
page
);
}
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
*/
static
inline
void
free_one_pmd
(
pmd_t
*
dir
)
static
inline
void
free_one_pmd
(
mmu_gather_t
*
tlb
,
pmd_t
*
dir
)
{
struct
page
*
pte
;
...
...
@@ -106,10 +88,10 @@ static inline void free_one_pmd(pmd_t * dir)
}
pte
=
pmd_page
(
*
dir
);
pmd_clear
(
dir
);
pte_free
(
pte
);
pte_free
_tlb
(
tlb
,
pte
);
}
static
inline
void
free_one_pgd
(
pgd_t
*
dir
)
static
inline
void
free_one_pgd
(
mmu_gather_t
*
tlb
,
pgd_t
*
dir
)
{
int
j
;
pmd_t
*
pmd
;
...
...
@@ -125,26 +107,26 @@ static inline void free_one_pgd(pgd_t * dir)
pgd_clear
(
dir
);
for
(
j
=
0
;
j
<
PTRS_PER_PMD
;
j
++
)
{
prefetchw
(
pmd
+
j
+
(
PREFETCH_STRIDE
/
16
));
free_one_pmd
(
pmd
+
j
);
free_one_pmd
(
tlb
,
pmd
+
j
);
}
pmd_free
(
pmd
);
pmd_free
_tlb
(
tlb
,
pmd
);
}
/*
* This function clears all user-level page tables of a process - this
* is needed by execve(), so that old pages aren't in the way.
*
* Must be called with pagetable lock held.
*/
void
clear_page_tables
(
struct
mm_struct
*
mm
,
unsigned
long
first
,
int
nr
)
void
clear_page_tables
(
mmu_gather_t
*
tlb
,
unsigned
long
first
,
int
nr
)
{
pgd_t
*
page_dir
=
mm
->
pgd
;
pgd_t
*
page_dir
=
tlb
->
mm
->
pgd
;
spin_lock
(
&
mm
->
page_table_lock
);
page_dir
+=
first
;
do
{
free_one_pgd
(
page_dir
);
free_one_pgd
(
tlb
,
page_dir
);
page_dir
++
;
}
while
(
--
nr
);
spin_unlock
(
&
mm
->
page_table_lock
);
/* keep the page table cache within bounds */
check_pgt_cache
();
...
...
@@ -340,18 +322,17 @@ static inline void forget_pte(pte_t page)
}
}
static
inline
int
zap_pte_range
(
mmu_gather_t
*
tlb
,
pmd_t
*
pmd
,
unsigned
long
address
,
unsigned
long
size
)
static
void
zap_pte_range
(
mmu_gather_t
*
tlb
,
pmd_t
*
pmd
,
unsigned
long
address
,
unsigned
long
size
)
{
unsigned
long
offset
;
pte_t
*
ptep
;
int
freed
=
0
;
if
(
pmd_none
(
*
pmd
))
return
0
;
return
;
if
(
pmd_bad
(
*
pmd
))
{
pmd_ERROR
(
*
pmd
);
pmd_clear
(
pmd
);
return
0
;
return
;
}
ptep
=
pte_offset_map
(
pmd
,
address
);
offset
=
address
&
~
PMD_MASK
;
...
...
@@ -363,49 +344,63 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
if
(
pte_none
(
pte
))
continue
;
if
(
pte_present
(
pte
))
{
struct
page
*
page
;
unsigned
long
pfn
=
pte_pfn
(
pte
);
pte_clear
(
ptep
);
pfn
=
pte_pfn
(
pte
);
if
(
pfn_valid
(
pfn
))
{
page
=
pfn_to_page
(
pfn
);
if
(
!
PageReserved
(
page
))
freed
++
;
struct
page
*
page
=
pfn_to_page
(
pfn
);
if
(
!
PageReserved
(
page
))
{
if
(
pte_dirty
(
pte
))
set_page_dirty
(
page
);
tlb_remove_page
(
tlb
,
page
);
}
}
/* This will eventually call __free_pte on the pte. */
tlb_remove_page
(
tlb
,
ptep
,
address
+
offset
);
}
else
{
free_swap_and_cache
(
pte_to_swp_entry
(
pte
));
pte_clear
(
ptep
);
}
}
pte_unmap
(
ptep
-
1
);
return
freed
;
}
/*
 * Tear down all ptes covered by [address, address+size) within one
 * pgd entry, clamping the end to the pgd boundary.  Freed-page
 * accounting is now carried in the mmu_gather, so nothing is
 * returned.
 */
static void zap_pmd_range(mmu_gather_t *tlb, pgd_t *dir, unsigned long address, unsigned long size)
{
	pmd_t *pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	end = address + size;
	/* Never walk past the end of this pgd entry. */
	if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
		end = ((address + PGDIR_SIZE) & PGDIR_MASK);
	do {
		zap_pte_range(tlb, pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
/*
 * Unmap every page in [address, end) of @vma, feeding freed pages
 * into the TLB-shootdown gather @tlb.  Caller holds the page table
 * lock and is responsible for tlb_gather_mmu()/tlb_finish_mmu().
 */
void unmap_page_range(mmu_gather_t *tlb, struct vm_area_struct *vma, unsigned long address, unsigned long end)
{
	pgd_t *dir;

	if (address >= end)
		BUG();
	dir = pgd_offset(vma->vm_mm, address);
	tlb_start_vma(tlb, vma);
	do {
		zap_pmd_range(tlb, dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
		/* "address" also guards against wraparound to 0. */
	} while (address && (address < end));
	tlb_end_vma(tlb, vma);
}
/*
...
...
@@ -417,7 +412,6 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
mmu_gather_t
*
tlb
;
pgd_t
*
dir
;
unsigned
long
start
=
address
,
end
=
address
+
size
;
int
freed
=
0
;
dir
=
pgd_offset
(
mm
,
address
);
...
...
@@ -432,25 +426,10 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, unsigned
BUG
();
spin_lock
(
&
mm
->
page_table_lock
);
flush_cache_range
(
vma
,
address
,
end
);
tlb
=
tlb_gather_mmu
(
vma
);
do
{
freed
+=
zap_pmd_range
(
tlb
,
dir
,
address
,
end
-
address
);
address
=
(
address
+
PGDIR_SIZE
)
&
PGDIR_MASK
;
dir
++
;
}
while
(
address
&&
(
address
<
end
));
/* this will flush any remaining tlb entries */
tlb
=
tlb_gather_mmu
(
mm
);
unmap_page_range
(
tlb
,
vma
,
address
,
end
);
tlb_finish_mmu
(
tlb
,
start
,
end
);
/*
* Update rss for the mm_struct (not necessarily current->mm)
* Notice that rss is an unsigned long.
*/
if
(
mm
->
rss
>
freed
)
mm
->
rss
-=
freed
;
else
mm
->
rss
=
0
;
spin_unlock
(
&
mm
->
page_table_lock
);
}
...
...
mm/mmap.c
View file @
a68d9759
...
...
@@ -17,7 +17,10 @@
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
extern
void
unmap_page_range
(
mmu_gather_t
*
,
struct
vm_area_struct
*
vma
,
unsigned
long
address
,
unsigned
long
size
);
extern
void
clear_page_tables
(
mmu_gather_t
*
tlb
,
unsigned
long
first
,
int
nr
);
/*
* WARNING: the debugging will use recursive algorithms so never enable this
...
...
@@ -329,11 +332,11 @@ static void __vma_link(struct mm_struct * mm, struct vm_area_struct * vma, stru
static
inline
void
vma_link
(
struct
mm_struct
*
mm
,
struct
vm_area_struct
*
vma
,
struct
vm_area_struct
*
prev
,
rb_node_t
**
rb_link
,
rb_node_t
*
rb_parent
)
{
lock_vma_mappings
(
vma
);
spin_lock
(
&
mm
->
page_table_lock
);
lock_vma_mappings
(
vma
);
__vma_link
(
mm
,
vma
,
prev
,
rb_link
,
rb_parent
);
spin_unlock
(
&
mm
->
page_table_lock
);
unlock_vma_mappings
(
vma
);
spin_unlock
(
&
mm
->
page_table_lock
);
mm
->
map_count
++
;
validate_mm
(
mm
);
...
...
@@ -781,13 +784,11 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
*/
area
->
vm_end
=
addr
;
lock_vma_mappings
(
area
);
spin_lock
(
&
mm
->
page_table_lock
);
}
else
if
(
addr
==
area
->
vm_start
)
{
area
->
vm_pgoff
+=
(
end
-
area
->
vm_start
)
>>
PAGE_SHIFT
;
/* same locking considerations of the above case */
area
->
vm_start
=
end
;
lock_vma_mappings
(
area
);
spin_lock
(
&
mm
->
page_table_lock
);
}
else
{
/* Unmapping a hole: area->vm_start < addr <= end < area->vm_end */
/* Add end mapping -- leave beginning for below */
...
...
@@ -814,12 +815,10 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
* things correctly.
*/
lock_vma_mappings
(
area
);
spin_lock
(
&
mm
->
page_table_lock
);
__insert_vm_struct
(
mm
,
mpnt
);
}
__insert_vm_struct
(
mm
,
area
);
spin_unlock
(
&
mm
->
page_table_lock
);
unlock_vma_mappings
(
area
);
return
extra
;
}
...
...
@@ -837,12 +836,13 @@ static struct vm_area_struct * unmap_fixup(struct mm_struct *mm,
* "prev", if it exists, points to a vma before the one
* we just free'd - but there's no telling how much before.
*/
static
void
free_pgtables
(
struct
mm_struct
*
mm
,
struct
vm_area_struct
*
prev
,
static
void
free_pgtables
(
mmu_gather_t
*
tlb
,
struct
vm_area_struct
*
prev
,
unsigned
long
start
,
unsigned
long
end
)
{
unsigned
long
first
=
start
&
PGDIR_MASK
;
unsigned
long
last
=
end
+
PGDIR_SIZE
-
1
;
unsigned
long
start_index
,
end_index
;
struct
mm_struct
*
mm
=
tlb
->
mm
;
if
(
!
prev
)
{
prev
=
mm
->
mmap
;
...
...
@@ -877,7 +877,7 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
start_index
=
pgd_index
(
first
);
end_index
=
pgd_index
(
last
);
if
(
end_index
>
start_index
)
{
clear_page_tables
(
mm
,
start_index
,
end_index
-
start_index
);
clear_page_tables
(
tlb
,
start_index
,
end_index
-
start_index
);
flush_tlb_pgtables
(
mm
,
first
&
PGDIR_MASK
,
last
&
PGDIR_MASK
);
}
}
...
...
@@ -889,6 +889,7 @@ static void free_pgtables(struct mm_struct * mm, struct vm_area_struct *prev,
*/
int
do_munmap
(
struct
mm_struct
*
mm
,
unsigned
long
addr
,
size_t
len
)
{
mmu_gather_t
*
tlb
;
struct
vm_area_struct
*
mpnt
,
*
prev
,
**
npp
,
*
free
,
*
extra
;
if
((
addr
&
~
PAGE_MASK
)
||
addr
>
TASK_SIZE
||
len
>
TASK_SIZE
-
addr
)
...
...
@@ -933,7 +934,8 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
rb_erase
(
&
mpnt
->
vm_rb
,
&
mm
->
mm_rb
);
}
mm
->
mmap_cache
=
NULL
;
/* Kill the cache. */
spin_unlock
(
&
mm
->
page_table_lock
);
tlb
=
tlb_gather_mmu
(
mm
);
/* Ok - we have the memory areas we should free on the 'free' list,
* so release them, and unmap the page range..
...
...
@@ -942,7 +944,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
* In that case we have to be careful with VM_DENYWRITE.
*/
while
((
mpnt
=
free
)
!=
NULL
)
{
unsigned
long
st
,
end
,
size
;
unsigned
long
st
,
end
;
struct
file
*
file
=
NULL
;
free
=
free
->
vm_next
;
...
...
@@ -950,7 +952,6 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
st
=
addr
<
mpnt
->
vm_start
?
mpnt
->
vm_start
:
addr
;
end
=
addr
+
len
;
end
=
end
>
mpnt
->
vm_end
?
mpnt
->
vm_end
:
end
;
size
=
end
-
st
;
if
(
mpnt
->
vm_flags
&
VM_DENYWRITE
&&
(
st
!=
mpnt
->
vm_start
||
end
!=
mpnt
->
vm_end
)
&&
...
...
@@ -960,12 +961,12 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
remove_shared_vm_struct
(
mpnt
);
mm
->
map_count
--
;
zap_page_range
(
mpnt
,
st
,
size
);
unmap_page_range
(
tlb
,
mpnt
,
st
,
end
);
/*
* Fix the mapping, and free the old area if it wasn't reused.
*/
extra
=
unmap_fixup
(
mm
,
mpnt
,
st
,
size
,
extra
);
extra
=
unmap_fixup
(
mm
,
mpnt
,
st
,
end
-
st
,
extra
);
if
(
file
)
atomic_inc
(
&
file
->
f_dentry
->
d_inode
->
i_writecount
);
}
...
...
@@ -975,7 +976,9 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
if
(
extra
)
kmem_cache_free
(
vm_area_cachep
,
extra
);
free_pgtables
(
mm
,
prev
,
addr
,
addr
+
len
);
free_pgtables
(
tlb
,
prev
,
addr
,
addr
+
len
);
tlb_finish_mmu
(
tlb
,
addr
,
addr
+
len
);
spin_unlock
(
&
mm
->
page_table_lock
);
return
0
;
}
...
...
@@ -1092,44 +1095,58 @@ void build_mmap_rb(struct mm_struct * mm)
/* Release all mmaps. */
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	mmu_gather_t *tlb;
	struct vm_area_struct *mpnt;

	release_segments(mm);

	spin_lock(&mm->page_table_lock);

	tlb = tlb_gather_mmu(mm);

	flush_cache_mm(mm);
	/*
	 * First pass, under the page table lock: unmap every vma's
	 * pages through the TLB gather.
	 */
	mpnt = mm->mmap;
	while (mpnt) {
		unsigned long start = mpnt->vm_start;
		unsigned long end = mpnt->vm_end;

		mm->map_count--;
		remove_shared_vm_struct(mpnt);
		unmap_page_range(tlb, mpnt, start, end);
		mpnt = mpnt->vm_next;
	}

	/* This is just debugging */
	if (mm->map_count)
		BUG();

	clear_page_tables(tlb, FIRST_USER_PGD_NR, USER_PTRS_PER_PGD);
	tlb_finish_mmu(tlb, FIRST_USER_PGD_NR*PGDIR_SIZE, USER_PTRS_PER_PGD*PGDIR_SIZE);

	/* Detach the vma list from the mm before dropping the lock. */
	mpnt = mm->mmap;
	mm->mmap = mm->mmap_cache = NULL;
	mm->mm_rb = RB_ROOT;
	mm->rss = 0;
	mm->total_vm = 0;
	mm->locked_vm = 0;

	spin_unlock(&mm->page_table_lock);

	/*
	 * Walk the list again, actually closing and freeing it
	 * without holding any MM locks.
	 */
	while (mpnt) {
		struct vm_area_struct *next = mpnt->vm_next;

		if (mpnt->vm_ops) {
			if (mpnt->vm_ops->close)
				mpnt->vm_ops->close(mpnt);
		}
		if (mpnt->vm_file)
			fput(mpnt->vm_file);
		kmem_cache_free(vm_area_cachep, mpnt);
		mpnt = next;
	}
}
/* Insert vm structure into process list sorted by address
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment