Commit 1524c2f2
Authored Mar 14, 2003 by Linus Torvalds
Merge bk://kernel.bkbits.net/jgarzik/random-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 1c6604f1 65511c2b
Showing 25 changed files with 1007 additions and 487 deletions (+1007 / -487)
Documentation/md.txt              +64    -4
drivers/base/platform.c            +1    -0
drivers/md/linear.c                +8   -10
drivers/md/md.c                  +666  -289
drivers/md/multipath.c            +12   -14
drivers/md/raid0.c                +13   -14
drivers/md/raid1.c                +23   -27
drivers/md/raid5.c                +26   -30
fs/lockd/svclock.c                 +6    -2
fs/nfsd/export.c                  +12    -3
fs/nfsd/vfs.c                     +19    -3
fs/reiserfs/journal.c             +36   -40
include/asm-generic/xor.h         +20   -20
include/asm-i386/xor.h             +1    -0
include/linux/bio.h                +0    -1
include/linux/nfsd/export.h        +4    -3
include/linux/raid/md.h            +5    -3
include/linux/raid/md_k.h         +24   -12
include/linux/raid/md_p.h         +53    -0
include/linux/raid/multipath.h     +0    -1
include/linux/raid/raid1.h         +0    -2
include/linux/raid/raid5.h         +0    -2
include/linux/sunrpc/cache.h       +1    -0
net/sunrpc/svcauth_unix.c          +6    -4
net/sunrpc/svcsock.c               +7    -3
Documentation/md.txt
 Tools that manage md devices can be found at
-   http://www.<country>.kernel.org/pub/linux/daemons/raid/....
+   http://www.<country>.kernel.org/pub/linux/utils/raid/....

-You can boot (if you selected boot support in the configuration) with your md
-device with the following kernel command lines:
+You can boot with your md device with the following kernel command
+lines:

 for old raid arrays without persistent superblocks:
   md=<md device no.>,<raid level>,<chunk size factor>,<fault level>,dev0,dev1,...,devn
...
@@ -33,4 +33,64 @@ dev0-devn: e.g. /dev/hda1,/dev/hdc1,/dev/sda1,/dev/sdb1
 A possible loadlin line (Harald Hoyer <HarryH@Royal.Net>) looks like this:

 e:\loadlin\loadlin e:\zimage root=/dev/md0 md=0,0,4,0,/dev/hdb2,/dev/hdc3 ro
+
+-------------------------------
+
+The md driver can support a variety of different superblock formats.
+(It doesn't yet, but it can)
+
+The kernel does *NOT* autodetect which format superblock is being
+used.  It must be told.
+
+Superblock format '0' is treated differently to others for legacy
+reasons.
+
+General Rules - apply for all superblock formats
+------------------------------------------------
+
+An array is 'created' by writing appropriate superblocks to all
+devices.
+
+It is 'assembled' by associating each of these devices with a
+particular md virtual device.  Once it is completely assembled, it can
+be accessed.
+
+An array should be created by a user-space tool.  This will write
+superblocks to all devices.  It will usually mark the array as
+'unclean', or with some devices missing so that the kernel md driver
+can create appropriate redundancy (copying in raid1, parity
+calculation in raid4/5).
+
+When an array is assembled, it is first initialised with the
+SET_ARRAY_INFO ioctl.  This contains, in particular, a major and minor
+version number.  The major version number selects which superblock
+format is to be used.  The minor number might be used to tune handling
+of the format, such as suggesting where on each device to look for the
+superblock.
+
+Then each device is added using the ADD_NEW_DISK ioctl.  This
+provides, in particular, a major and minor number identifying the
+device to add.
+
+The array is started with the RUN_ARRAY ioctl.
+
+Once started, new devices can be added.  They should have an
+appropriate superblock written to them, and then passed in with
+ADD_NEW_DISK.
+
+Devices that have failed or are not yet active can be detached from an
+array using HOT_REMOVE_DISK.
+
+Specific Rules that apply to format-0 superblock arrays, and
+arrays with no superblock (non-persistent).
+-------------------------------------------------------------
+
+An array can be 'created' by describing the array (level, chunksize
+etc) in a SET_ARRAY_INFO ioctl.  This must have major_version==0 and
+raid_disks != 0.
+
+Then uninitialised devices can be added with ADD_NEW_DISK.  The
+structure passed to ADD_NEW_DISK must specify the state of the device
+and its role in the array.
+
+Once started with RUN_ARRAY, uninitialised spares can be added with
+HOT_ADD_DISK.
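The ioctl sequence described above can be driven from user space. The sketch below is a minimal, hedged illustration of creating and starting a two-disk format-0 array following those rules; the ioctl names and the mdu_array_info_t / mdu_disk_info_t / mdu_param_t types come from <linux/raid/md_u.h> and <linux/raid/md_p.h>, but every concrete value (device paths, level, minor version, disk roles) is an example parameter, not something taken from this commit.

/* Sketch only: assemble /dev/md0 as raid1 from two example partitions
 * using the SET_ARRAY_INFO / ADD_NEW_DISK / RUN_ARRAY sequence.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <linux/raid/md_u.h>	/* SET_ARRAY_INFO, ADD_NEW_DISK, RUN_ARRAY */
#include <linux/raid/md_p.h>	/* MD_DISK_ACTIVE, MD_DISK_SYNC */

int main(void)
{
	const char *parts[2] = { "/dev/sda1", "/dev/sdb1" };	/* example devices */
	int i, mdfd = open("/dev/md0", O_RDWR);
	if (mdfd < 0) { perror("open /dev/md0"); return 1; }

	/* major_version 0 selects the legacy (format-0) superblock rules */
	mdu_array_info_t array;
	memset(&array, 0, sizeof(array));
	array.major_version = 0;
	array.minor_version = 90;	/* illustrative */
	array.level = 1;		/* raid1 */
	array.raid_disks = 2;
	if (ioctl(mdfd, SET_ARRAY_INFO, &array) < 0) { perror("SET_ARRAY_INFO"); return 1; }

	/* each component device is identified by its major/minor numbers */
	for (i = 0; i < 2; i++) {
		struct stat st;
		mdu_disk_info_t disk;
		if (stat(parts[i], &st) < 0) { perror(parts[i]); return 1; }
		memset(&disk, 0, sizeof(disk));
		disk.number = i;
		disk.raid_disk = i;	/* role in the array */
		disk.state = (1 << MD_DISK_ACTIVE) | (1 << MD_DISK_SYNC);
		disk.major = major(st.st_rdev);
		disk.minor = minor(st.st_rdev);
		if (ioctl(mdfd, ADD_NEW_DISK, &disk) < 0) { perror("ADD_NEW_DISK"); return 1; }
	}

	/* finally, start the assembled array */
	mdu_param_t param;
	memset(&param, 0, sizeof(param));
	if (ioctl(mdfd, RUN_ARRAY, &param) < 0) { perror("RUN_ARRAY"); return 1; }
	close(mdfd);
	return 0;
}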
drivers/base/platform.c

...
@@ -75,5 +75,6 @@ int __init platform_bus_init(void)
 	return bus_register(&platform_bus_type);
 }

+EXPORT_SYMBOL(platform_bus_type);
 EXPORT_SYMBOL(platform_device_register);
 EXPORT_SYMBOL(platform_device_unregister);
drivers/md/linear.c

...
@@ -203,36 +203,34 @@ static int linear_make_request (request_queue_t *q, struct bio *bio)
 		return 0;
 	}

 	bio->bi_bdev = tmp_dev->rdev->bdev;
-	bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1);
+	bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1) + tmp_dev->rdev->data_offset;

 	return 1;
 }

-static int linear_status (char *page, mddev_t *mddev)
+static void linear_status (struct seq_file *seq, mddev_t *mddev)
 {
-	int sz = 0;

 #undef MD_DEBUG
 #ifdef MD_DEBUG
 	int j;
 	linear_conf_t *conf = mddev_to_conf(mddev);

-	sz += sprintf(page+sz, " ");
+	seq_printf(seq, " ");
 	for (j = 0; j < conf->nr_zones; j++)
 	{
-		sz += sprintf(page+sz, "[%s",
+		seq_printf(seq, "[%s",
 			bdev_partition_name(conf->hash_table[j].dev0->rdev->bdev));
 		if (conf->hash_table[j].dev1)
-			sz += sprintf(page+sz, "/%s] ",
+			seq_printf(seq, "/%s] ",
 				bdev_partition_name(conf->hash_table[j].dev1->rdev->bdev));
 		else
-			sz += sprintf(page+sz, "] ");
+			seq_printf(seq, "] ");
 	}
-	sz += sprintf(page+sz, "\n");
+	seq_printf(seq, "\n");
 #endif
-	sz += sprintf(page+sz, " %dk rounding", mddev->chunk_size/1024);
-	return sz;
+	seq_printf(seq, " %dk rounding", mddev->chunk_size/1024);
 }
...
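The linear.c hunk above is one instance of a pattern repeated for every personality in this merge: the status callback no longer formats into a caller-supplied page buffer and returns a byte count; it prints directly into a seq_file supplied by the md core. A minimal sketch of a new-style callback (the personality and its conf fields are placeholders, not code from this commit):

/* New-style md status callback: print straight to the seq_file,
 * with no 'sz += sprintf(page+sz, ...)' accounting and no return value.
 */
#include <linux/seq_file.h>

static void example_status(struct seq_file *seq, mddev_t *mddev)
{
	example_conf_t *conf = mddev_to_conf(mddev);	/* hypothetical conf type */
	int i;

	seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
	for (i = 0; i < conf->raid_disks; i++)
		seq_printf(seq, "%s", conf->disks[i].rdev &&
				      conf->disks[i].rdev->in_sync ? "U" : "_");
	seq_printf(seq, "]");
}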
drivers/md/md.c

(This diff is collapsed and not shown on this page.)
drivers/md/multipath.c

...
@@ -86,7 +86,6 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 {
 	unsigned long flags;
 	mddev_t *mddev = mp_bh->mddev;
-	multipath_conf_t *conf = mddev_to_conf(mddev);

 	spin_lock_irqsave(&retry_list_lock, flags);
 	if (multipath_retry_list == NULL)
...
@@ -95,7 +94,7 @@ static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
 	multipath_retry_tail = &mp_bh->next_mp;
 	mp_bh->next_mp = NULL;
 	spin_unlock_irqrestore(&retry_list_lock, flags);
-	md_wakeup_thread(conf->thread);
+	md_wakeup_thread(mddev->thread);
 }
...
@@ -185,19 +184,18 @@ static int multipath_make_request (request_queue_t *q, struct bio * bio)
 	return 0;
 }

-static int multipath_status (char *page, mddev_t *mddev)
+static void multipath_status (struct seq_file *seq, mddev_t *mddev)
 {
 	multipath_conf_t *conf = mddev_to_conf(mddev);
-	int sz = 0, i;
+	int i;

-	sz += sprintf (page+sz, " [%d/%d] [", conf->raid_disks,
+	seq_printf (seq, " [%d/%d] [", conf->raid_disks,
		       conf->working_disks);
 	for (i = 0; i < conf->raid_disks; i++)
-		sz += sprintf (page+sz, "%s",
+		seq_printf (seq, "%s",
			       conf->multipaths[i].rdev &&
			       conf->multipaths[i].rdev->in_sync ? "U" : "_");
-	sz += sprintf (page+sz, "]");
-	return sz;
+	seq_printf (seq, "]");
 }

 #define LAST_DISK KERN_ALERT \
...
@@ -334,14 +332,14 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
  *	3.	Performs writes following reads for array syncronising.
  */

-static void multipathd (void *data)
+static void multipathd (mddev_t *mddev)
 {
 	struct multipath_bh *mp_bh;
 	struct bio *bio;
 	unsigned long flags;
-	mddev_t *mddev;
 	mdk_rdev_t *rdev;

+	md_check_recovery(mddev);
 	for (;;) {
 		spin_lock_irqsave(&retry_list_lock, flags);
 		mp_bh = multipath_retry_list;
...
@@ -471,10 +469,10 @@ static int multipath_run (mddev_t *mddev)
 	}

 	{
-		const char * name = "multipathd";
+		const char * name = "md%d_multipath";

-		conf->thread = md_register_thread(multipathd, conf, name);
-		if (!conf->thread) {
+		mddev->thread = md_register_thread(multipathd, mddev, name);
+		if (!mddev->thread) {
 			printk(THREAD_ERROR, mdidx(mddev));
 			goto out_free_conf;
 		}
...
@@ -513,7 +511,7 @@ static int multipath_stop (mddev_t *mddev)
 {
 	multipath_conf_t *conf = mddev_to_conf(mddev);

-	md_unregister_thread(conf->thread);
+	md_unregister_thread(mddev->thread);
 	mempool_destroy(conf->pool);
 	kfree(conf);
 	mddev->private = NULL;
...
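Another change that recurs through this merge, visible in multipath_run() above and again in raid1 and raid5 below: the per-personality worker thread now hangs off mddev->thread, and the thread function receives the mddev_t pointer directly instead of an opaque void *. A hedged sketch of the new convention (the personality names and error handling here are placeholders):

/* New md thread convention after this merge: the worker takes the mddev
 * directly, and registration/teardown go through mddev->thread.
 */
static void exampled(mddev_t *mddev)
{
	md_check_recovery(mddev);	/* let the core start/stop resync as needed */
	/* ... service the personality's retry/handle lists ... */
}

static int example_run(mddev_t *mddev)
{
	/* "md%d_example" replaces the old per-conf thread_name buffer */
	mddev->thread = md_register_thread(exampled, mddev, "md%d_example");
	if (!mddev->thread)
		return -ENOMEM;		/* illustrative error handling */
	return 0;
}

static int example_stop(mddev_t *mddev)
{
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	return 0;
}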
drivers/md/raid0.c

...
@@ -349,7 +349,7 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 	 * is the only IO operation happening on this bh.
 	 */
 	bio->bi_bdev = tmp_dev->bdev;
-	bio->bi_sector = rsect;
+	bio->bi_sector = rsect + tmp_dev->data_offset;

 	/*
 	 * Let the main block layer submit the IO and resolve recursion:
...
@@ -372,41 +372,40 @@ static int raid0_make_request (request_queue_t *q, struct bio *bio)
 	return 0;
 }

-static int raid0_status (char *page, mddev_t *mddev)
+static void raid0_status (struct seq_file *seq, mddev_t *mddev)
 {
-	int sz = 0;
 #undef MD_DEBUG
 #ifdef MD_DEBUG
 	int j, k;
 	raid0_conf_t *conf = mddev_to_conf(mddev);

-	sz += sprintf(page + sz, " ");
+	seq_printf(seq, " ");
 	for (j = 0; j < conf->nr_zones; j++) {
-		sz += sprintf(page + sz, "[z%d",
+		seq_printf(seq, "[z%d",
 				conf->hash_table[j].zone0 - conf->strip_zone);
 		if (conf->hash_table[j].zone1)
-			sz += sprintf(page+sz, "/z%d] ",
+			seq_printf(seq, "/z%d] ",
 				conf->hash_table[j].zone1 - conf->strip_zone);
 		else
-			sz += sprintf(page+sz, "] ");
+			seq_printf(seq, "] ");
 	}
-	sz += sprintf(page + sz, "\n");
+	seq_printf(seq, "\n");

 	for (j = 0; j < conf->nr_strip_zones; j++) {
-		sz += sprintf(page + sz, " z%d=[", j);
+		seq_printf(seq, " z%d=[", j);
 		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
-			sz += sprintf (page+sz, "%s/", bdev_partition_name(
+			seq_printf (seq, "%s/", bdev_partition_name(
 				conf->strip_zone[j].dev[k]->bdev));
-		sz--;

-		sz += sprintf (page+sz, "] zo=%d do=%d s=%d\n",
+		seq_printf (seq, "] zo=%d do=%d s=%d\n",
 				conf->strip_zone[j].zone_offset,
 				conf->strip_zone[j].dev_offset,
 				conf->strip_zone[j].size);
 	}
 #endif
-	sz += sprintf(page + sz, " %dk chunks", mddev->chunk_size/1024);
-	return sz;
+	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
+	return;
 }

 static mdk_personality_t raid0_personality=
...
drivers/md/raid1.c

...
@@ -225,13 +225,12 @@ static void reschedule_retry(r1bio_t *r1_bio)
 {
 	unsigned long flags;
 	mddev_t *mddev = r1_bio->mddev;
-	conf_t *conf = mddev_to_conf(mddev);

 	spin_lock_irqsave(&retry_list_lock, flags);
 	list_add(&r1_bio->retry_list, &retry_list_head);
 	spin_unlock_irqrestore(&retry_list_lock, flags);

-	md_wakeup_thread(conf->thread);
+	md_wakeup_thread(mddev->thread);
 }

 /*
...
@@ -320,7 +319,7 @@ static int end_request(struct bio *bio, unsigned int bytes_done, int error)
 		 * already.
 		 */
 		if (atomic_dec_and_test(&r1_bio->remaining)) {
-			md_write_end(r1_bio->mddev, conf->thread);
+			md_write_end(r1_bio->mddev);
 			raid_end_bio_io(r1_bio, uptodate);
 		}
 	}
...
@@ -494,7 +493,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 			BUG();
 		r1_bio->read_bio = read_bio;

-		read_bio->bi_sector = r1_bio->sector;
+		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = end_request;
 		read_bio->bi_rw = r1_bio->cmd;
...
@@ -529,7 +528,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 		mbio = bio_clone(bio, GFP_NOIO);
 		r1_bio->write_bios[i] = mbio;

-		mbio->bi_sector = r1_bio->sector;
+		mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= end_request;
 		mbio->bi_rw = r1_bio->cmd;
...
@@ -542,7 +541,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 		 * If all mirrors are non-operational
 		 * then return an IO error:
 		 */
-		md_write_end(mddev, conf->thread);
+		md_write_end(mddev);
 		raid_end_bio_io(r1_bio, 0);
 		return 0;
 	}
...
@@ -571,19 +570,18 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	return 0;
 }

-static int status(char *page, mddev_t *mddev)
+static void status(struct seq_file *seq, mddev_t *mddev)
 {
 	conf_t *conf = mddev_to_conf(mddev);
-	int sz = 0, i;
+	int i;

-	sz += sprintf(page+sz, " [%d/%d] [", conf->raid_disks,
+	seq_printf(seq, " [%d/%d] [", conf->raid_disks,
		      conf->working_disks);
 	for (i = 0; i < conf->raid_disks; i++)
-		sz += sprintf(page+sz, "%s",
+		seq_printf(seq, "%s",
			      conf->mirrors[i].rdev &&
			      conf->mirrors[i].rdev->in_sync ? "U" : "_");
-	sz += sprintf (page+sz, "]");
-	return sz;
+	seq_printf(seq, "]");
 }

 #define LAST_DISK KERN_ALERT \
...
@@ -624,10 +622,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->degraded++;
 		conf->working_disks--;
 		/*
-		 * if recovery was running, stop it now.
+		 * if recovery is running, make sure it aborts.
 		 */
-		if (mddev->recovery_running)
-			mddev->recovery_running = -EIO;
+		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
 	}
 	rdev->in_sync = 0;
 	rdev->faulty = 1;
...
@@ -859,7 +856,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 		mbio = bio_clone(bio, GFP_NOIO);
 		r1_bio->write_bios[i] = mbio;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
-		mbio->bi_sector = r1_bio->sector;
+		mbio->bi_sector = r1_bio->sector | conf->mirrors[i].rdev->data_offset;
 		mbio->bi_end_io	= end_sync_write;
 		mbio->bi_rw = WRITE;
 		mbio->bi_private = r1_bio;
...
@@ -900,17 +897,17 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 *	3.	Performs writes following reads for array syncronising.
 */

-static void raid1d(void *data)
+static void raid1d(mddev_t *mddev)
 {
 	struct list_head *head = &retry_list_head;
 	r1bio_t *r1_bio;
 	struct bio *bio;
 	unsigned long flags;
-	mddev_t *mddev;
-	conf_t *conf = data;
+	conf_t *conf = mddev_to_conf(mddev);
 	mdk_rdev_t *rdev;

-	md_handle_safemode(conf->mddev);
+	md_check_recovery(mddev);
+	md_handle_safemode(mddev);
 	for (;;) {
 		spin_lock_irqsave(&retry_list_lock, flags);
...
@@ -937,7 +934,7 @@ static void raid1d(void *data)
 			printk(REDIRECT_SECTOR,
				bdev_partition_name(rdev->bdev), (unsigned long long)r1_bio->sector);
 			bio->bi_bdev = rdev->bdev;
-			bio->bi_sector = r1_bio->sector;
+			bio->bi_sector = r1_bio->sector + rdev->data_offset;
 			bio->bi_rw = r1_bio->cmd;

 			generic_make_request(bio);
...
@@ -1048,7 +1045,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
 	read_bio = bio_clone(r1_bio->master_bio, GFP_NOIO);

-	read_bio->bi_sector = sector_nr;
+	read_bio->bi_sector = sector_nr + mirror->rdev->data_offset;
 	read_bio->bi_bdev = mirror->rdev->bdev;
 	read_bio->bi_end_io = end_sync_read;
 	read_bio->bi_rw = READ;
...
@@ -1190,10 +1187,8 @@ static int run(mddev_t *mddev)

 	{
-		snprintf(conf->thread_name,MD_THREAD_NAME_MAX,"raid1d_md%d",mdidx(mddev));
-		conf->thread = md_register_thread(raid1d, conf, conf->thread_name);
-		if (!conf->thread) {
+		mddev->thread = md_register_thread(raid1d, mddev, "md%d_raid1");
+		if (!mddev->thread) {
 			printk(THREAD_ERROR, mdidx(mddev));
 			goto out_free_conf;
 		}
...
@@ -1219,7 +1214,8 @@ static int stop(mddev_t *mddev)
 {
 	conf_t *conf = mddev_to_conf(mddev);

-	md_unregister_thread(conf->thread);
+	md_unregister_thread(mddev->thread);
+	mddev->thread = NULL;
 	if (conf->r1bio_pool)
 		mempool_destroy(conf->r1bio_pool);
 	kfree(conf);
...
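raid1 (and raid5 below) also pick up the simplified write accounting: md_write_end() loses its thread argument, since waking the per-array thread is now the core's business. The sketch below condenses the raid1 end_request() path shown above into a hedged illustration of where the new call sits; the function name is a placeholder and the uptodate handling is simplified.

/* Write accounting after this merge: the personality still brackets each
 * array write with md_write_start()/md_write_end(), but md_write_end()
 * now takes only the mddev.
 */
static int example_end_write(struct bio *bio, unsigned int bytes_done, int error)
{
	r1bio_t *r1_bio = bio->bi_private;	/* per-request tracking, as in raid1 */

	if (atomic_dec_and_test(&r1_bio->remaining)) {
		/* was md_write_end(r1_bio->mddev, conf->thread) */
		md_write_end(r1_bio->mddev);
		raid_end_bio_io(r1_bio, test_bit(BIO_UPTODATE, &bio->bi_flags));
	}
	return 0;
}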
drivers/md/raid5.c

...
@@ -71,12 +71,12 @@ static inline void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
 				list_add_tail(&sh->lru, &conf->delayed_list);
 			else
 				list_add_tail(&sh->lru, &conf->handle_list);
-			md_wakeup_thread(conf->thread);
+			md_wakeup_thread(conf->mddev->thread);
 		} else {
 			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 				atomic_dec(&conf->preread_active_stripes);
 				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
-					md_wakeup_thread(conf->thread);
+					md_wakeup_thread(conf->mddev->thread);
 			}
 			list_add_tail(&sh->lru, &conf->inactive_list);
 			atomic_dec(&conf->active_stripes);
...
@@ -463,10 +463,9 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 			conf->failed_disks++;
 			rdev->in_sync = 0;
 			/*
-			 * if recovery was running, stop it now.
+			 * if recovery was running, make sure it aborts.
 			 */
-			if (mddev->recovery_running)
-				mddev->recovery_running = -EIO;
+			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
 		}
 		rdev->faulty = 1;
 		printk (KERN_ALERT
...
@@ -913,7 +912,7 @@ static void handle_stripe(struct stripe_head *sh)
 				struct bio *nextbi = bi->bi_next;
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
-					md_write_end(conf->mddev, conf->thread);
+					md_write_end(conf->mddev);
 					bi->bi_next = return_bi;
 					return_bi = bi;
 				}
...
@@ -970,7 +969,7 @@ static void handle_stripe(struct stripe_head *sh)
 					while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 						wbi2 = wbi->bi_next;
 						if (--wbi->bi_phys_segments == 0) {
-							md_write_end(conf->mddev, conf->thread);
+							md_write_end(conf->mddev);
 							wbi->bi_next = return_bi;
 							return_bi = wbi;
 						}
...
@@ -1113,7 +1112,7 @@ static void handle_stripe(struct stripe_head *sh)
 		if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
 			atomic_dec(&conf->preread_active_stripes);
 			if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
-				md_wakeup_thread(conf->thread);
+				md_wakeup_thread(conf->mddev->thread);
 		}
 	}
 }
...
@@ -1207,7 +1206,7 @@ static void handle_stripe(struct stripe_head *sh)
 			bi->bi_bdev = rdev->bdev;
 			PRINTK("for %llu schedule op %ld on disc %d\n",
				(unsigned long long)sh->sector, bi->bi_rw, i);
 			atomic_inc(&sh->count);
-			bi->bi_sector = sh->sector;
+			bi->bi_sector = sh->sector + rdev->data_offset;
 			bi->bi_flags = 1 << BIO_UPTODATE;
 			bi->bi_vcnt = 1;
 			bi->bi_idx = 0;
...
@@ -1251,7 +1250,7 @@ static void raid5_unplug_device(void *data)
 	if (blk_remove_plug(q))
 		raid5_activate_delayed(conf);
-	md_wakeup_thread(conf->thread);
+	md_wakeup_thread(mddev->thread);

 	spin_unlock_irqrestore(&conf->device_lock, flags);
 }
...
@@ -1304,7 +1303,7 @@ static int make_request (request_queue_t *q, struct bio * bi)
 		int bytes = bi->bi_size;

 		if ( bio_data_dir(bi) == WRITE )
-			md_write_end(mddev, conf->thread);
+			md_write_end(mddev);
 		bi->bi_size = 0;
 		bi->bi_end_io(bi, bytes, 0);
 	}
...
@@ -1356,16 +1355,17 @@ static int sync_request (mddev_t *mddev, sector_t sector_nr, int go_faster)
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
-static void raid5d (void *data)
+static void raid5d (mddev_t *mddev)
 {
 	struct stripe_head *sh;
-	raid5_conf_t *conf = data;
-	mddev_t *mddev = conf->mddev;
+	raid5_conf_t *conf = mddev_to_conf(mddev);
 	int handled;

 	PRINTK("+++ raid5d active\n");

+	md_check_recovery(mddev);
 	md_handle_safemode(mddev);
 	handled = 0;
 	spin_lock_irq(&conf->device_lock);
 	while (1) {
...
@@ -1486,10 +1486,8 @@ static int run (mddev_t *mddev)
 	}

 	{
-		snprintf(conf->thread_name,MD_THREAD_NAME_MAX,"raid5d_md%d",mdidx(mddev));
-		conf->thread = md_register_thread(raid5d, conf, conf->thread_name);
-		if (!conf->thread) {
+		mddev->thread = md_register_thread(raid5d, mddev, "md%d_raid5");
+		if (!mddev->thread) {
 			printk(KERN_ERR "raid5: couldn't allocate thread for md%d\n", mdidx(mddev));
 			goto abort;
 		}
...
@@ -1500,7 +1498,7 @@ static int run (mddev_t *mddev)
 	if (grow_stripes(conf, conf->max_nr_stripes)) {
 		printk(KERN_ERR "raid5: couldn't allocate %dkB for buffers\n", memory);
 		shrink_stripes(conf);
-		md_unregister_thread(conf->thread);
+		md_unregister_thread(mddev->thread);
 		goto abort;
 	} else
 		printk(KERN_INFO "raid5: allocated %dkB for md%d\n", memory, mdidx(mddev));
...
@@ -1536,7 +1534,8 @@ static int stop (mddev_t *mddev)
 {
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;

-	md_unregister_thread(conf->thread);
+	md_unregister_thread(mddev->thread);
+	mddev->thread = NULL;
 	shrink_stripes(conf);
 	free_pages((unsigned long) conf->stripe_hashtbl, HASH_PAGES_ORDER);
 	kfree(conf);
...
@@ -1574,29 +1573,26 @@ static void printall (raid5_conf_t *conf)
 		}
 	}
 	spin_unlock_irq(&conf->device_lock);
 	PRINTK("--- raid5d inactive\n");
 }
 #endif

-static int status (char *page, mddev_t *mddev)
+static void status (struct seq_file *seq, mddev_t *mddev)
 {
 	raid5_conf_t *conf = (raid5_conf_t *) mddev->private;
-	int sz = 0, i;
+	int i;

-	sz += sprintf (page+sz, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
-	sz += sprintf (page+sz, " [%d/%d] [", conf->raid_disks, conf->working_disks);
+	seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout);
+	seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->working_disks);
 	for (i = 0; i < conf->raid_disks; i++)
-		sz += sprintf (page+sz, "%s",
+		seq_printf (seq, "%s",
			       conf->disks[i].rdev &&
			       conf->disks[i].rdev->in_sync ? "U" : "_");
-	sz += sprintf (page+sz, "]");
+	seq_printf (seq, "]");
 #if RAID5_DEBUG
 #define D(x) \
-	sz += sprintf (page+sz, "<"#x":%d>", atomic_read(&conf->x))
+	seq_printf (seq, "<"#x":%d>", atomic_read(&conf->x))
 	printall(conf);
 #endif
-	return sz;
 }

 static void print_raid5_conf (raid5_conf_t *conf)
...
fs/lockd/svclock.c

...
@@ -305,8 +305,6 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
				(long long)lock->fl.fl_end,
				wait);

-	/* Lock file against concurrent access */
-	down(&file->f_sema);

 	/* Get existing block (in case client is busy-waiting) */
 	block = nlmsvc_lookup_block(file, lock, 0);
...
@@ -314,6 +312,9 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
 	lock->fl.fl_flags |= FL_LOCKD;

 again:
+	/* Lock file against concurrent access */
+	down(&file->f_sema);
+
 	if (!(conflock = posix_test_lock(&file->f_file, &lock->fl))) {
 		error = posix_lock_file(&file->f_file, &lock->fl);
...
@@ -346,7 +347,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
 	/* If we don't have a block, create and initialize it. Then
 	 * retry because we may have slept in kmalloc. */
+	/* We have to release f_sema as nlmsvc_create_block may try to
+	 * to claim it while doing host garbage collection */
 	if (block == NULL) {
+		up(&file->f_sema);
 		dprintk("lockd: blocking on this lock (allocating).\n");
 		if (!(block = nlmsvc_create_block(rqstp, file, lock, cookie)))
 			return nlm_lck_denied_nolocks;
...
fs/nfsd/export.c

...
@@ -294,7 +294,9 @@ int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
 	/* client */
 	len = qword_get(&mesg, buf, PAGE_SIZE);
-	if (len <= 0) return -EINVAL;
+	err = -EINVAL;
+	if (len <= 0) goto out;

+	err = -ENOENT;
 	dom = auth_domain_find(buf);
 	if (!dom)
...
@@ -473,8 +475,14 @@ exp_get_by_name(svc_client *clp, struct vfsmount *mnt, struct dentry *dentry,

 	exp = svc_export_lookup(&key, 0);
 	if (exp != NULL)
-		if (cache_check(&svc_export_cache, &exp->h, reqp))
+		switch (cache_check(&svc_export_cache, &exp->h, reqp)) {
+		case 0: break;
+		case -EAGAIN:
+			exp = ERR_PTR(-EAGAIN);
+			break;
+		default:
 			exp = NULL;
+		}

 	return exp;
 }
...
@@ -915,7 +923,8 @@ struct flags {
 	{ NFSEXP_UIDMAP, {"uidmap", ""}},
 	{ NFSEXP_KERBEROS, { "kerberos", ""}},
 	{ NFSEXP_SUNSECURE, { "sunsecure", ""}},
-	{ NFSEXP_CROSSMNT, {"nohide", ""}},
+	{ NFSEXP_NOHIDE, {"nohide", ""}},
+	{ NFSEXP_CROSSMNT, {"crossmnt", ""}},
 	{ NFSEXP_NOSUBTREECHECK, {"no_subtree_check", ""}},
 	{ NFSEXP_NOAUTHNLM, {"insecure_locks", ""}},
 #ifdef MSNFS
...
fs/nfsd/vfs.c

...
@@ -79,7 +79,7 @@ static struct raparms * raparm_cache;
 * N.B. After this call _both_ fhp and resfh need an fh_put
 *
 * If the lookup would cross a mountpoint, and the mounted filesystem
- * is exported to the client with NFSEXP_CROSSMNT, then the lookup is
+ * is exported to the client with NFSEXP_NOHIDE, then the lookup is
 * accepted as it stands and the mounted directory is
 * returned. Otherwise the covered directory is returned.
 * NOTE: this mountpoint crossing is not supported properly by all
...
@@ -115,7 +115,7 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
 				read_lock(&dparent_lock);
 				dentry = dget(dparent->d_parent);
 				read_unlock(&dparent_lock);
-			} else if (!EX_CROSSMNT(exp))
+			} else if (!EX_NOHIDE(exp))
 				dentry = dget(dparent); /* .. == . just like at / */
 			else {
 				/* checking mountpoint crossing is very different when stepping up */
...
@@ -133,6 +133,12 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
 				exp2 = exp_parent(exp->ex_client, mnt, dentry,
						  &rqstp->rq_chandle);
+				if (IS_ERR(exp2)) {
+					err = PTR_ERR(exp2);
+					dput(dentry);
+					mntput(mnt);
+					goto out;
+				}
 				if (!exp2) {
 					dput(dentry);
 					dentry = dget(dparent);
...
@@ -157,9 +163,19 @@ nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
 			struct dentry *mounts = dget(dentry);
 			while (follow_down(&mnt,&mounts)&&d_mountpoint(mounts))
 				;
 			exp2 = exp_get_by_name(exp->ex_client, mnt,
					       mounts, &rqstp->rq_chandle);
-			if (exp2 && EX_CROSSMNT(exp2)) {
+			if (IS_ERR(exp2)) {
+				err = PTR_ERR(exp2);
+				dput(mounts);
+				dput(dentry);
+				mntput(mnt);
+				goto out;
+			}
+			if (exp2 &&
+			    ((exp->ex_flags & NFSEXP_CROSSMNT) || EX_NOHIDE(exp2))) {
 				/* successfully crossed mount point */
 				exp_put(exp);
 				exp = exp2;
...
fs/reiserfs/journal.c

...
@@ -1310,6 +1310,10 @@ static void free_journal_ram(struct super_block *p_s_sb) {
   if (SB_JOURNAL(p_s_sb)->j_header_bh) {
     brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
   }
+  /* j_header_bh is on the journal dev, make sure not to release the journal
+   * dev until we brelse j_header_bh
+   */
+  release_journal_dev(p_s_sb, SB_JOURNAL(p_s_sb));
   vfree(SB_JOURNAL(p_s_sb)) ;
 }
...
@@ -1341,7 +1345,6 @@ static int do_journal_release(struct reiserfs_transaction_handle *th, struct sup
     commit_wq = NULL;
   }

-  release_journal_dev( p_s_sb, SB_JOURNAL(p_s_sb) );
   free_journal_ram(p_s_sb) ;
   return 0 ;
...
@@ -1867,24 +1870,18 @@ static int release_journal_dev( struct super_block *super,
 	int result;

 	result = 0;
 	if( journal -> j_dev_file != NULL ) {
-		/*
-		 * journal block device was taken via filp_open
-		 */
 		result = filp_close( journal -> j_dev_file, NULL );
 		journal -> j_dev_file = NULL;
 		journal -> j_dev_bd = NULL;
 	} else if( journal -> j_dev_bd != NULL ) {
-		/*
-		 * journal block device was taken via bdget and blkdev_get
-		 */
 		result = blkdev_put( journal -> j_dev_bd, BDEV_FS );
 		journal -> j_dev_bd = NULL;
 	}

 	if( result != 0 ) {
-		reiserfs_warning("sh-457: release_journal_dev: Cannot release journal device: %i", result );
+		reiserfs_warning("sh-457: release_journal_dev: Cannot release journal device: %i\n", result );
 	}
 	return result;
 }
...
@@ -1895,6 +1892,7 @@ static int journal_init_dev( struct super_block *super,
 {
 	int result;
 	dev_t jdev;
+	int blkdev_mode = FMODE_READ | FMODE_WRITE;

 	result = 0;
...
@@ -1902,12 +1900,16 @@ static int journal_init_dev( struct super_block *super,
 	journal -> j_dev_file = NULL;
 	jdev = SB_ONDISK_JOURNAL_DEVICE( super ) ?
 		SB_ONDISK_JOURNAL_DEVICE( super ) : super->s_dev;

+	if (bdev_read_only(super->s_bdev))
+		blkdev_mode = FMODE_READ;
+
 	/* there is no "jdev" option and journal is on separate device */
 	if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
 		journal->j_dev_bd = bdget(jdev);
 		if (journal->j_dev_bd)
-			result = blkdev_get(journal->j_dev_bd, FMODE_READ | FMODE_WRITE, 0, BDEV_FS);
+			result = blkdev_get(journal->j_dev_bd, blkdev_mode, 0, BDEV_FS);
 		else
 			result = -ENOMEM;
...
@@ -1928,10 +1930,10 @@ static int journal_init_dev( struct super_block *super,
 		jdev_inode = journal -> j_dev_file -> f_dentry -> d_inode;
 		journal -> j_dev_bd = jdev_inode -> i_bdev;
 		if( !S_ISBLK( jdev_inode -> i_mode ) ) {
-			printk( "journal_init_dev: '%s' is not a block device", jdev_name );
+			printk( "journal_init_dev: '%s' is not a block device\n", jdev_name );
 			result = -ENOTBLK;
 		} else if( jdev_inode -> i_bdev == NULL ) {
-			printk( "journal_init_dev: bdev uninitialized for '%s'", jdev_name );
+			printk( "journal_init_dev: bdev uninitialized for '%s'\n", jdev_name );
 			result = -ENOMEM;
 		} else {
 			/* ok */
...
@@ -1941,12 +1943,12 @@ static int journal_init_dev( struct super_block *super,
 	} else {
 		result = PTR_ERR( journal -> j_dev_file );
 		journal -> j_dev_file = NULL;
-		printk( "journal_init_dev: Cannot open '%s': %i", jdev_name, result );
+		printk( "journal_init_dev: Cannot open '%s': %i\n", jdev_name, result );
 	}
 	if( result != 0 ) {
 		release_journal_dev( super, journal );
 	}
-	printk( "journal_init_dev: journal device: %s", bdevname(journal->j_dev_bd) );
+	printk( "journal_init_dev: journal device: %s\n", bdevname(journal->j_dev_bd) );
 	return result;
 }
...
@@ -1960,20 +1962,24 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   struct reiserfs_journal_header *jh ;
   struct reiserfs_journal *journal;

   if (sizeof(struct reiserfs_journal_commit) != 4096 ||
       sizeof(struct reiserfs_journal_desc) != 4096) {
     printk("journal-1249: commit or desc struct not 4096 %Zd %Zd\n", sizeof(struct reiserfs_journal_commit),
            sizeof(struct reiserfs_journal_desc)) ;
     return 1 ;
   }

+  journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
+  if (!journal) {
+    printk("journal-1256: unable to get memory for journal structure\n") ;
+    return 1 ;
+  }
+  memset(journal, 0, sizeof(struct reiserfs_journal)) ;
+  INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
+  INIT_LIST_HEAD (&SB_JOURNAL(p_s_sb)->j_prealloc_list);
+  reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap, SB_BMAP_NR(p_s_sb)) ;
+  allocate_bitmap_nodes(p_s_sb) ;
+
   /* reserved for journal area support */
   SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
...
@@ -1983,7 +1989,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
     printk( "sh-462: unable to initialize jornal device\n");
-    return 1;
+    goto free_and_return;
   }

   rs = SB_DISK_SUPER_BLOCK(p_s_sb);
...
@@ -1993,8 +1999,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
		     SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
   if (!bhjh) {
     printk("sh-459: unable to read journal header\n") ;
-    release_journal_dev(p_s_sb, journal);
-    return 1 ;
+    goto free_and_return;
   }
   jh = (struct reiserfs_journal_header *)(bhjh->b_data);
...
@@ -2005,8 +2010,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
	     jh->jh_journal.jp_journal_magic, bdevname( SB_JOURNAL(p_s_sb)->j_dev_bd ),
	     sb_jp_journal_magic(rs), reiserfs_bdevname (p_s_sb));
     brelse (bhjh);
-    release_journal_dev(p_s_sb, journal);
-    return 1;
+    goto free_and_return;
   }

   SB_JOURNAL_TRANS_MAX(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
...
@@ -2064,7 +2068,6 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   brelse (bhjh);

   SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
   SB_JOURNAL_LIST_INDEX(p_s_sb) = -10000 ; /* make sure flush_old_commits does not try to flush a list while replay is on */
...
@@ -2075,12 +2078,8 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   memset(SB_JOURNAL(p_s_sb)->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
   memset(journal_writers, 0, sizeof(char *) * 512) ; /* debug code */

-  INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
   INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_dirty_buffers) ;
   spin_lock_init(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock) ;
-  reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap, SB_BMAP_NR(p_s_sb)) ;
-  allocate_bitmap_nodes(p_s_sb) ;

   SB_JOURNAL(p_s_sb)->j_start = 0 ;
   SB_JOURNAL(p_s_sb)->j_len = 0 ;
...
@@ -2107,20 +2106,15 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL_LIST(p_s_sb)) ;
   if (!(SB_JOURNAL_LIST(p_s_sb)[0].j_list_bitmap)) {
     reiserfs_warning("journal-2005, get_list_bitmap failed for journal list 0\n") ;
-    release_journal_dev(p_s_sb, journal);
-    return 1 ;
+    goto free_and_return;
   }
   if (journal_read(p_s_sb) < 0) {
     reiserfs_warning("Replay Failure, unable to mount\n") ;
-    free_journal_ram(p_s_sb) ;
-    release_journal_dev(p_s_sb, journal);
-    return 1 ;
+    goto free_and_return;
   }
   SB_JOURNAL_LIST_INDEX(p_s_sb) = 0 ; /* once the read is done, we can set this where it belongs */

-  INIT_LIST_HEAD (&SB_JOURNAL(p_s_sb)->j_prealloc_list);

   if (reiserfs_dont_log (p_s_sb))
     return 0 ;
...
@@ -2129,7 +2123,9 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   commit_wq = create_workqueue("reiserfs");

   return 0 ;
+free_and_return:
+  free_journal_ram(p_s_sb);
+  return 1;
 }

 /*
...
include/asm-generic/xor.h

...
@@ -678,35 +678,35 @@ xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
 }

 static struct xor_block_template xor_block_8regs = {
-	name: "8regs",
-	do_2: xor_8regs_2,
-	do_3: xor_8regs_3,
-	do_4: xor_8regs_4,
-	do_5: xor_8regs_5,
+	.name = "8regs",
+	.do_2 = xor_8regs_2,
+	.do_3 = xor_8regs_3,
+	.do_4 = xor_8regs_4,
+	.do_5 = xor_8regs_5,
 };

 static struct xor_block_template xor_block_32regs = {
-	name: "32regs",
-	do_2: xor_32regs_2,
-	do_3: xor_32regs_3,
-	do_4: xor_32regs_4,
-	do_5: xor_32regs_5,
+	.name = "32regs",
+	.do_2 = xor_32regs_2,
+	.do_3 = xor_32regs_3,
+	.do_4 = xor_32regs_4,
+	.do_5 = xor_32regs_5,
 };

 static struct xor_block_template xor_block_8regs_p = {
-	name: "8regs_prefetch",
-	do_2: xor_8regs_p_2,
-	do_3: xor_8regs_p_3,
-	do_4: xor_8regs_p_4,
-	do_5: xor_8regs_p_5,
+	.name = "8regs_prefetch",
+	.do_2 = xor_8regs_p_2,
+	.do_3 = xor_8regs_p_3,
+	.do_4 = xor_8regs_p_4,
+	.do_5 = xor_8regs_p_5,
 };

 static struct xor_block_template xor_block_32regs_p = {
-	name: "32regs_prefetch",
-	do_2: xor_32regs_p_2,
-	do_3: xor_32regs_p_3,
-	do_4: xor_32regs_p_4,
-	do_5: xor_32regs_p_5,
+	.name = "32regs_prefetch",
+	.do_2 = xor_32regs_p_2,
+	.do_3 = xor_32regs_p_3,
+	.do_4 = xor_32regs_p_4,
+	.do_5 = xor_32regs_p_5,
 };

 #define XOR_TRY_TEMPLATES \
...
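The xor.h hunks are a purely mechanical conversion from the old GCC-specific "field:" initializer extension to standard C99 designated initializers; the structure contents are unchanged. For reference, the two spellings of the same initialization side by side (the template name here is an example, not a symbol from this file):

/* Old GCC extension syntax (being phased out): */
static struct xor_block_template xor_block_example_gcc = {
	name: "example",
	do_2: xor_8regs_2,
};

/* Equivalent C99 designated-initializer syntax (what the tree is moving to): */
static struct xor_block_template xor_block_example_c99 = {
	.name = "example",
	.do_2 = xor_8regs_2,
};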
include/asm-i386/xor.h

...
@@ -25,6 +25,7 @@
 #define XO3(x,y)	"       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
 #define XO4(x,y)	"       pxor   8*("#x")(%5), %%mm"#y"   ;\n"

+#include <asm/i387.h>

 static void
 xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
...
include/linux/bio.h

...
@@ -239,7 +239,6 @@ extern inline char *bio_kmap_irq(struct bio *bio, unsigned long *flags)
	 * balancing is a lot nicer this way
	 */
	local_save_flags(*flags);
	local_irq_disable();
	addr = (unsigned long) kmap_atomic(bio_page(bio), KM_BIO_SRC_IRQ);

	if (addr & ~PAGE_MASK)
...
include/linux/nfsd/export.h

...
@@ -35,12 +35,13 @@
 #define NFSEXP_UIDMAP		0x0040
 #define NFSEXP_KERBEROS		0x0080		/* not available */
 #define NFSEXP_SUNSECURE	0x0100
-#define NFSEXP_CROSSMNT		0x0200
+#define NFSEXP_NOHIDE		0x0200
 #define NFSEXP_NOSUBTREECHECK	0x0400
 #define NFSEXP_NOAUTHNLM	0x0800		/* Don't authenticate NLM requests - just trust */
 #define NFSEXP_MSNFS		0x1000	/* do silly things that MS clients expect */
 #define NFSEXP_FSID		0x2000
-#define NFSEXP_ALLFLAGS		0x3FFF
+#define NFSEXP_CROSSMNT		0x4000
+#define NFSEXP_ALLFLAGS		0x7FFF

 #ifdef __KERNEL__
...
@@ -73,7 +74,7 @@ struct svc_expkey {
 #define EX_SECURE(exp)		(!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
 #define EX_ISSYNC(exp)		(!((exp)->ex_flags & NFSEXP_ASYNC))
 #define EX_RDONLY(exp)		((exp)->ex_flags & NFSEXP_READONLY)
-#define EX_CROSSMNT(exp)	((exp)->ex_flags & NFSEXP_CROSSMNT)
+#define EX_NOHIDE(exp)		((exp)->ex_flags & NFSEXP_NOHIDE)
 #define EX_SUNSECURE(exp)	((exp)->ex_flags & NFSEXP_SUNSECURE)
 #define EX_WGATHER(exp)		((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
...
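With this renaming, bit 0x0200 keeps its old meaning under the clearer name NFSEXP_NOHIDE (export option "nohide"), while NFSEXP_CROSSMNT becomes a new, distinct flag (0x4000, export option "crossmnt"). A hedged sketch of how the two combine when deciding whether to follow a mountpoint, modelled on the condition added to fs/nfsd/vfs.c above; the helper function itself is illustrative, not kernel code:

/* Illustrative only: mirrors the new nfsd_lookup() condition.  Cross into
 * the child export if the parent export was marked 'crossmnt' or the
 * child export itself is marked 'nohide'.
 */
static int may_cross_mountpoint(struct svc_export *parent, struct svc_export *child)
{
	return (parent->ex_flags & NFSEXP_CROSSMNT) || EX_NOHIDE(child);
}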
include/linux/raid/md.h

...
@@ -27,6 +27,7 @@
 #include <linux/module.h>
 #include <linux/hdreg.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/smp_lock.h>
 #include <linux/delay.h>
 #include <net/checksum.h>
...
@@ -68,13 +69,14 @@ extern inline char * bdev_partition_name (struct block_device *bdev)
 }

 extern int register_md_personality (int p_num, mdk_personality_t *p);
 extern int unregister_md_personality (int p_num);
-extern mdk_thread_t * md_register_thread (void (*run) (void *data),
-				void *data, const char *name);
+extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev),
+				mddev_t *mddev, const char *name);
 extern void md_unregister_thread (mdk_thread_t *thread);
 extern void md_wakeup_thread(mdk_thread_t *thread);
+extern void md_check_recovery(mddev_t *mddev);
 extern void md_interrupt_thread (mdk_thread_t *thread);
 extern void md_write_start(mddev_t *mddev);
-extern void md_write_end(mddev_t *mddev, mdk_thread_t *thread);
+extern void md_write_end(mddev_t *mddev);
 extern void md_handle_safemode(mddev_t *mddev);
 extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
 extern void md_sync_acct(mdk_rdev_t *rdev, unsigned long nr_sectors);
...
include/linux/raid/md_k.h

...
@@ -155,6 +155,7 @@ struct mdk_rdev_s
	struct page	*sb_page;
	int		sb_loaded;
+	sector_t	data_offset;		/* start of data in array */
	sector_t	sb_offset;
	int		preferred_minor;	/* autorun support */
...
@@ -206,22 +207,31 @@ struct mddev_s
	char				uuid[16];

+	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	unsigned long			curr_resync;	/* blocks scheduled */
	unsigned long			resync_mark;	/* a recent timestamp */
	unsigned long			resync_mark_cnt;/* blocks written at resync_mark */

-	/* recovery_running is 0 for no recovery/resync,
-	 * 1 for active recovery
-	 * 2 for active resync
-	 * -error for an error (e.g. -EINTR)
-	 * it can only be set > 0 under reconfig_sem
+	/* recovery/resync flags
+	 * NEEDED:  we might need to start a resync/recover
+	 * RUNNING: a thread is running, or about to be started
+	 * SYNC:    actually doing a resync, not a recovery
+	 * ERR:     and IO error was detected - abort the resync/recovery
+	 * INTR:    someone requested a (clean) early abort.
+	 * DONE:    thread is done and is waiting to be reaped
	 */
-	int				recovery_running;
-	int				recovery_error;	/* error from recovery write */
+#define	MD_RECOVERY_RUNNING	0
+#define	MD_RECOVERY_SYNC	1
+#define	MD_RECOVERY_ERR		2
+#define	MD_RECOVERY_INTR	3
+#define	MD_RECOVERY_DONE	4
+#define	MD_RECOVERY_NEEDED	5
+	unsigned long			recovery;

	int				in_sync;	/* know to not need resync */
	struct semaphore		reconfig_sem;
	atomic_t			active;

	int				spares;

	int				degraded;	/* whether md should consider
							 * adding a spare
...
@@ -230,9 +240,11 @@ struct mddev_s
	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
-	int				safemode;	/* if set, update "clean" superblock
+	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
+	unsigned int			safemode_delay;
+	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	request_queue_t			queue;	/* for plugging ... */
...
@@ -245,7 +257,7 @@ struct mdk_personality_s
	int (*make_request)(request_queue_t *q, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
-	int (*status)(char *page, mddev_t *mddev);
+	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
...
@@ -292,8 +304,8 @@ extern mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr);
	ITERATE_RDEV_GENERIC(pending_raid_disks,rdev,tmp)

 typedef struct mdk_thread_s {
-	void			(*run) (void *data);
-	void			*data;
+	void			(*run) (mddev_t *mddev);
+	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct completion	*event;
...
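The single recovery_running integer becomes a bit mask, so personalities and the md core now communicate through set_bit/test_bit on mddev->recovery, as the raid1/raid5 error() hunks above already show. A small sketch of the intended protocol, inferred from the flag comments; the function names are hypothetical and the authoritative core-side sequencing lives in drivers/md/md.c, whose diff is collapsed above:

/* Sketch of the new recovery handshake (names are illustrative). */
static void example_mark_device_failed(mddev_t *mddev)
{
	/* a personality that hits a fatal device error asks the core to
	 * abort any resync/recovery in progress: */
	set_bit(MD_RECOVERY_ERR, &mddev->recovery);
}

static void example_request_recovery(mddev_t *mddev)
{
	/* ...and when redundancy could be rebuilt (e.g. a spare appeared),
	 * it asks the core to consider starting recovery: */
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);	/* md_check_recovery() picks it up */
}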
include/linux/raid/md_p.h

...
@@ -173,5 +173,58 @@ static inline __u64 md_event(mdp_super_t *sb) {
	return (ev<<32)| sb->events_lo;
 }

+/*
+ * The version-1 superblock :
+ * All numeric fields are little-endian.
+ *
+ * total size: 256 bytes plus 2 per device.
+ *  1K allows 384 devices.
+ */
+struct mdp_superblock_1 {
+	/* constant array information - 128 bytes */
+	__u32	magic;		/* MD_SB_MAGIC: 0xa92b4efc - little endian */
+	__u32	major_version;	/* 1 */
+	__u32	feature_map;	/* 0 for now */
+	__u32	pad0;		/* always set to 0 when writing */
+
+	__u8	set_uuid[16];	/* user-space generated. */
+	char	set_name[32];	/* set and interpreted by user-space */
+
+	__u64	ctime;		/* lo 40 bits are seconds, top 24 are microseconds or 0*/
+	__u32	level;		/* -4 (multipath), -1 (linear), 0,1,4,5 */
+	__u32	layout;		/* only for raid5 currently */
+	__u64	size;		/* used size of component devices, in 512byte sectors */
+
+	__u32	chunksize;	/* in 512byte sectors */
+	__u32	raid_disks;
+	__u8	pad1[128-92];	/* set to 0 when written */
+
+	/* constant this-device information - 64 bytes */
+	__u64	data_offset;	/* sector start of data, often 0 */
+	__u64	data_size;	/* sectors in this device that can be used for data */
+	__u64	super_offset;	/* sector start of this superblock */
+	__u64	recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
+	__u32	dev_number;	/* permanent identifier of this device - not role in raid */
+	__u32	cnt_corrected_read; /* number of read errors that were corrected by re-writing */
+	__u8	device_uuid[16]; /* user-space setable, ignored by kernel */
+	__u8	pad2[64-56];	/* set to 0 when writing */
+
+	/* array state information - 64 bytes */
+	__u64	utime;		/* 40 bits second, 24 btes microseconds */
+	__u64	events;		/* incremented when superblock updated */
+	__u64	resync_offset;	/* data before this offset (from data_offset) known to be in sync */
+	__u32	sb_csum;	/* checksum upto devs[max_dev] */
+	__u32	max_dev;	/* size of devs[] array to consider */
+	__u8	pad3[64-40];	/* set to 0 when writing */
+
+	/* device state information. Indexed by dev_number.
+	 * 2 bytes per device
+	 * Note there are no per-device state flags. State information is rolled
+	 * into the 'roles' value.  If a device is spare or faulty, then it doesn't
+	 * have a meaningful role.
+	 */
+	__u16	dev_roles[0];	/* role in array, or 0xffff for a spare, or 0xfffe for faulty */
+};
+
 #endif
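The layout above is enough to recognize a version-1 superblock from user space. The sketch below reads a candidate superblock at a given sector offset and checks the constant header fields; the 0xa92b4efc magic and the little-endian rule come from the comments above, while the device path, read size and offset handling are illustrative assumptions (where the superblock actually lives on a device depends on the minor version, which this commit does not spell out).

/* Sketch: validate a candidate version-1 md superblock on one component
 * device.  Assumes <linux/raid/md_p.h> provides struct mdp_superblock_1
 * as shown above.
 */
#include <endian.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <linux/raid/md_p.h>

#define MD_SB_1_MAGIC 0xa92b4efcU	/* from the comment on 'magic' above */

static int check_v1_sb(const char *dev, off_t sb_offset_sectors)
{
	unsigned char buf[4096];
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1 *)buf;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) { perror(dev); return -1; }
	if (pread(fd, buf, sizeof(buf), sb_offset_sectors * 512) != (ssize_t)sizeof(buf)) {
		perror("pread");
		close(fd);
		return -1;
	}
	close(fd);

	/* all numeric fields are little-endian on disk */
	if (le32toh(sb->magic) != MD_SB_1_MAGIC || le32toh(sb->major_version) != 1)
		return 0;	/* not a version-1 superblock */

	printf("%s: set %.32s, level %d, %u raid disks, dev_number %u\n",
	       dev, sb->set_name, (int)(int32_t)le32toh(sb->level),
	       le32toh(sb->raid_disks), le32toh(sb->dev_number));
	return 1;
}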
include/linux/raid/multipath.h

...
@@ -13,7 +13,6 @@ struct multipath_private_data {
	struct multipath_info	multipaths[MD_SB_DISKS];
	int			raid_disks;
	int			working_disks;
-	mdk_thread_t		*thread;
	spinlock_t		device_lock;

	mempool_t		*pool;
...
include/linux/raid/raid1.h

...
@@ -19,7 +19,6 @@ struct r1_private_data_s {
	int			working_disks;
	int			last_used;
	sector_t		next_seq_sect;
-	mdk_thread_t		*thread;
	spinlock_t		device_lock;

	/* for use when syncing mirrors: */
...
@@ -34,7 +33,6 @@ struct r1_private_data_s {
	mempool_t *r1bio_pool;
	mempool_t *r1buf_pool;
-	char			thread_name[MD_THREAD_NAME_MAX];
 };

 typedef struct r1_private_data_s conf_t;
...
include/linux/raid/raid5.h

...
@@ -203,7 +203,6 @@ struct disk_info {
 struct raid5_private_data {
	struct stripe_head	**stripe_hashtbl;
	mddev_t			*mddev;
-	mdk_thread_t		*thread;
	struct disk_info	disks[MD_SB_DISKS];
	struct disk_info	*spare;
	int			chunk_size, level, algorithm;
...
@@ -226,7 +225,6 @@ struct raid5_private_data {
							 * waiting for 25% to be free
							 */
	spinlock_t		device_lock;
-	char			thread_name[MD_THREAD_NAME_MAX];
 };

 typedef struct raid5_private_data raid5_conf_t;
...
include/linux/sunrpc/cache.h

...
@@ -190,6 +190,7 @@ RTN *FNAME ARGS \
	else read_unlock(&(DETAIL)->hash_lock); \
	if (set) \
		cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
+	if (set==1 && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
	if (new) \
		(DETAIL)->cache_put(&new->MEMBER, DETAIL); \
	return tmp; \
 } \
...
net/sunrpc/svcauth_unix.c

...
@@ -441,9 +441,6 @@ svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
		return SVC_DENIED;
	}

-	/* Put NULL verifier */
-	svc_putu32(resv, RPC_AUTH_NULL);
-	svc_putu32(resv, 0);

	key.m_class = rqstp->rq_server->sv_program->pg_class;
	key.m_addr = rqstp->rq_addr.sin_addr;
...
@@ -470,8 +467,13 @@ svcauth_unix_accept(struct svc_rqst *rqstp, u32 *authp)
	} else
		rv = SVC_DROP;

-	if (rqstp->rq_client == NULL && rqstp->rq_proc != 0)
+	if (rv == SVC_OK && rqstp->rq_client == NULL && rqstp->rq_proc != 0)
		goto badcred;

+	/* Put NULL verifier */
+	svc_putu32(resv, RPC_AUTH_NULL);
+	svc_putu32(resv, 0);
+
	return rv;

 badcred:
...
net/sunrpc/svcsock.c

...
@@ -577,12 +577,15 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
+		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
+			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			svc_sock_received(svsk);
			return 0;
		}
+		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
...
@@ -1435,7 +1438,7 @@ static struct cache_deferred_req *
 svc_defer(struct cache_req *req)
 {
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
-	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.head[0].iov_len);
+	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
...
@@ -1444,6 +1447,7 @@ svc_defer(struct cache_req *req)
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
+		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
...
@@ -1452,8 +1456,8 @@ svc_defer(struct cache_req *req)
		dr->serv = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		dr->addr = rqstp->rq_addr;
-		dr->argslen = rqstp->rq_arg.head[0].iov_len >> 2;
-		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base, dr->argslen<<2);
+		dr->argslen = rqstp->rq_arg.len >> 2;
+		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	spin_lock(&rqstp->rq_server->sv_lock);
	rqstp->rq_sock->sk_inuse++;
...