Commit 3f69168f
authored Dec 15, 2003 by Linus Torvalds

    Merge bk://linuxusb.bkbits.net/gregkh-2.6
    into home.osdl.org:/home/torvalds/v2.5/linux

Parents: 10921a8f, e1f936a9

Showing 7 changed files with 96 additions and 22 deletions (+96, -22):
    drivers/block/scsi_ioctl.c    +5   -6
    drivers/md/raid5.c            +25  -9
    fs/hpfs/dir.c                 +2   -0
    include/linux/blkdev.h        +3   -1
    include/linux/list.h          +12  -0
    kernel/exit.c                 +20  -3
    kernel/fork.c                 +29  -3
drivers/block/scsi_ioctl.c

@@ -150,6 +150,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
+	unsigned char cdb[BLK_MAX_CDB];
 	void *buffer;
 
 	if (hdr->interface_id != 'S')
@@ -166,6 +167,9 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;
 
+	if (copy_from_user(cdb, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+
 	reading = writing = 0;
 	buffer = NULL;
 	bio = NULL;
@@ -216,12 +220,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
-	if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) {
-		blk_put_request(rq);
-		return -EFAULT;
-	}
-
+	memcpy(rq->cmd, cdb, hdr->cmd_len);
 	if (sizeof(rq->cmd) != hdr->cmd_len)
 		memset(rq->cmd + hdr->cmd_len, 0,
 		       sizeof(rq->cmd) - hdr->cmd_len);
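The reordering above follows a common hardening pattern: read and validate user-controlled input into a local buffer before any resource is allocated, so that a faulting user pointer becomes a plain early return with nothing to unwind. A minimal user-space sketch of that shape (all names below are illustrative stand-ins, not kernel APIs):

/* Sketch of the error-path simplification made in sg_io(): copy the
 * user-supplied command into a stack buffer *before* allocating the
 * request, so a fault exits early instead of unwinding the allocation. */
#include <string.h>
#include <stdlib.h>
#include <errno.h>

#define BLK_MAX_CDB 16

struct request { unsigned char cmd[BLK_MAX_CDB]; };

/* stand-in for copy_from_user(): returns nonzero on fault */
static int copy_from_user_stub(void *dst, const void *src, size_t n)
{
	if (!src)
		return 1;
	memcpy(dst, src, n);
	return 0;
}

static int submit_cmd(const unsigned char *user_cmd, size_t cmd_len)
{
	unsigned char cdb[BLK_MAX_CDB];
	struct request *rq;

	if (cmd_len > BLK_MAX_CDB)
		return -EINVAL;
	/* copy first: nothing to undo if the user pointer is bad */
	if (copy_from_user_stub(cdb, user_cmd, cmd_len))
		return -EFAULT;

	rq = malloc(sizeof(*rq));	/* analogous to allocating the request */
	if (!rq)
		return -ENOMEM;
	memcpy(rq->cmd, cdb, cmd_len);
	memset(rq->cmd + cmd_len, 0, sizeof(rq->cmd) - cmd_len);
	/* ... submit and wait ... */
	free(rq);
	return 0;
}

int main(void)
{
	unsigned char cmd[6] = { 0x12, 0, 0, 0, 36, 0 };	/* INQUIRY */
	return submit_cmd(cmd, sizeof(cmd)) ? 1 : 0;
}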
drivers/md/raid5.c

@@ -40,6 +40,16 @@
 #define stripe_hash(conf, sect)	((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])
 
+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap.  There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This macro is used to determine the 'next' bio in the list, given the sector
+ * of the current stripe+device
+ */
+#define r5_next_bio(bio, sect) ( ( bio->bi_sector + (bio->bi_size>>9) < sect + STRIPE_SECTORS) ? bio->bi_next : NULL)
+
 /*
  * The following can be used to debug the driver
  */
@@ -613,7 +623,7 @@ static void copy_data(int frombio, struct bio *bio,
 	int i;
 
 	for (;bio && bio->bi_sector < sector+STRIPE_SECTORS;
-	      bio = bio->bi_next) {
+	      bio = r5_next_bio(bio, sector) ) {
 		int page_offset;
 		if (bio->bi_sector >= sector)
 			page_offset = (signed)(bio->bi_sector - sector) * 512;
@@ -738,7 +748,11 @@ static void compute_parity(struct stripe_head *sh, int method)
 	for (i = disks; i--;)
 		if (sh->dev[i].written) {
 			sector_t sector = sh->dev[i].sector;
-			copy_data(1, sh->dev[i].written, sh->dev[i].page, sector);
+			struct bio *wbi = sh->dev[i].written;
+			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
+				copy_data(1, wbi, sh->dev[i].page, sector);
+				wbi = r5_next_bio(wbi, sector);
+			}
 
 			set_bit(R5_LOCKED, &sh->dev[i].flags);
 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
@@ -791,8 +805,10 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 		bip = &sh->dev[dd_idx].towrite;
 	else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector)
+	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
+		BUG_ON((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector);
 		bip = & (*bip)->bi_next;
+	}
 /* FIXME do I need to worry about overlapping bion */
 	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
 		BUG();
@@ -813,7 +829,7 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
 			     bi && bi->bi_sector <= sector;
-		     bi = bi->bi_next) {
+		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
 				sector = bi->bi_sector + (bi->bi_size>>9);
 		}
@@ -883,7 +899,7 @@ static void handle_stripe(struct stripe_head *sh)
 			spin_unlock_irq(&conf->device_lock);
 			while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 				copy_data(0, rbi, dev->page, dev->sector);
-				rbi2 = rbi->bi_next;
+				rbi2 = r5_next_bio(rbi, dev->sector);
 				spin_lock_irq(&conf->device_lock);
 				if (--rbi->bi_phys_segments == 0) {
 					rbi->bi_next = return_bi;
@@ -928,7 +944,7 @@ static void handle_stripe(struct stripe_head *sh)
 		if (bi) to_write--;
 		while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-			struct bio *nextbi = bi->bi_next;
+			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
 			if (--bi->bi_phys_segments == 0) {
 				md_write_end(conf->mddev);
@@ -941,7 +957,7 @@ static void handle_stripe(struct stripe_head *sh)
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
 		while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
-			struct bio *bi2 = bi->bi_next;
+			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
 			if (--bi->bi_phys_segments == 0) {
 				md_write_end(conf->mddev);
@@ -957,7 +973,7 @@ static void handle_stripe(struct stripe_head *sh)
 			sh->dev[i].toread = NULL;
 			if (bi) to_read--;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-				struct bio *nextbi = bi->bi_next;
+				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					bi->bi_next = return_bi;
@@ -1000,7 +1016,7 @@ static void handle_stripe(struct stripe_head *sh)
 			wbi = dev->written;
 			dev->written = NULL;
 			while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-				wbi2 = wbi->bi_next;
+				wbi2 = r5_next_bio(wbi, dev->sector);
 				if (--wbi->bi_phys_segments == 0) {
 					md_write_end(conf->mddev);
 					wbi->bi_next = return_bi;
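The heart of this patch is the truncated walk encoded in r5_next_bio(): never follow bi_next past a bio that extends beyond the current stripe, since the next bio in the chain may belong to another stripe and be freed independently. A small self-contained user-space sketch, with deliberately simplified stand-ins for struct bio and STRIPE_SECTORS, shows the walk stopping at the first bio that crosses the stripe boundary:

/* User-space sketch (simplified types, not the kernel's struct bio) of the
 * truncated list walk that r5_next_bio() implements. */
#include <stdio.h>

#define STRIPE_SECTORS 8	/* assumption: 4K stripes, 512-byte sectors */

struct bio {
	unsigned long bi_sector;	/* start sector */
	unsigned int  bi_size;		/* length in bytes */
	struct bio   *bi_next;
};

/* mirror of the r5_next_bio() macro added in this commit */
static struct bio *r5_next_bio(struct bio *bio, unsigned long sect)
{
	return (bio->bi_sector + (bio->bi_size >> 9) < sect + STRIPE_SECTORS)
		? bio->bi_next : NULL;
}

int main(void)
{
	/* three bios in bi_sector order: the third starts in the next stripe */
	struct bio c = { 16, 2 * 512, NULL };
	struct bio b = { 6,  4 * 512, &c };	/* ends at sector 10, past 0..7 */
	struct bio a = { 0,  4 * 512, &b };	/* ends at sector 4 */
	unsigned long sector = 0;		/* this stripe covers sectors 0..7 */

	for (struct bio *bio = &a;
	     bio && bio->bi_sector < sector + STRIPE_SECTORS;
	     bio = r5_next_bio(bio, sector))
		printf("bio at sector %lu, %u sectors\n",
		       bio->bi_sector, bio->bi_size >> 9);
	/* prints a and b; the walk stops at b because it extends past the
	 * stripe, so c (which belongs to the next stripe) is never touched */
	return 0;
}

This is why each of the while loops above that previously stepped with bi->bi_next now steps with r5_next_bio(): the plain next pointer is only trustworthy while the current bio ends inside the stripe window.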
fs/hpfs/dir.c

@@ -65,6 +65,8 @@ int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	int c1, c2 = 0;
+	int ret = 0;
 
+	lock_kernel();
 
 	if (hpfs_sb(inode->i_sb)->sb_chk) {
 		if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) {
 			ret = -EFSERROR;
include/linux/blkdev.h

@@ -82,6 +82,8 @@ struct request_list {
 	wait_queue_head_t	wait[2];
 };
 
+#define BLK_MAX_CDB	16
+
 /*
  * try to put the fields that are referenced together in the same cacheline
  */
@@ -147,7 +149,7 @@ struct request {
 	 * when request is used as a packet command carrier
 	 */
 	unsigned int cmd_len;
-	unsigned char cmd[16];
+	unsigned char cmd[BLK_MAX_CDB];
 
 	unsigned int data_len;
 	void *data;
include/linux/list.h

@@ -208,6 +208,18 @@ static inline int list_empty(const struct list_head *head)
 	return head->next == head;
 }
 
+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
 static inline void __list_splice(struct list_head *list,
 				 struct list_head *head)
 {
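A short user-space sketch of why the double check matters: a concurrent list_add() (or list_del_init()) rewrites the head's two pointers with separate stores, so a plain head->next == head test can report "empty" halfway through the update, while the careful variant will not. The list primitives below are minimal re-implementations for illustration only:

/* Demonstrates the "half-pending update" that list_empty_careful() detects. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

static int list_empty_careful(const struct list_head *head)
{
	struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct list_head entry;

	/* freeze a half-finished list_add(&entry, &head), as another CPU
	 * might be caught mid-update: entry is wired and head.prev already
	 * points at it, but the final store to head.next hasn't happened */
	entry.next = head.next;		/* == &head */
	entry.prev = &head;
	head.prev = &entry;
	/* head.next = &entry;		   <- final store still pending */

	printf("list_empty:         %d\n", list_empty(&head));		/* 1 */
	printf("list_empty_careful: %d\n", list_empty_careful(&head));	/* 0 */
	return 0;
}

finish_wait() in kernel/fork.c below is the first user: the careful check lets it skip the queue lock entirely in the common case where the waker has already removed the entry, without being fooled by an update still in flight.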
kernel/exit.c

@@ -49,9 +49,11 @@ static void __unhash_process(struct task_struct *p)
 
 void release_task(struct task_struct * p)
 {
+	int zap_leader;
 	task_t *leader;
 	struct dentry *proc_dentry;
 
+repeat:
 	BUG_ON(p->state < TASK_ZOMBIE);
 
 	atomic_dec(&p->user->processes);
@@ -70,10 +72,21 @@ void release_task(struct task_struct * p)
 	 * group, and the leader is zombie, then notify the
 	 * group leader's parent process. (if it wants notification.)
 	 */
+	zap_leader = 0;
 	leader = p->group_leader;
-	if (leader != p && thread_group_empty(leader) &&
-	    leader->state == TASK_ZOMBIE && leader->exit_signal != -1)
+	if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
+		BUG_ON(leader->exit_signal == -1);
 		do_notify_parent(leader, leader->exit_signal);
+		/*
+		 * If we were the last child thread and the leader has
+		 * exited already, and the leader's parent ignores SIGCHLD,
+		 * then we are the one who should release the leader.
+		 *
+		 * do_notify_parent() will have marked it self-reaping in
+		 * that case.
+		 */
+		zap_leader = (leader->exit_signal == -1);
+	}
 
 	p->parent->cutime += p->utime + p->cutime;
 	p->parent->cstime += p->stime + p->cstime;
@@ -88,6 +101,10 @@ void release_task(struct task_struct * p)
 	proc_pid_flush(proc_dentry);
 	release_thread(p);
 	put_task_struct(p);
+
+	p = leader;
+	if (unlikely(zap_leader))
+		goto repeat;
 }
 
 /* we are using it only for SMP init */
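The zap_leader logic amounts to: releasing the last thread of a group may make its zombie leader releasable as well, and the goto loops back to reap it in the same call rather than recursing. A toy user-space sketch of just that control-flow shape (types, fields, and conditions are invented for illustration; the real test reads leader->exit_signal after do_notify_parent() has had a chance to mark the leader self-reaping):

#include <stdio.h>

struct task { const char *name; struct task *group_leader; int zombie; };

static void release(struct task *p)
{
	int zap_leader;
	struct task *leader;
repeat:
	leader = p->group_leader;
	/* the leader becomes releasable only once its last thread is gone */
	zap_leader = (leader != p && leader->zombie);
	printf("releasing %s\n", p->name);

	p = leader;
	if (zap_leader)
		goto repeat;	/* second pass releases the leader itself */
}

int main(void)
{
	struct task leader = { "leader", NULL, 1 };
	struct task thread = { "thread", &leader, 0 };
	leader.group_leader = &leader;
	release(&thread);	/* releases thread, then the zombie leader */
	return 0;
}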
kernel/fork.c

@@ -125,15 +125,28 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 
 EXPORT_SYMBOL(remove_wait_queue);
 
+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the the critical region).
+ */
 void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
 
-	__set_current_state(state);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
@@ -144,11 +157,11 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;
 
-	__set_current_state(state);
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
@@ -159,7 +172,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 	unsigned long flags;
 
 	__set_current_state(TASK_RUNNING);
-	if (!list_empty(&wait->task_list)) {
+	/*
+	 * We can check for list emptiness outside the lock
+	 * IFF:
+	 *  - we use the "careful" check that verifies both
+	 *    the next and prev pointers, so that there cannot
+	 *    be any half-pending updates in progress on other
+	 *    CPU's that we haven't seen yet (and that might
+	 *    still change the stack area.
+	 * and
+	 *  - all other users take the lock (ie we can only
+	 *    have _one_ other CPU that looks at or modifies
+	 *    the list).
+	 */
+	if (!list_empty_careful(&wait->task_list)) {
 		spin_lock_irqsave(&q->lock, flags);
 		list_del_init(&wait->task_list);
 		spin_unlock_irqrestore(&q->lock, flags);
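For reference, the caller-side idiom these helpers are designed for looks roughly like the sketch below (wq and condition are placeholders, not part of this commit). With set_current_state() performed after the queue add, a waker that observes the queue entry is also guaranteed to observe, and clear, the sleeping state, so the condition test below cannot miss a wakeup:

	/* sketch: sleep until `condition` becomes true */
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		schedule();
	}
	finish_wait(&wq, &wait);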