nexedi / linux / Commits / 39ceafe3

Commit 39ceafe3, authored Dec 15, 2003 by Jeff Garzik

    Merge redhat.com:/spare/repo/linux-2.5
    into redhat.com:/spare/repo/net-drivers-2.5

Parents: e90b21e2, e1f936a9

Showing 11 changed files with 114 additions and 28 deletions (+114 -28)
drivers/block/scsi_ioctl.c    +5   -1
drivers/md/raid5.c            +25  -9
drivers/net/wireless/airo.c   +2   -5
fs/hpfs/dir.c                 +2   -0
fs/libfs.c                    +1   -1
include/linux/blkdev.h        +3   -1
include/linux/list.h          +12  -0
kernel/exit.c                 +20  -3
kernel/fork.c                 +29  -3
net/ipv6/tcp_ipv6.c           +1   -1
net/sched/sch_htb.c           +14  -4
drivers/block/scsi_ioctl.c

@@ -150,6 +150,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
+	unsigned char cdb[BLK_MAX_CDB];
 	void *buffer;

 	if (hdr->interface_id != 'S')
...
@@ -166,6 +167,9 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	if (hdr->dxfer_len > (q->max_sectors << 9))
 		return -EIO;

+	if (copy_from_user(cdb, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+
 	reading = writing = 0;
 	buffer = NULL;
 	bio = NULL;
...
@@ -216,7 +220,7 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
-	memcpy(rq->cmd, hdr->cmdp, hdr->cmd_len);
+	memcpy(rq->cmd, cdb, hdr->cmd_len);
 	if (sizeof(rq->cmd) != hdr->cmd_len)
 		memset(rq->cmd + hdr->cmd_len, 0,
 		       sizeof(rq->cmd) - hdr->cmd_len);
...
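The change here is a userspace-pointer fix: the old code memcpy()'d the SCSI command block straight from hdr->cmdp, which is a user pointer, into rq->cmd. The fix stages it through an on-stack kernel buffer with copy_from_user(), which validates the pointer and faults safely. A minimal sketch of the staging pattern; stage_cdb() is a hypothetical helper, not a kernel function, and the fields of hdr and rq are the ones used in the diff:

/* Sketch: staging a user-supplied CDB through kernel memory. */
static int stage_cdb(struct request *rq, const struct sg_io_hdr *hdr)
{
	unsigned char cdb[BLK_MAX_CDB];

	if (hdr->cmd_len > BLK_MAX_CDB)	/* bound check; presumably done
					 * in the truncated context above */
		return -EINVAL;
	/* copy_from_user() validates the user pointer; it returns the
	 * number of bytes it could NOT copy, so non-zero means fault. */
	if (copy_from_user(cdb, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	rq->cmd_len = hdr->cmd_len;
	memcpy(rq->cmd, cdb, hdr->cmd_len);	/* kernel-to-kernel copy */
	memset(rq->cmd + hdr->cmd_len, 0,	/* zero-pad the tail */
	       sizeof(rq->cmd) - hdr->cmd_len);
	return 0;
}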
drivers/md/raid5.c

@@ -40,6 +40,16 @@
 #define stripe_hash(conf, sect)	((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])

+/* bio's attached to a stripe+device for I/O are linked together in bi_sector
+ * order without overlap.  There may be several bio's per stripe+device, and
+ * a bio could span several devices.
+ * When walking this list for a particular stripe+device, we must never proceed
+ * beyond a bio that extends past this device, as the next bio might no longer
+ * be valid.
+ * This macro is used to determine the 'next' bio in the list, given the sector
+ * of the current stripe+device
+ */
+#define r5_next_bio(bio, sect) ( ( bio->bi_sector + (bio->bi_size>>9) < sect + STRIPE_SECTORS) ? bio->bi_next : NULL)
+
 /*
  * The following can be used to debug the driver
  */
...
@@ -613,7 +623,7 @@ static void copy_data(int frombio, struct bio *bio,
 	int i;

 	for (; bio && bio->bi_sector < sector + STRIPE_SECTORS;
-	      bio = bio->bi_next) {
+	      bio = r5_next_bio(bio, sector) ) {
 		int page_offset;
 		if (bio->bi_sector >= sector)
 			page_offset = (signed)(bio->bi_sector - sector) * 512;
...
@@ -738,7 +748,11 @@ static void compute_parity(struct stripe_head *sh, int method)
 	for (i = disks; i--; )
 		if (sh->dev[i].written) {
 			sector_t sector = sh->dev[i].sector;
-			copy_data(1, sh->dev[i].written, sh->dev[i].page, sector);
+			struct bio *wbi = sh->dev[i].written;
+
+			while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) {
+				copy_data(1, wbi, sh->dev[i].page, sector);
+				wbi = r5_next_bio(wbi, sector);
+			}

 			set_bit(R5_LOCKED, &sh->dev[i].flags);
 			set_bit(R5_UPTODATE, &sh->dev[i].flags);
...
@@ -791,8 +805,10 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 		bip = &sh->dev[dd_idx].towrite;
 	else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector)
+	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
+		BUG_ON((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector);
 		bip = &(*bip)->bi_next;
+	}
 /* FIXME do I need to worry about overlapping bion */
 	if (*bip && bi->bi_next && (*bip) != bi->bi_next)
 		BUG();
...
@@ -813,7 +829,7 @@ static void add_stripe_bio (struct stripe_head *sh, struct bio *bi, int dd_idx,
 		for (bi = sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
 			     bi && bi->bi_sector <= sector;
-		     bi = bi->bi_next) {
+		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector) ) {
 			if (bi->bi_sector + (bi->bi_size >> 9) >= sector)
 				sector = bi->bi_sector + (bi->bi_size >> 9);
 		}
...
@@ -883,7 +899,7 @@ static void handle_stripe(struct stripe_head *sh)
 				spin_unlock_irq(&conf->device_lock);
 				while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) {
 					copy_data(0, rbi, dev->page, dev->sector);
-					rbi2 = rbi->bi_next;
+					rbi2 = r5_next_bio(rbi, dev->sector);
 					spin_lock_irq(&conf->device_lock);
 					if (--rbi->bi_phys_segments == 0) {
 						rbi->bi_next = return_bi;
...
@@ -928,7 +944,7 @@ static void handle_stripe(struct stripe_head *sh)
 			if (bi) to_write--;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-				struct bio *nextbi = bi->bi_next;
+				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					md_write_end(conf->mddev);
...
@@ -941,7 +957,7 @@ static void handle_stripe(struct stripe_head *sh)
 			bi = sh->dev[i].written;
 			sh->dev[i].written = NULL;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS) {
-				struct bio *bi2 = bi->bi_next;
+				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					md_write_end(conf->mddev);
...
@@ -957,7 +973,7 @@ static void handle_stripe(struct stripe_head *sh)
 			sh->dev[i].toread = NULL;
 			if (bi) to_read--;
 			while (bi && bi->bi_sector < sh->dev[i].sector + STRIPE_SECTORS){
-				struct bio *nextbi = bi->bi_next;
+				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
 				if (--bi->bi_phys_segments == 0) {
 					bi->bi_next = return_bi;
...
@@ -1000,7 +1016,7 @@ static void handle_stripe(struct stripe_head *sh)
 				wbi = dev->written;
 				dev->written = NULL;
 				while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) {
-					wbi2 = wbi->bi_next;
+					wbi2 = r5_next_bio(wbi, dev->sector);
 					if (--wbi->bi_phys_segments == 0) {
 						md_write_end(conf->mddev);
 						wbi->bi_next = return_bi;
...
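The theme of every hunk above is the same: any walk along a per-stripe bio list must advance with r5_next_bio() instead of raw ->bi_next, so the walk halts at the first bio that extends past this stripe+device, whose ->bi_next may already belong to another device's list. A sketch of the resulting walk shape, using the macro and constants from the diff; the function name is illustrative:

/* Sketch: bounded walk over one stripe+device's bio list.
 * r5_next_bio() yields NULL as soon as a bio spills past this
 * stripe, since following ->bi_next beyond it would be invalid. */
static void for_each_stripe_bio(struct bio *bio, sector_t sector)
{
	while (bio && bio->bi_sector < sector + STRIPE_SECTORS) {
		/* ... operate on the overlap of 'bio' with this stripe ... */
		bio = r5_next_bio(bio, sector);	/* never plain bio->bi_next */
	}
}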
drivers/net/wireless/airo.c

@@ -2466,11 +2466,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
 			OUT4500(apriv, EVACK, EV_MIC);
 #ifdef MICSUPPORT
 			if (test_bit(FLAG_MIC_CAPABLE, &apriv->flags)) {
-				if (down_trylock(&apriv->sem) != 0) {
-					set_bit(JOB_MIC, &apriv->flags);
-					wake_up_interruptible(&apriv->thr_wait);
-				} else
-					micinit(apriv);
+				set_bit(JOB_MIC, &apriv->flags);
+				wake_up_interruptible(&apriv->thr_wait);
 			}
 #endif
 		}
...
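The removed branch ran micinit() directly from the interrupt handler whenever down_trylock() happened to win apriv->sem; the replacement always defers to the driver's kernel thread, which runs in process context and can sleep on the semaphore safely. A sketch of the defer-to-thread pattern; the handler body is illustrative, and the private struct name is assumed from the driver:

/* Sketch: defer work from IRQ context to a helper thread.
 * The handler only records a job bit and wakes the thread. */
static irqreturn_t sketch_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	struct airo_info *apriv = dev_id;	/* driver private data */

	set_bit(JOB_MIC, &apriv->flags);	  /* record pending work */
	wake_up_interruptible(&apriv->thr_wait);  /* kick the thread */
	return IRQ_HANDLED;
}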
fs/hpfs/dir.c

@@ -65,6 +65,8 @@ int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	int c1, c2 = 0;
+	int ret = 0;

+	lock_kernel();
 	if (hpfs_sb(inode->i_sb)->sb_chk) {
 		if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) {
 			ret = -EFSERROR;
...
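The added lines put hpfs_readdir() under the Big Kernel Lock and route errors through a local ret, which implies the single-exit shape common in 2.6-era filesystems: error paths set ret and jump to a label that drops the lock. A sketch of that shape, assuming an out: label beyond the truncated context; the check name is hypothetical:

/* Sketch: BKL + single-exit idiom implied by the added lines. */
int sketch_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	int ret = 0;

	lock_kernel();
	if (consistency_check_failed(filp)) {	/* hypothetical check */
		ret = -EFSERROR;
		goto out;	/* never return with the BKL held */
	}
	/* ... walk the directory, emitting entries via filldir() ... */
out:
	unlock_kernel();
	return ret;
}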
fs/libfs.c

@@ -79,6 +79,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 			loff_t n = file->f_pos - 2;

 			spin_lock(&dcache_lock);
+			list_del(&cursor->d_child);
 			p = file->f_dentry->d_subdirs.next;
 			while (n && p != &file->f_dentry->d_subdirs) {
 				struct dentry *next;
...
@@ -87,7 +88,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin)
 				n--;
 				p = p->next;
 			}
-			list_del(&cursor->d_child);
 			list_add_tail(&cursor->d_child, p);
 			spin_unlock(&dcache_lock);
 		}
...
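Net effect of the two hunks: the cursor dentry is unhooked from d_subdirs before the counting walk rather than after it, so the walk that skips n entries can never count the cursor itself as one of them. The combined result reads as:

/* Combined result of the two hunks (dcache_dir_lseek body): */
spin_lock(&dcache_lock);
list_del(&cursor->d_child);		/* cursor out before counting */
p = file->f_dentry->d_subdirs.next;
while (n && p != &file->f_dentry->d_subdirs) {
	/* ... skip unusable dentries, decrementing n ... */
	p = p->next;
}
list_add_tail(&cursor->d_child, p);	/* re-insert at the new offset */
spin_unlock(&dcache_lock);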
include/linux/blkdev.h

@@ -82,6 +82,8 @@ struct request_list {
 	wait_queue_head_t wait[2];
 };

+#define BLK_MAX_CDB	16
+
 /*
  * try to put the fields that are referenced together in the same cacheline
  */
...
@@ -147,7 +149,7 @@ struct request {
 	 * when request is used as a packet command carrier
 	 */
 	unsigned int cmd_len;
-	unsigned char cmd[16];
+	unsigned char cmd[BLK_MAX_CDB];

 	unsigned int data_len;
 	void *data;
...
include/linux/list.h

@@ -208,6 +208,18 @@ static inline int list_empty(const struct list_head *head)
 	return head->next == head;
 }

+/**
+ * list_empty_careful - tests whether a list is
+ * empty _and_ checks that no other CPU might be
+ * in the process of still modifying either member
+ * @head: the list to test.
+ */
+static inline int list_empty_careful(const struct list_head *head)
+{
+	struct list_head *next = head->next;
+	return (next == head) && (next == head->prev);
+}
+
 static inline void __list_splice(struct list_head *list,
 				 struct list_head *head)
 {
...
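list_empty_careful() reads ->next once and reports empty only if ->prev agrees, so a list_del_init() in flight on another CPU, with one pointer rewritten and the other not yet, can never be mistaken for an empty list. It is only meaningful when the sole concurrent operation is list_del_init() and all other users hold the lock, which is exactly how finish_wait() in kernel/fork.c below uses it. A self-contained userspace reduction of the check (plain C, not kernel code):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Careful check: empty iff next == head AND prev == head.
 * A torn state (next updated, prev not yet) reads as non-empty. */
static inline int list_empty_careful(const struct list_head *head)
{
	const struct list_head *next = head->next;
	return (next == head) && (next == head->prev);
}

int main(void)
{
	struct list_head h = { &h, &h };
	struct list_head e;

	printf("empty: %d\n", list_empty_careful(&h));	/* prints 1 */

	/* simulate a half-finished removal seen from another CPU */
	h.next = &h;
	h.prev = &e;
	printf("torn:  %d\n", list_empty_careful(&h));	/* prints 0 */
	return 0;
}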
kernel/exit.c

@@ -49,9 +49,11 @@ static void __unhash_process(struct task_struct *p)
 void release_task(struct task_struct * p)
 {
+	int zap_leader;
+	task_t *leader;
 	struct dentry *proc_dentry;

 repeat:
 	BUG_ON(p->state < TASK_ZOMBIE);

 	atomic_dec(&p->user->processes);
...
@@ -70,10 +72,21 @@ void release_task(struct task_struct * p)
 	 * group, and the leader is zombie, then notify the
 	 * group leader's parent process. (if it wants notification.)
 	 */
+	zap_leader = 0;
 	leader = p->group_leader;
-	if (leader != p && thread_group_empty(leader) &&
-	    leader->state == TASK_ZOMBIE && leader->exit_signal != -1)
-		do_notify_parent(leader, leader->exit_signal);
+	if (leader != p && thread_group_empty(leader) && leader->state == TASK_ZOMBIE) {
+		BUG_ON(leader->exit_signal == -1);
+		do_notify_parent(leader, leader->exit_signal);
+		/*
+		 * If we were the last child thread and the leader has
+		 * exited already, and the leader's parent ignores SIGCHLD,
+		 * then we are the one who should release the leader.
+		 *
+		 * do_notify_parent() will have marked it self-reaping in
+		 * that case.
+		 */
+		zap_leader = (leader->exit_signal == -1);
+	}

 	p->parent->cutime += p->utime + p->cutime;
 	p->parent->cstime += p->stime + p->cstime;
...
@@ -88,6 +101,10 @@ void release_task(struct task_struct * p)
 	proc_pid_flush(proc_dentry);
 	release_thread(p);
 	put_task_struct(p);
+
+	p = leader;
+	if (unlikely(zap_leader))
+		goto repeat;
 }

 /* we are using it only for SMP init */
...
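The zap_leader logic handles a chain: releasing the last child thread may notify the zombie leader's parent, and if do_notify_parent() marks the leader self-reaping (its exit_signal becomes -1), this same call must then release the leader as well. The goto repeat with p = leader does that iteratively instead of recursing. Condensed from the hunks above, the new control-flow skeleton is:

/* Control-flow skeleton condensed from the hunks above. */
repeat:
	/* ... unhash p, flush its /proc entry ... */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) &&
	    leader->state == TASK_ZOMBIE) {
		do_notify_parent(leader, leader->exit_signal);
		/* parent ignores SIGCHLD => leader is now self-reaping */
		zap_leader = (leader->exit_signal == -1);
	}
	/* ... release p's thread and task_struct ... */
	p = leader;
	if (unlikely(zap_leader))
		goto repeat;		/* release the leader too, no recursion */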
kernel/fork.c

@@ -125,15 +125,28 @@ void remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
 EXPORT_SYMBOL(remove_wait_queue);

+/*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
+ * wake-function that tests for the wait-queue being active
+ * will be guaranteed to see waitqueue addition _or_ subsequent
+ * tests in this thread will see the wakeup having taken place.
+ *
+ * The spin_unlock() itself is semi-permeable and only protects
+ * one way (it only protects stuff inside the critical region and
+ * stops them from bleeding out - it would still allow subsequent
+ * loads to move into the the critical region).
+ */
 void
 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;

-	__set_current_state(state);
 	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
...
@@ -144,11 +157,11 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 {
 	unsigned long flags;

-	__set_current_state(state);
 	wait->flags |= WQ_FLAG_EXCLUSIVE;
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
...
@@ -159,7 +172,20 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
 	unsigned long flags;

 	__set_current_state(TASK_RUNNING);
-	if (!list_empty(&wait->task_list)) {
+	/*
+	 * We can check for list emptiness outside the lock
+	 * IFF:
+	 *  - we use the "careful" check that verifies both
+	 *    the next and prev pointers, so that there cannot
+	 *    be any half-pending updates in progress on other
+	 *    CPU's that we haven't seen yet (and that might
+	 *    still change the stack area.
+	 * and
+	 *  - all other users take the lock (ie we can only
+	 *    have _one_ other CPU that looks at or modifies
+	 *    the list).
+	 */
+	if (!list_empty_careful(&wait->task_list)) {
 		spin_lock_irqsave(&q->lock, flags);
 		list_del_init(&wait->task_list);
 		spin_unlock_irqrestore(&q->lock, flags);
...
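The reordering matters because set_current_state(), unlike __set_current_state(), issues a memory barrier: once the task is on the queue, either the waker's test of the queue sees the addition, or this thread's subsequent condition check sees the waker's store. These helpers are meant to be used in the standard 2.6 wait loop, which re-checks the condition after queueing; for reference, the standard idiom (not part of this diff, wq and condition assumed initialized elsewhere):

/* Standard consumer of prepare_to_wait()/finish_wait() (2.6 idiom). */
static wait_queue_head_t wq;	/* assume initialized with init_waitqueue_head() */
static int condition;		/* set by the waker before wake_up() */

void wait_for_condition(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition)			/* re-checked after queueing */
			break;
		if (signal_pending(current))
			break;			/* interrupted by a signal */
		schedule();
	}
	finish_wait(&wq, &wait);
}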
net/ipv6/tcp_ipv6.c

@@ -222,7 +222,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 		write_lock(lock);
 	}

-	sk_add_node(sk, list);
+	__sk_add_node(sk, list);
 	sock_prot_inc_use(sk->sk_prot);
 	write_unlock(lock);
 }
...
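This one-token change looks cosmetic but, as best as can be reconstructed from the 2.6 sock.h, it is a reference-count fix: sk_add_node() is sock_hold() plus the raw insert, while __sk_add_node() is the raw insert alone. The TCP hash tables do not keep a reference of their own (a socket is always unhashed before it can be freed), so the plain variant leaked one reference per hashed IPv6 socket. For comparison, paraphrased from include/net/sock.h of that era:

/* Paraphrased from include/net/sock.h (2.6-era): */
static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);	/* insert only */
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);		/* extra reference the TCP hash must not take */
	__sk_add_node(sk, list);
}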
net/sched/sch_htb.c

@@ -74,7 +74,7 @@
 #define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
 #define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
 #define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x3000d	/* major must be matched with number suplied by TC as version */
+#define HTB_VER 0x3000e	/* major must be matched with number suplied by TC as version */

 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 #error "Mismatched sch_htb.c and pkt_sch.h"
...
@@ -290,6 +290,11 @@ static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
  * then finish and return direct queue.
  */
 #define HTB_DIRECT (struct htb_class*)-1

+static inline u32 htb_classid(struct htb_class *cl)
+{
+	return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
+}
+
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct htb_sched *q = (struct htb_sched *)sch->data;
...
@@ -703,7 +708,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	sch->q.qlen++;
 	sch->stats.packets++; sch->stats.bytes += skb->len;
-	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+	HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",htb_classid(cl),skb);
 	return NET_XMIT_SUCCESS;
 }
...
@@ -731,7 +736,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		htb_activate(q, cl);

 	sch->q.qlen++;
-	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
+	HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",htb_classid(cl),skb);
 	return NET_XMIT_SUCCESS;
 }
...
@@ -1381,11 +1386,16 @@ static void htb_destroy(struct Qdisc* sch)
 #ifdef HTB_RATECM
 	del_timer_sync(&q->rttim);
 #endif
+	/* This line used to be after htb_destroy_class call below
+	   and surprisingly it worked in 2.4. But it must precede it
+	   because filter need its target class alive to be able to call
+	   unbind_filter on it (without Oops). */
+	htb_destroy_filters(&q->filter_list);
+
 	while (!list_empty(&q->root))
 		htb_destroy_class(sch, list_entry(q->root.next,
 					struct htb_class, sibling));
-	htb_destroy_filters(&q->filter_list);

 	__skb_queue_purge(&q->direct_queue);
 }
...
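Besides bumping the HTB version and fixing the destroy ordering, the patch fixes a sentinel dereference in the debug calls: HTB_DIRECT is (struct htb_class*)-1, which is non-NULL, so the old expression cl ? cl->classid : 0 would dereference the sentinel whenever a packet was classified to the direct queue with HTB debugging enabled. htb_classid() filters both cases first; written long-hand it is:

/* htb_classid() from the diff, written long-hand: both NULL and the
 * HTB_DIRECT sentinel must be filtered before touching cl->classid. */
static inline u32 htb_classid(struct htb_class *cl)
{
	if (cl == NULL || cl == HTB_DIRECT)
		return TC_H_UNSPEC;	/* no real class to report */
	return cl->classid;		/* safe dereference */
}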