Commit 6490b110
Authored Nov 09, 2003 by Linus Torvalds

Merge bk://gkernel.bkbits.net/libata-2.5
into home.osdl.org:/home/torvalds/v2.5/linux

Parents: ef38b911 9ef63c5e

42 changed files with 215 additions and 177 deletions (+215 -177)
arch/arm/kernel/signal.c            +12  -9
arch/i386/mach-voyager/voyager_smp.c +2  -2
arch/ppc/kernel/vmlinux.lds.S       +11  -7
arch/x86_64/kernel/entry.S           +2  -2
arch/x86_64/kernel/pci-gart.c        +1  -2
arch/x86_64/mm/fault.c               +9  -6
drivers/base/class.c                 +1  -1
drivers/block/as-iosched.c           +3  -3
drivers/ide/Kconfig                  +2  -1
drivers/input/mouse/psmouse-base.c   +7  -16
drivers/md/raid1.c                   +1  -1
drivers/net/3c509.c                  +2  -0
drivers/pci/quirks.c                 +1  -1
drivers/usb/host/ehci-hcd.c          +3  -0
drivers/usb/net/kaweth.c             +3  -0
drivers/usb/net/usbnet.c             +3  -0
drivers/usb/storage/usb.c           +15  -4
fs/char_dev.c                        +1  -1
fs/cramfs/inode.c                   +24  -15
fs/dquot.c                           +6  -5
fs/ext2/balloc.c                     +6  -4
fs/jbd/journal.c                    +12  -2
fs/nfsd/vfs.c                        +0  -1
include/asm-i386/checksum.h          +2  -1
include/asm-x86_64/checksum.h        +2  -1
include/asm-x86_64/desc.h            +1  -1
include/linux/sched.h                +5  -1
include/linux/signal.h               +1  -1
include/linux/times.h                +1  -0
kernel/posix-timers.c                +1  -24
kernel/sched.c                      +34  -25
kernel/signal.c                      +7  -5
kernel/timer.c                       +0  -2
net/core/flow.c                      +8  -7
net/core/link_watch.c                +5  -2
net/core/skbuff.c                    +3  -3
net/ipv4/netfilter/arpt_mangle.c     +2  -2
net/ipv4/route.c                     +2  -1
net/ipv4/tcp_ipv4.c                  +5  -3
net/ipv6/addrconf.c                  +1  -10
net/ipv6/route.c                     +3  -2
net/ipv6/tcp_ipv6.c                  +5  -3
arch/arm/kernel/signal.c

@@ -497,18 +497,21 @@ handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset,
 	 */
 	ret |= !valid_user_regs(regs);
 
-	/*
-	 * Block the signal if we were unsuccessful.
-	 */
-	if (ret != 0 || !(ka->sa.sa_flags & SA_NODEFER)) {
-		spin_lock_irq(&tsk->sighand->siglock);
-		sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask);
-		sigaddset(&tsk->blocked, sig);
-		recalc_sigpending();
-		spin_unlock_irq(&tsk->sighand->siglock);
-	}
+	if (ret == 0) {
+		if (ka->sa.sa_flags & SA_ONESHOT)
+			ka->sa.sa_handler = SIG_DFL;
+		if (!(ka->sa.sa_flags & SA_NODEFER)) {
+			spin_lock_irq(&tsk->sighand->siglock);
+			sigorsets(&tsk->blocked, &tsk->blocked, &ka->sa.sa_mask);
+			sigaddset(&tsk->blocked, sig);
+			recalc_sigpending();
+			spin_unlock_irq(&tsk->sighand->siglock);
+		}
+		return;
+	}
arch/i386/mach-voyager/voyager_smp.c

@@ -245,8 +245,8 @@ static __u32 cpu_booted_map;
 static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* This is for the new dynamic CPU boot code */
-volatile cpumask_t cpu_callin_map = CPU_MASK_NONE;
-volatile cpumask_t cpu_callout_map = CPU_MASK_NONE;
+cpumask_t cpu_callin_map = CPU_MASK_NONE;
+cpumask_t cpu_callout_map = CPU_MASK_NONE;
 
 /* The per processor IRQ masks (these are usually kept in sync) */
 static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
arch/ppc/kernel/vmlinux.lds.S

@@ -47,13 +47,17 @@ SECTIONS
   .fixup : { *(.fixup) }
 
-  __start___ex_table = .;
-  __ex_table : { *(__ex_table) }
-  __stop___ex_table = .;
-
-  __start___bug_table = .;
-  __bug_table : { *(__bug_table) }
-  __stop___bug_table = .;
+  __ex_table : {
+    __start___ex_table = .;
+    *(__ex_table)
+    __stop___ex_table = .;
+  }
+
+  __bug_table : {
+    __start___bug_table = .;
+    *(__bug_table)
+    __stop___bug_table = .;
+  }
 
   /* Read-write section, merged into data segment: */
   . = ALIGN(4096);
arch/x86_64/kernel/entry.S

@@ -219,8 +219,8 @@ tracesys:
 	movq %r10,%rcx	/* fixup for C */
 	call *sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
-	SAVE_REST
-1:	movq %rsp,%rdi
+1:	SAVE_REST
+	movq %rsp,%rdi
 	call syscall_trace
 	RESTORE_TOP_OF_STACK %rbx
 	RESTORE_REST
arch/x86_64/kernel/pci-gart.c

@@ -395,7 +395,7 @@ static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
 	for (i = start; i < stopat; i++) {
 		struct scatterlist *s = &sg[i];
 		unsigned long start_addr = s->dma_address;
-		BUG_ON(i > 0 && s->offset);
+		BUG_ON(i > start && s->offset);
 		if (i == start) {
 			*sout = *s;
 			sout->dma_address = iommu_bus_base;

@@ -410,7 +410,6 @@ static int __pci_map_cont(struct scatterlist *sg, int start, int stopat,
 			addr += PAGE_SIZE;
 			iommu_page++;
 		}
-		BUG_ON(i > 0 && addr % PAGE_SIZE);
 	}
 	BUG_ON(iommu_page - iommu_start != pages);
 	return 0;
arch/x86_64/mm/fault.c

@@ -126,12 +126,6 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr)
 			break;
 		}
 	}
-#if 1
-	if (prefetch)
-		printk("%s: prefetch caused page fault at %lx/%lx\n",
-		       current->comm, regs->rip, addr);
-#endif
 	return prefetch;
 }

@@ -241,6 +235,15 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (unlikely(in_atomic() || !mm))
 		goto bad_area_nosemaphore;
 
+	/* Work around K8 erratum #100
+	   K8 in compat mode occasionally jumps to illegal addresses >4GB.
+	   We catch this here in the page fault handler because these
+	   addresses are not reachable. Just detect this case and return.
+	   Any code segment in LDT is compatibility mode. */
+	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
+	    (address >> 32))
+		return;
+
 again:
 	down_read(&mm->mmap_sem);
drivers/base/class.c

@@ -255,6 +255,7 @@ static decl_subsys(class_obj, &ktype_class_device, &class_hotplug_ops);
 void class_device_initialize(struct class_device *class_dev)
 {
+	kobj_set_kset_s(class_dev, class_obj_subsys);
 	kobject_init(&class_dev->kobj);
 	INIT_LIST_HEAD(&class_dev->node);
 }

@@ -277,7 +278,6 @@ int class_device_add(struct class_device *class_dev)
 	/* first, register with generic layer. */
 	kobject_set_name(&class_dev->kobj, class_dev->class_id);
-	kobj_set_kset_s(class_dev, class_obj_subsys);
 	if (parent)
 		class_dev->kobj.parent = &parent->subsys.kset.kobj;
drivers/block/as-iosched.c

@@ -915,9 +915,10 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	if (unlikely(arq->state != AS_RQ_DISPATCHED))
 		return;
 
-	if (ad->changed_batch && ad->nr_dispatched == 1) {
-		WARN_ON(ad->batch_data_dir == arq->is_sync);
+	if (!blk_fs_request(rq))
+		return;
 
+	if (ad->changed_batch && ad->nr_dispatched == 1) {
 		kblockd_schedule_work(&ad->antic_work);
 		ad->changed_batch = 0;

@@ -933,7 +934,6 @@ static void as_completed_request(request_queue_t *q, struct request *rq)
 	 * and writeback caches
 	 */
 	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
-		WARN_ON(ad->batch_data_dir != REQ_SYNC);
 		update_write_batch(ad);
 		ad->current_batch_expires = jiffies +
 			ad->batch_expire[REQ_SYNC];
drivers/ide/Kconfig

@@ -428,9 +428,10 @@ config BLK_DEV_IDEDMA_PCI
 if BLK_DEV_IDEDMA_PCI
 
+# TCQ is disabled for now
 config BLK_DEV_IDE_TCQ
 	bool "ATA tagged command queueing (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
+	depends on EXPERIMENTAL && n
 	help
 	  Support for tagged command queueing on ATA disk drives. This enables
 	  the IDE layer to have multiple in-flight requests on hardware that
drivers/input/mouse/psmouse-base.c

@@ -36,12 +36,10 @@ MODULE_PARM(psmouse_resetafter, "i");
 MODULE_PARM_DESC(psmouse_resetafter, "Reset Synaptics Touchpad after so many bad packets (0 = never).");
 MODULE_LICENSE("GPL");
 
-#define PSMOUSE_LOGITECH_SMARTSCROLL	1
-
 static int psmouse_noext;
-int psmouse_resolution;
-unsigned int psmouse_rate;
-int psmouse_smartscroll = PSMOUSE_LOGITECH_SMARTSCROLL;
+int psmouse_resolution = 200;
+unsigned int psmouse_rate = 100;
+int psmouse_smartscroll = 1;
 unsigned int psmouse_resetafter;
 
 static char *psmouse_protocols[] = {"None", "PS/2", "PS2++", "PS2T++", "GenPS/2", "ImPS/2", "ImExPS/2", "SynPS/2"};

@@ -466,22 +464,15 @@ static void psmouse_initialize(struct psmouse *psmouse)
 {
 	unsigned char param[2];
 
 	/*
-	 * We set the mouse report rate.
+	 * We set the mouse report rate, resolution and scaling.
 	 */
 
-	if (psmouse_rate)
+	if (!psmouse_noext) {
 		psmouse_set_rate(psmouse);
-
-	/*
-	 * We also set the resolution and scaling.
-	 */
-
-	if (psmouse_resolution)
 		psmouse_set_resolution(psmouse);
-
-	psmouse_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11);
+		psmouse_command(psmouse, NULL, PSMOUSE_CMD_SETSCALE11);
+	}
 
 	/*
 	 * We set the mouse into streaming mode.
drivers/md/raid1.c

@@ -841,7 +841,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
 	}
 
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
-		md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
+		md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 1);
 		put_buf(r1_bio);
 	}
 }
drivers/net/3c509.c

@@ -74,7 +74,9 @@ static int max_interrupt_work = 10;
 #include <linux/config.h>
 #include <linux/module.h>
+#ifdef CONFIG_MCA
 #include <linux/mca.h>
+#endif
 #include <linux/isapnp.h>
 #include <linux/string.h>
 #include <linux/interrupt.h>
drivers/pci/quirks.c

@@ -646,7 +646,7 @@ static void __init quirk_disable_pxb(struct pci_dev *pdev)
 int interrupt_line_quirk;
 
-static void __init quirk_via_bridge(struct pci_dev *pdev)
+static void __devinit quirk_via_bridge(struct pci_dev *pdev)
 {
 	if (pdev->devfn == 0)
 		interrupt_line_quirk = 1;
drivers/usb/host/ehci-hcd.c

@@ -426,8 +426,11 @@ static int ehci_start (struct usb_hcd *hcd)
 	 */
 	if (HCC_64BIT_ADDR(hcc_params)) {
 		writel(0, &ehci->regs->segment);
+#if 0
+// this is deeply broken on almost all architectures
 		if (!pci_set_dma_mask (ehci->hcd.pdev, 0xffffffffffffffffULL))
 			ehci_info (ehci, "enabled 64bit PCI DMA\n");
+#endif
 	}
 
 	/* help hc dma work well with cachelines */
drivers/usb/net/kaweth.c

@@ -1120,8 +1120,11 @@ static int kaweth_probe(
 	usb_set_intfdata(intf, kaweth);
 
+#if 0
+// dma_supported() is deeply broken on almost all architectures
 	if (dma_supported (&intf->dev, 0xffffffffffffffffULL))
 		kaweth->net->features |= NETIF_F_HIGHDMA;
+#endif
 
 	SET_NETDEV_DEV(netdev, &intf->dev);
 	if (register_netdev(netdev) != 0) {
drivers/usb/net/usbnet.c

@@ -2972,9 +2972,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 	strcpy (net->name, "usb%d");
 	memcpy (net->dev_addr, node_id, sizeof node_id);
 
+#if 0
+// dma_supported() is deeply broken on almost all architectures
 	// possible with some EHCI controllers
 	if (dma_supported (&udev->dev, 0xffffffffffffffffULL))
 		net->features |= NETIF_F_HIGHDMA;
+#endif
 
 	net->change_mtu = usbnet_change_mtu;
 	net->get_stats = usbnet_get_stats;
drivers/usb/storage/usb.c

@@ -417,10 +417,21 @@ static int usb_stor_control_thread(void * __us)
 		scsi_unlock(host);
 	} /* for (;;) */
 
-	/* notify the exit routine that we're actually exiting now */
-	complete(&(us->notify));
-
-	return 0;
+	/* notify the exit routine that we're actually exiting now
+	 *
+	 * complete()/wait_for_completion() is similar to up()/down(),
+	 * except that complete() is safe in the case where the structure
+	 * is getting deleted in a parallel mode of execution (i.e. just
+	 * after the down() -- that's necessary for the thread-shutdown
+	 * case.
+	 *
+	 * complete_and_exit() goes even further than this -- it is safe in
+	 * the case that the thread of the caller is going away (not just
+	 * the structure) -- this is necessary for the module-remove case.
+	 * This is important in preemption kernels, which transfer the flow
+	 * of execution immediately upon a complete().
+	 */
+	complete_and_exit(&(us->notify), 0);
 }
 
 /***********************************************************************
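The comment above spells out why the thread leaves through complete_and_exit() rather than complete() followed by return. A minimal sketch of that shutdown hand-off, assuming the 2.6-era kernel_thread()/complete_and_exit()/wait_for_completion() API; my_dev, my_thread_fn, my_start and my_stop are illustrative names, not part of usb-storage:

#include <linux/completion.h>
#include <linux/sched.h>

struct my_dev {
	struct completion notify;	/* signalled when the thread exits */
	volatile int exit_requested;
};

static int my_thread_fn(void *data)
{
	struct my_dev *dev = data;

	while (!dev->exit_requested) {
		/* ... service one request ... */
		schedule();
	}
	/* Wake the waiter and exit atomically: safe even if the module is
	 * unloaded the instant the waiter returns from wait_for_completion(). */
	complete_and_exit(&dev->notify, 0);
}

static void my_start(struct my_dev *dev)
{
	init_completion(&dev->notify);
	dev->exit_requested = 0;
	kernel_thread(my_thread_fn, dev, CLONE_KERNEL);
}

static void my_stop(struct my_dev *dev)
{
	dev->exit_requested = 1;
	wait_for_completion(&dev->notify);	/* thread is definitely gone */
}

After my_stop() returns, both the thread and its stack are gone, so the caller may free the structure or let the module unload.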
fs/char_dev.c

@@ -434,7 +434,7 @@ void cdev_init(struct cdev *cdev, struct file_operations *fops)
 static struct kobject *base_probe(dev_t dev, int *part, void *data)
 {
-	request_module("char-major-%d", MAJOR(dev));
+	request_module("char-major-%d-%d", MAJOR(dev), MINOR(dev));
 	return NULL;
 }
fs/cramfs/inode.c

@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/blkdev.h>
 #include <linux/cramfs_fs.h>
-#include <linux/smp_lock.h>
 #include <linux/slab.h>
 #include <linux/cramfs_fs_sb.h>
 #include <linux/buffer_head.h>

@@ -206,10 +205,10 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 	sb_set_blocksize(sb, PAGE_CACHE_SIZE);
 
 	/* Invalidate the read buffers on mount: think disk change.. */
+	down(&read_mutex);
 	for (i = 0; i < READ_BUFFERS; i++)
 		buffer_blocknr[i] = -1;
 
-	down(&read_mutex);
 	/* Read the first block and get the superblock from it */
 	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
 	up(&read_mutex);

@@ -217,7 +216,9 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* Do sanity checks on the superblock */
 	if (super.magic != CRAMFS_MAGIC) {
 		/* check at 512 byte offset */
+		down(&read_mutex);
 		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
+		up(&read_mutex);
 		if (super.magic != CRAMFS_MAGIC) {
 			if (!silent)
 				printk(KERN_ERR "cramfs: wrong magic\n");

@@ -288,6 +289,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
 	struct super_block *sb = inode->i_sb;
+	char *buf;
 	unsigned int offset;
 	int copied;

@@ -299,18 +301,21 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 	if (offset & 3)
 		return -EINVAL;
 
-	lock_kernel();
+	buf = kmalloc(256, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
 	copied = 0;
 	while (offset < inode->i_size) {
 		struct cramfs_inode *de;
 		unsigned long nextoffset;
 		char *name;
+		ino_t ino;
+		mode_t mode;
 		int namelen, error;
 
 		down(&read_mutex);
 		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256);
-		up(&read_mutex);
 		name = (char *)(de+1);

@@ -319,17 +324,21 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		 * with zeroes.
 		 */
 		namelen = de->namelen << 2;
+		memcpy(buf, name, namelen);
+		ino = CRAMINO(de);
+		mode = de->mode;
+		up(&read_mutex);
 		nextoffset = offset + sizeof(*de) + namelen;
 		for (;;) {
 			if (!namelen) {
-				unlock_kernel();
+				kfree(buf);
 				return -EIO;
 			}
-			if (name[namelen-1])
+			if (buf[namelen-1])
 				break;
 			namelen--;
 		}
-		error = filldir(dirent, name, namelen, offset, CRAMINO(de), de->mode >> 12);
+		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
 		if (error)
 			break;

@@ -337,7 +346,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
 		filp->f_pos = offset;
 		copied++;
 	}
-	unlock_kernel();
+	kfree(buf);
 	return 0;
 }

@@ -349,16 +358,14 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 	unsigned int offset = 0;
 	int sorted;
 
-	lock_kernel();
+	down(&read_mutex);
 	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
 	while (offset < dir->i_size) {
 		struct cramfs_inode *de;
 		char *name;
 		int namelen, retval;
 
-		down(&read_mutex);
 		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+256);
-		up(&read_mutex);
 		name = (char *)(de+1);
 
 		/* Try to take advantage of sorted directories */

@@ -374,7 +381,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 		for (;;) {
 			if (!namelen) {
-				unlock_kernel();
+				up(&read_mutex);
 				return ERR_PTR(-EIO);
 			}
 			if (name[namelen-1])

@@ -387,15 +394,16 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s
 		if (retval > 0)
 			continue;
 		if (!retval) {
-			d_add(dentry, get_cramfs_inode(dir->i_sb, de));
-			unlock_kernel();
+			struct cramfs_inode entry = *de;
+			up(&read_mutex);
+			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
 			return NULL;
 		}
 		/* else (retval < 0) */
 		if (sorted)
 			break;
 	}
-	unlock_kernel();
+	up(&read_mutex);
 	d_add(dentry, NULL);
 	return NULL;
 }

@@ -452,6 +460,7 @@ static struct address_space_operations cramfs_aops = {
  * A directory can only readdir
  */
 static struct file_operations cramfs_directory_operations = {
+	.llseek		= generic_file_llseek,
 	.read		= generic_read_dir,
 	.readdir	= cramfs_readdir,
 };
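The readdir and lookup hunks above all apply one pattern: snapshot whatever is still needed (the name, ino, mode, or the cramfs_inode itself) while read_mutex pins the shared read buffer, drop the mutex, and only then call filldir()/d_add(), which may sleep or fault and let another reader recycle the buffer. A generic sketch of that pattern, with illustrative names (shared_cache, cache_lock, snapshot_entry) rather than the real cramfs structures:

#include <linux/string.h>
#include <linux/types.h>
#include <asm/semaphore.h>

struct shared_cache {
	char data[256];			/* protected by cache_lock */
};

static DECLARE_MUTEX(cache_lock);	/* 2.6-era semaphore, like read_mutex */
static struct shared_cache cache;

/* Copy an entry out under the lock; the caller may then sleep freely. */
static void snapshot_entry(char *out, size_t len)
{
	down(&cache_lock);
	memcpy(out, cache.data, len);	/* snapshot while the buffer is stable */
	up(&cache_lock);
	/* 'out' stays valid even if another user recycles cache.data now,
	 * so it is safe to hand it to filldir()-like callbacks afterwards. */
}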
fs/dquot.c

@@ -128,16 +128,17 @@ static struct quota_format_type *find_quota_format(int id)
 	if (!actqf || !try_module_get(actqf->qf_owner)) {
 		int qm;
 
 		spin_unlock(&dq_list_lock);
 		for (qm = 0; module_names[qm].qm_fmt_id && module_names[qm].qm_fmt_id != id; qm++);
-		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name)) {
-			actqf = NULL;
-			goto out;
-		}
+		if (!module_names[qm].qm_fmt_id || request_module(module_names[qm].qm_mod_name))
+			return NULL;
+		spin_lock(&dq_list_lock);
+		for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id; actqf = actqf->qf_next);
+		if (actqf && !try_module_get(actqf->qf_owner))
+			actqf = NULL;
 	}
-out:
 	spin_unlock(&dq_list_lock);
 	return actqf;
 }
fs/ext2/balloc.c

@@ -402,6 +402,7 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
 	 * Now search the rest of the groups.  We assume that
 	 * i and desc correctly point to the last group visited.
 	 */
+retry:
 	for (bit = 0; !group_alloc && bit < sbi->s_groups_count; bit++) {
 		group_no++;

@@ -425,11 +426,12 @@ int ext2_new_block(struct inode *inode, unsigned long goal,
 	ret_block = grab_block(sb_bgl_lock(sbi, group_no), bitmap_bh->b_data,
 				group_size, 0);
 	if (ret_block < 0) {
-		ext2_error (sb, "ext2_new_block",
-			"Free blocks count corrupted for block group %d",
-			group_no);
+		/*
+		 * Someone else grabbed the last free block in this blockgroup
+		 * before us.  Retry the scan.
+		 */
 		group_alloc = 0;
-		goto io_error;
+		goto retry;
 	}
 
 got_block:
fs/jbd/journal.c

@@ -1729,8 +1729,18 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 		J_ASSERT_BH(bh, buffer_jbd(bh));
 		J_ASSERT_BH(bh, jh2bh(jh) == bh);
 		BUFFER_TRACE(bh, "remove journal_head");
-		J_ASSERT_BH(bh, !jh->b_frozen_data);
-		J_ASSERT_BH(bh, !jh->b_committed_data);
+		if (jh->b_frozen_data) {
+			printk(KERN_WARNING "%s: freeing b_frozen_data\n",
+				__FUNCTION__);
+			kfree(jh->b_frozen_data);
+		}
+		if (jh->b_committed_data) {
+			printk(KERN_WARNING "%s: freeing b_committed_data\n",
+				__FUNCTION__);
+			kfree(jh->b_committed_data);
+		}
 		bh->b_private = NULL;
 		jh->b_bh = NULL;	/* debug, really */
 		clear_buffer_jbd(bh);
fs/nfsd/vfs.c

@@ -1368,7 +1368,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
 			nfsd_sync_dir(tdentry);
 			nfsd_sync_dir(fdentry);
 		}
-	dput(ndentry);
 
  out_dput_new:
 	dput(ndentry);
include/asm-i386/checksum.h

@@ -83,7 +83,8 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl));
+	: "1" (iph), "2" (ihl)
+	: "memory");
 	return(sum);
 }
include/asm-x86_64/checksum.h

@@ -68,7 +68,8 @@ static inline unsigned short ip_fast_csum(unsigned char *iph, unsigned int ihl)
 	   are modified, we must also specify them as outputs, or gcc
 	   will assume they contain their original values. */
 	: "=r" (sum), "=r" (iph), "=r" (ihl)
-	: "1" (iph), "2" (ihl));
+	: "1" (iph), "2" (ihl)
+	: "memory");
 	return(sum);
 }
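Both checksum.h hunks add a "memory" clobber to an asm block that walks the IP header through a pointer operand. A stand-alone illustration of what the clobber changes, assuming x86-64 GCC inline asm; sum_two() is a made-up helper, not the kernel's ip_fast_csum():

/* Reads two 64-bit words through a pointer the compiler only sees as a
 * register operand. */
static inline unsigned long sum_two(const unsigned long *p)
{
	unsigned long sum;

	asm("movq 0(%1), %0\n\t"
	    "addq 8(%1), %0"
	    : "=r" (sum)
	    : "r" (p)
	    : "memory");	/* the asm touches memory GCC cannot see */
	return sum;
}

Without the clobber, GCC only knows about the register operands, so it may leave earlier stores to *p sitting in registers or reorder them past the asm; with it, pending stores are flushed before the block and cached values are discarded afterwards, which is exactly what the header-checksum code relies on.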
include/asm-x86_64/desc.h

@@ -118,7 +118,7 @@ static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned
 	d.base1 = PTR_MIDDLE(tss) & 0xFF;
 	d.type = type;
 	d.p = 1;
-	d.limit1 = 0xF;
+	d.limit1 = (size >> 16) & 0xF;
 	d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
 	d.base3 = PTR_HIGH(tss);
 	memcpy(ptr, &d, 16);
include/linux/sched.h

@@ -574,7 +574,11 @@ extern void do_timer(struct pt_regs *);
 extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
 extern int FASTCALL(wake_up_process(struct task_struct * tsk));
-extern int FASTCALL(wake_up_process_kick(struct task_struct * tsk));
+#ifdef CONFIG_SMP
+extern void FASTCALL(kick_process(struct task_struct * tsk));
+#else
+static inline void kick_process(struct task_struct *tsk) { }
+#endif
 extern void FASTCALL(wake_up_forked_process(struct task_struct * tsk));
 extern void FASTCALL(sched_exit(task_t * p));
include/linux/signal.h

@@ -214,7 +214,7 @@ extern int sigprocmask(int, sigset_t *, sigset_t *);
 struct pt_regs;
 extern int get_signal_to_deliver(siginfo_t *info, struct pt_regs *regs, void *cookie);
 #endif
 
-#define FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SIGNAL_H */
include/linux/times.h

@@ -4,6 +4,7 @@
 #ifdef __KERNEL__
 #include <asm/div64.h>
 #include <asm/types.h>
+#include <asm/param.h>
 
 #if (HZ % USER_HZ)==0
 # define jiffies_to_clock_t(x) ((x) / (HZ / USER_HZ))
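jiffies_to_clock_t(), which the route and TCP /proc hunks below start using, rescales kernel jiffies to the USER_HZ rate that userspace expects from clock_t values. A minimal user-space illustration of the simple case where HZ is a multiple of USER_HZ, assuming HZ = 1000 and USER_HZ = 100 (both values are assumptions for the example):

#include <stdio.h>

#define HZ      1000			/* kernel tick rate (assumed) */
#define USER_HZ  100			/* rate exported to userspace (assumed) */

/* the simple case from <linux/times.h>: HZ is a multiple of USER_HZ */
#define jiffies_to_clock_t(x)	((x) / (HZ / USER_HZ))

int main(void)
{
	unsigned long timeout_jiffies = 2500;	/* e.g. timer_expires - jiffies */

	/* Raw jiffies would read as 2500 "ticks"; converted it is 250,
	 * i.e. 2.5 s at USER_HZ, which is what /proc readers expect. */
	printf("raw: %lu  converted: %lu\n",
	       timeout_jiffies, jiffies_to_clock_t(timeout_jiffies));
	return 0;
}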
kernel/posix-timers.c

@@ -1104,29 +1104,6 @@ long clock_nanosleep_restart(struct restart_block *restart_block);
 extern long do_clock_nanosleep(clockid_t which_clock, int flags,
 			       struct timespec *t);
 
-#ifdef FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
-{
-	struct timespec t;
-	long ret;
-
-	if (copy_from_user(&t, rqtp, sizeof (t)))
-		return -EFAULT;
-
-	if ((unsigned) t.tv_nsec >= NSEC_PER_SEC || t.tv_sec < 0)
-		return -EINVAL;
-
-	ret = do_clock_nanosleep(CLOCK_REALTIME, 0, &t);
-
-	if (ret == -ERESTART_RESTARTBLOCK && rmtp &&
-	    copy_to_user(rmtp, &t, sizeof (t)))
-		return -EFAULT;
-	return ret;
-}
-#endif // ! FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
 
 asmlinkage long
 sys_clock_nanosleep(clockid_t which_clock, int flags,
 		    const struct timespec __user *rqtp,

@@ -1244,7 +1221,7 @@ do_clock_nanosleep(clockid_t which_clock, int flags, struct timespec *tsave)
 	return 0;
 }
 /*
- * This will restart either clock_nanosleep or clock_nanosleep
+ * This will restart clock_nanosleep. Incorrectly, btw.
 */
 long
 clock_nanosleep_restart(struct restart_block *restart_block)
kernel/sched.c

@@ -530,6 +530,15 @@ static inline void resched_task(task_t *p)
 #endif
 }
 
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ */
+inline int task_curr(task_t *p)
+{
+	return cpu_curr(task_cpu(p)) == p;
+}
+
 #ifdef CONFIG_SMP
 /*

@@ -568,6 +577,27 @@ void wait_task_inactive(task_t * p)
 	task_rq_unlock(rq, &flags);
 	preempt_enable();
 }
 
+/***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+ *
+ * Cause a process which is running on another CPU to enter
+ * kernel-mode, without any delay. (to get signals handled.)
+ */
+void kick_process(task_t *p)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if ((cpu != smp_processor_id()) && task_curr(p))
+		smp_send_reschedule(cpu);
+	preempt_enable();
+}
+
+EXPORT_SYMBOL_GPL(kick_process);
+
 #endif
 
 /***

@@ -575,7 +605,6 @@ void wait_task_inactive(task_t * p)
  * @p: the to-be-woken-up thread
  * @state: the mask of task states that can be woken
  * @sync: do a synchronous wakeup?
- * @kick: kick the CPU if the task is already running?
  *
  * Put it on the run-queue if it's not already there. The "current"
  * thread is always on the run-queue (except when the actual

@@ -585,7 +614,7 @@ void wait_task_inactive(task_t * p)
 *
 * returns failure only if the task is already active.
 */
-static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
+static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 {
 	unsigned long flags;
 	int success = 0;

@@ -626,33 +655,22 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
 		}
 		success = 1;
 	}
-#ifdef CONFIG_SMP
-	else
-		if (unlikely(kick) && task_running(rq, p) &&
-				(task_cpu(p) != smp_processor_id()))
-			smp_send_reschedule(task_cpu(p));
-#endif
 	p->state = TASK_RUNNING;
 	}
 	task_rq_unlock(rq, &flags);
 
 	return success;
 }
 
 int wake_up_process(task_t * p)
 {
-	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE |
-			      TASK_UNINTERRUPTIBLE, 0, 0);
+	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE |
+			      TASK_UNINTERRUPTIBLE, 0);
 }
 
 EXPORT_SYMBOL(wake_up_process);
 
-int wake_up_process_kick(task_t * p)
-{
-	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE |
-			      TASK_UNINTERRUPTIBLE, 0, 1);
-}
-
 int wake_up_state(task_t *p, unsigned int state)
 {
-	return try_to_wake_up(p, state, 0, 0);
+	return try_to_wake_up(p, state, 0);
 }
 
 /*

@@ -1621,7 +1639,7 @@ EXPORT_SYMBOL(preempt_schedule);
 int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
 {
 	task_t *p = curr->task;
-	return try_to_wake_up(p, mode, sync, 0);
+	return try_to_wake_up(p, mode, sync);
 }
 
 EXPORT_SYMBOL(default_wake_function);

@@ -1941,15 +1959,6 @@ int task_nice(task_t *p)
 EXPORT_SYMBOL(task_nice);
 
-/**
- * task_curr - is this task currently executing on a CPU?
- * @p: the task in question.
- */
-int task_curr(task_t *p)
-{
-	return cpu_curr(task_cpu(p)) == p;
-}
-
 /**
  * idle_cpu - is a given cpu idle currently?
  * @cpu: the processor in question.
kernel/signal.c

@@ -538,8 +538,9 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 inline void signal_wake_up(struct task_struct *t, int resume)
 {
 	unsigned int mask;
+	int woken;
 
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
 
 	/*
 	 * If resume is set, we want to wake it up in the TASK_STOPPED case.

@@ -551,10 +552,11 @@ inline void signal_wake_up(struct task_struct *t, int resume)
 	mask = TASK_INTERRUPTIBLE;
 	if (resume)
 		mask |= TASK_STOPPED;
-	if (t->state & mask) {
-		wake_up_process_kick(t);
-		return;
-	}
+	woken = 0;
+	if (t->state & mask)
+		woken = wake_up_state(t, mask);
+	if (!woken)
+		kick_process(t);
 }
 
 /*
kernel/timer.c

@@ -1059,7 +1059,6 @@ asmlinkage long sys_gettid(void)
 {
 	return current->pid;
 }
 
-#ifndef FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
 static long nanosleep_restart(struct restart_block *restart)
 {

@@ -1118,7 +1117,6 @@ asmlinkage long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp)
 	}
 	return ret;
 }
-#endif // ! FOLD_NANO_SLEEP_INTO_CLOCK_NANO_SLEEP
 
 /*
  * sys_sysinfo - fill in sysinfo struct
net/core/flow.c

@@ -19,6 +19,7 @@
 #include <linux/bitops.h>
 #include <linux/notifier.h>
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>

@@ -65,7 +66,7 @@ static struct timer_list flow_hash_rnd_timer;
 struct flow_flush_info {
 	atomic_t cpuleft;
-	unsigned long cpumap;
+	cpumask_t cpumap;
 	struct completion completion;
 };
 static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };

@@ -73,7 +74,7 @@ static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
 
 static DECLARE_MUTEX(flow_cache_cpu_sem);
-static unsigned long flow_cache_cpu_map;
+static cpumask_t flow_cache_cpu_map;
 static unsigned int flow_cache_cpu_count;
 
 static void flow_cache_new_hashrnd(unsigned long arg)

@@ -81,7 +82,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 	int i;
 
 	for (i = 0; i < NR_CPUS; i++)
-		if (test_bit(i, &flow_cache_cpu_map))
+		if (cpu_isset(i, flow_cache_cpu_map))
 			flow_hash_rnd_recalc(i) = 1;
 
 	flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;

@@ -178,7 +179,7 @@ void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
 	cpu = smp_processor_id();
 
 	fle = NULL;
-	if (!test_bit(cpu, &flow_cache_cpu_map))
+	if (!cpu_isset(cpu, flow_cache_cpu_map))
 		goto nocache;
 
 	if (flow_hash_rnd_recalc(cpu))

@@ -277,7 +278,7 @@ static void flow_cache_flush_per_cpu(void *data)
 	struct tasklet_struct *tasklet;
 
 	cpu = smp_processor_id();
-	if (!test_bit(cpu, &info->cpumap))
+	if (!cpu_isset(cpu, info->cpumap))
 		return;
 
 	tasklet = flow_flush_tasklet(cpu);

@@ -301,7 +302,7 @@ void flow_cache_flush(void)
 	local_bh_disable();
 	smp_call_function(flow_cache_flush_per_cpu, &info, 1, 0);
-	if (test_bit(smp_processor_id(), &info.cpumap))
+	if (cpu_isset(smp_processor_id(), info.cpumap))
 		flow_cache_flush_tasklet((unsigned long)&info);
 	local_bh_enable();

@@ -341,7 +342,7 @@ static int __devinit flow_cache_cpu_prepare(int cpu)
 static int __devinit flow_cache_cpu_online(int cpu)
 {
 	down(&flow_cache_cpu_sem);
-	set_bit(cpu, &flow_cache_cpu_map);
+	cpu_set(cpu, flow_cache_cpu_map);
 	flow_cache_cpu_count++;
 	up(&flow_cache_cpu_sem);
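The flow-cache hunks swap a raw unsigned long bitmap for cpumask_t and its accessors, which keep working once NR_CPUS exceeds BITS_PER_LONG. A short sketch of the same accessors in isolation, assuming the 2.6-era <linux/cpumask.h> API; my_mask, mark_cpu and cpu_is_marked are illustrative names:

#include <linux/cpumask.h>

static cpumask_t my_mask = CPU_MASK_NONE;

static void mark_cpu(int cpu)
{
	cpu_set(cpu, my_mask);		/* replaces set_bit(cpu, &bitmap) */
}

static int cpu_is_marked(int cpu)
{
	return cpu_isset(cpu, my_mask);	/* replaces test_bit(cpu, &bitmap) */
}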
net/core/link_watch.c

@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/if.h>
+#include <net/sock.h>
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>

@@ -91,9 +92,11 @@ static void linkwatch_event(void *dummy)
 	linkwatch_nextevent = jiffies + HZ;
 	clear_bit(LW_RUNNING, &linkwatch_flags);
 
-	rtnl_lock();
+	rtnl_shlock();
+	rtnl_exlock();
 	linkwatch_run_queue();
-	rtnl_unlock();
+	rtnl_exunlock();
+	rtnl_shunlock();
 }
net/core/skbuff.c

@@ -595,10 +595,10 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 	head_copy_len = skb_headroom(skb);
 	head_copy_off = 0;
-	if (newheadroom < head_copy_len) {
-		head_copy_off = head_copy_len - newheadroom;
+	if (newheadroom <= head_copy_len)
 		head_copy_len = newheadroom;
-	}
+	else
+		head_copy_off = newheadroom - head_copy_len;
 
 	/* Copy the linear header and data. */
 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
net/ipv4/netfilter/arpt_mangle.c

@@ -4,8 +4,8 @@
 #include <net/sock.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
-MODULE_DESCRIPTION("arptables mangle table");
+MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
+MODULE_DESCRIPTION("arptables arp payload mangle target");
 
 static unsigned int
 target(struct sk_buff **pskb, unsigned int hooknum, const struct net_device *in,
net/ipv4/route.c

@@ -89,6 +89,7 @@
 #include <linux/random.h>
 #include <linux/jhash.h>
 #include <linux/rcupdate.h>
+#include <linux/times.h>
 #include <net/protocol.h>
 #include <net/ip.h>
 #include <net/route.h>

@@ -2309,7 +2310,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
 	ci.rta_used	= rt->u.dst.__use;
 	ci.rta_clntref	= atomic_read(&rt->u.dst.__refcnt);
 	if (rt->u.dst.expires)
-		ci.rta_expires = rt->u.dst.expires - jiffies;
+		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
 	else
 		ci.rta_expires = 0;
 	ci.rta_error	= rt->u.dst.error;
net/ipv4/tcp_ipv4.c

@@ -61,6 +61,7 @@
 #include <linux/cache.h>
 #include <linux/jhash.h>
 #include <linux/init.h>
+#include <linux/times.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>

@@ -2490,7 +2491,7 @@ static void get_openreq4(struct sock *sk, struct open_request *req,
 		TCP_SYN_RECV,
 		0, 0, /* could print option size, but that is af dependent. */
 		1,    /* timers active (only the expire timer) */
-		ttd,
+		jiffies_to_clock_t(ttd),
 		req->retrans,
 		uid,
 		0,  /* non standard timer */

@@ -2528,7 +2529,8 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i)
 		"%08X %5d %8d %lu %d %p %u %u %u %u %d",
 		i, src, srcp, dest, destp, sp->sk_state,
 		tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
-		timer_active, timer_expires - jiffies,
+		timer_active,
+		jiffies_to_clock_t(timer_expires - jiffies),
 		tp->retransmits,
 		sock_i_uid(sp),
 		tp->probes_out,

@@ -2556,7 +2558,7 @@ static void get_timewait4_sock(struct tcp_tw_bucket *tw, char *tmpbuf, int i)
 	sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08X %08X %5d %8d %d %d %p",
 		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
-		3, ttd, 0, 0, 0, 0,
+		3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
 		atomic_read(&tw->tw_refcnt), tw);
net/ipv6/addrconf.c

@@ -571,15 +571,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 	ifp->dead = 1;
 
-#ifdef CONFIG_IPV6_PRIVACY
-	spin_lock_bh(&ifp->lock);
-	if (ifp->ifpub) {
-		__in6_ifa_put(ifp->ifpub);
-		ifp->ifpub = NULL;
-	}
-	spin_unlock_bh(&ifp->lock);
-#endif
-
 	write_lock_bh(&addrconf_hash_lock);
 	for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL;
 	     ifap = &ifa->lst_next) {

@@ -600,7 +591,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 		if (ifa == ifp) {
 			*ifap = ifa->tmp_next;
 			if (ifp->ifpub) {
-				__in6_ifa_put(ifp->ifpub);
+				in6_ifa_put(ifp->ifpub);
 				ifp->ifpub = NULL;
 			}
 			__in6_ifa_put(ifp);
net/ipv6/route.c

@@ -27,6 +27,7 @@
 #include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/types.h>
+#include <linux/times.h>
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/net.h>

@@ -717,7 +718,7 @@ int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh, void *_rtattr)
 		return -ENOMEM;
 
 	rt->u.dst.obsolete = -1;
-	rt->rt6i_expires = rtmsg->rtmsg_info;
+	rt->rt6i_expires = clock_t_to_jiffies(rtmsg->rtmsg_info);
 	if (nlh && (r = NLMSG_DATA(nlh))) {
 		rt->rt6i_protocol = r->rtm_protocol;
 	} else {

@@ -1535,7 +1536,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 	RTA_PUT(skb, RTA_PRIORITY, 4, &rt->rt6i_metric);
 	ci.rta_lastuse = jiffies - rt->u.dst.lastuse;
 	if (rt->rt6i_expires)
-		ci.rta_expires = rt->rt6i_expires - jiffies;
+		ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
 	else
 		ci.rta_expires = 0;
 	ci.rta_used = rt->u.dst.__use;
net/ipv6/tcp_ipv6.c

@@ -39,6 +39,7 @@
 #include <linux/init.h>
 #include <linux/jhash.h>
 #include <linux/ipsec.h>
+#include <linux/times.h>
 
 #include <linux/ipv6.h>
 #include <linux/icmpv6.h>

@@ -1941,7 +1942,7 @@ static void get_openreq6(struct seq_file *seq,
 		   TCP_SYN_RECV,
 		   0,0, /* could print option size, but that is af dependent. */
 		   1,   /* timers active (only the expire timer) */
-		   ttd,
+		   jiffies_to_clock_t(ttd),
 		   req->retrans,
 		   uid,
 		   0,  /* non standard timer */

@@ -1987,7 +1988,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   sp->sk_state,
 		   tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
-		   timer_active, timer_expires - jiffies,
+		   timer_active,
+		   jiffies_to_clock_t(timer_expires - jiffies),
 		   tp->retransmits,
 		   sock_i_uid(sp),
 		   tp->probes_out,

@@ -2022,7 +2024,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
 		   tw->tw_substate, 0, 0,
-		   3, ttd, 0, 0, 0, 0,
+		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
 		   atomic_read(&tw->tw_refcnt), tw);
 }