Kirill Smelkov / linux / Commits

Commit d51c905a, authored Feb 04, 2002 by Linus Torvalds

    v2.4.10.0.1 -> v2.4.10.0.2

    - more buffers-in-pagecache coherency

parent a41cd6e4

Showing 14 changed files with 103 additions and 175 deletions (+103 -175)
Makefile                     +2   -2
arch/i386/kernel/pci-pc.c    +3   -7
drivers/net/ppp_generic.c    +6   -5
drivers/net/pppoe.c          +5   -1
drivers/usb/usb-uhci.c       +3   -2
fs/block_dev.c               +14  -16
fs/buffer.c                  +36  -75
include/linux/fs.h           +0   -5
include/linux/list.h         +0   -1
include/linux/mm.h           +7   -6
include/linux/slab.h         +1   -1
mm/filemap.c                 +22  -52
mm/memory.c                  +3   -1
mm/page_alloc.c              +1   -1
Makefile

 VERSION = 2
 PATCHLEVEL = 4
-SUBLEVEL = 10
-EXTRAVERSION =
+SUBLEVEL = 11
+EXTRAVERSION = -pre1

 KERNELRELEASE=$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION)
...
arch/i386/kernel/pci-pc.c

...
@@ -261,18 +261,14 @@ static int pci_conf2_read_config_word(struct pci_dev *dev, int where, u16 *value
     u32 data;

     result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
                 PCI_FUNC(dev->devfn), where, 2, &data);
-    *value = (u8)data;
+    *value = (u16)data;
     return result;
 }

 static int pci_conf2_read_config_dword(struct pci_dev *dev, int where, u32 *value)
 {
-    int result;
-    u32 data;
-    result = pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
-                PCI_FUNC(dev->devfn), where, 4, &data);
-    *value = (u8)data;
-    return result;
+    return pci_conf2_read(0, dev->bus->number, PCI_SLOT(dev->devfn),
+                  PCI_FUNC(dev->devfn), where, 4, value);
 }

 static int pci_conf2_write_config_byte(struct pci_dev *dev, int where, u8 value)
...
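Note: the bug fixed above is the (u8) cast, which threw away the high byte of every 16-bit (and 32-bit) config-space read done through the type-2 access method. A minimal standalone C sketch of the truncation (userspace demo, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t data = 0x8086;          /* e.g. a 16-bit PCI vendor ID */
    uint16_t fixed = (uint16_t)data; /* new cast: keeps 0x8086 */
    uint16_t buggy = (uint8_t)data;  /* old cast: truncates to 0x86 */
    printf("fixed=%#x buggy=%#x\n", fixed, buggy);
    return 0;
}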
drivers/net/ppp_generic.c

...
@@ -2105,13 +2105,12 @@ ppp_register_compressor(struct compressor *cp)
 {
     struct compressor_entry *ce;
     int ret;

     spin_lock(&compressor_list_lock);
     ret = -EEXIST;
     if (find_comp_entry(cp->compress_proto) != 0)
         goto out;
     ret = -ENOMEM;
-    ce = kmalloc(sizeof(struct compressor_entry), GFP_KERNEL);
+    ce = kmalloc(sizeof(struct compressor_entry), GFP_ATOMIC);
     if (ce == 0)
         goto out;
     ret = 0;
...
@@ -2216,11 +2215,11 @@ ppp_create_interface(int unit, int *retp)
     /* Create a new ppp structure and link it before `list'. */
     ret = -ENOMEM;
-    ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+    ppp = kmalloc(sizeof(struct ppp), GFP_ATOMIC);
     if (ppp == 0)
         goto out;
     memset(ppp, 0, sizeof(struct ppp));
-    dev = kmalloc(sizeof(struct net_device), GFP_KERNEL);
+    dev = kmalloc(sizeof(struct net_device), GFP_ATOMIC);
     if (dev == 0) {
         kfree(ppp);
         goto out;
...
@@ -2285,6 +2284,7 @@ init_ppp_file(struct ppp_file *pf, int kind)
 static void ppp_destroy_interface(struct ppp *ppp)
 {
     struct net_device *dev;
+    int n_channels;

     spin_lock(&all_ppp_lock);
     list_del(&ppp->file.list);
...
@@ -2314,6 +2314,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_FILTER */

     dev = ppp->dev;
     ppp->dev = 0;
+    n_channels = ppp->n_channels;
     ppp_unlock(ppp);

     if (dev) {
...
@@ -2329,7 +2330,7 @@ static void ppp_destroy_interface(struct ppp *ppp)
      * ppp structure. Otherwise we leave it around until the
      * last channel disconnects from it.
      */
-    if (ppp->n_channels == 0)
+    if (n_channels == 0)
         kfree(ppp);

     spin_unlock(&all_ppp_lock);
...
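Note: two related fixes above. The kmalloc() calls switch from GFP_KERNEL to GFP_ATOMIC because they run with a spinlock held, where a sleeping allocation is not allowed. And ppp_destroy_interface() now samples n_channels into a local before ppp_unlock(), so the later "if (n_channels == 0) kfree(ppp);" does not read a field from a struct that may already have been freed. A standalone sketch of that sample-before-free pattern (userspace C, hypothetical struct, not the driver's code):

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

int main(void)
{
    struct obj *o = malloc(sizeof(*o));
    o->refs = 0;

    /* Fixed pattern: copy the field into a local while access is
     * still known to be safe... */
    int refs = o->refs;

    /* ...after this point another path may free o, so only the
     * local copy is consulted (here we free it ourselves). */
    if (refs == 0) {
        free(o);
        printf("freed: refcount had dropped to %d\n", refs);
    }
    return 0;
}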
drivers/net/pppoe.c

...
@@ -541,12 +541,16 @@ int pppoe_release(struct socket *sock)
     sk->state = PPPOX_DEAD;

     po = sk->protinfo.pppox;
-    if (po->pppoe_pa.sid)
+    if (po->pppoe_pa.sid) {
         delete_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
+        po->pppoe_pa.sid = 0;
+    }
+    if (po->pppoe_dev)
+        dev_put(po->pppoe_dev);
     po->pppoe_dev = NULL;

     sock_orphan(sk);
     sock->sk = NULL;
...
drivers/usb/usb-uhci.c

...
@@ -2528,7 +2528,7 @@ _static int process_iso (uhci_t *s, urb_t *urb, int mode)
     int i;
     int ret = 0;
     urb_priv_t *urb_priv = urb->hcpriv;
-    struct list_head *p = urb_priv->desc_list.next;
+    struct list_head *p = urb_priv->desc_list.next, *p_tmp;
     uhci_desc_t *desc = list_entry (urb_priv->desc_list.prev, uhci_desc_t, desc_list);

     dbg("urb contains iso request");
...
@@ -2578,8 +2578,9 @@ _static int process_iso (uhci_t *s, urb_t *urb, int mode)
             dbg("process_iso: %i: len:%d %08x status:%x",
                 i, urb->iso_frame_desc[i].actual_length,
                 le32_to_cpu(desc->hw.td.status), urb->iso_frame_desc[i].status);
-        list_del (p);
+        p_tmp = p;
         p = p->next;
+        list_del (p_tmp);
         delete_desc (s, desc);
     }
...
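Note: this is the classic safe-iteration fix. The old loop called list_del(p) and then advanced via p->next from the just-unlinked node, which only works if list_del() leaves the pointers intact (compare the include/linux/list.h hunk below); the new loop saves the node in p_tmp, advances first, and unlinks afterwards. A standalone sketch of the fixed ordering (userspace C, minimal types mirroring struct list_head):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
    e->next = e->prev = NULL;   /* a poisoning list_del, to show why
                                 * advancing after deletion is unsafe */
}

int main(void)
{
    struct list_head head, a, b;
    INIT_LIST_HEAD(&head);
    list_add_tail(&a, &head);
    list_add_tail(&b, &head);

    /* Safe deletion while iterating: save, advance, then unlink. */
    struct list_head *p = head.next, *p_tmp;
    while (p != &head) {
        p_tmp = p;
        p = p->next;       /* advance before unlinking */
        list_del(p_tmp);
    }
    printf("list empty: %d\n", head.next == &head);
    return 0;
}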
fs/block_dev.c

...
@@ -67,6 +67,17 @@ static unsigned int max_block(kdev_t dev)
     return retval;
 }

+static loff_t blkdev_size(kdev_t dev)
+{
+    unsigned int blocks = ~0U;
+    int major = MAJOR(dev);
+
+    if (blk_size[major]) {
+        int minor = MINOR(dev);
+        blocks = blk_size[major][minor];
+    }
+    return (loff_t) blocks << BLOCK_SIZE_BITS;
+}
+
 static inline int blkdev_get_block(struct inode *inode, long iblock, struct buffer_head *bh_result)
 {
...
@@ -308,7 +319,6 @@ static int __blkdev_commit_write(struct inode *inode, struct page *page,
         set_bit(BH_Uptodate, &bh->b_state);
         if (!atomic_set_buffer_dirty(bh)) {
             __mark_dirty(bh);
-            buffer_insert_inode_data_queue(bh, inode);
             need_balance_dirty = 1;
         }
     }
...
@@ -404,6 +414,7 @@ static struct super_block *bd_read_super(struct super_block *sb, void *data, int
     root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
     root->i_uid = root->i_gid = 0;
     root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+    sb->s_maxbytes = ~0ULL;
     sb->s_blocksize = 1024;
     sb->s_blocksize_bits = 10;
     sb->s_magic = 0x62646576;
...
@@ -521,9 +532,11 @@ struct block_device *bdget(dev_t dev)
         new_bdev->bd_dev = dev;
         new_bdev->bd_op = NULL;
         new_bdev->bd_inode = inode;
+        inode->i_size = blkdev_size(dev);
         inode->i_rdev = to_kdev_t(dev);
         inode->i_bdev = new_bdev;
         inode->i_data.a_ops = &def_blk_aops;
+        inode->i_data.gfp_mask = GFP_USER;
         spin_lock(&bdev_lock);
         bdev = bdfind(dev, head);
         if (!bdev) {
...
@@ -810,22 +823,7 @@ int blkdev_put(struct block_device *bdev, int kind)
     down(&bdev->bd_sem);
     lock_kernel();
     if (kind == BDEV_FILE) {
-        struct super_block * sb;
-
         __block_fsync(bd_inode);
-
-        /* Janitorianism: this shit must go away */
-        sb = get_super(bd_inode->i_rdev);
-        if (sb) {
-            if (sb->s_flags & MS_RDONLY) {
-                shrink_dcache_sb(sb);
-                invalidate_inodes(sb);
-                invalidate_buffers(bd_inode->i_rdev);
-            }
-            lock_super(sb);
-            unlock_super(sb);
-            drop_super(sb);
-        }
     } else if (kind == BDEV_FS)
         fsync_no_super(rdev);
     if (!--bdev->bd_openers) {
...
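Note: blkdev_size() is what lets the block-device inode carry a real i_size (set in bdget() above), so the generic page-cache paths in mm/filemap.c can bounds-check reads and writes directly against the device size instead of going through the removed calc_rsize() helper. A standalone recomputation of the size math (userspace C; BLOCK_SIZE_BITS is 10, i.e. 1 KiB blocks, matching s_blocksize = 1024 above):

#include <stdio.h>

#define BLOCK_SIZE_BITS 10    /* 1024-byte blocks, as in the kernel */

int main(void)
{
    unsigned int blocks = 1048576;  /* e.g. a 1 GiB device: 2^20 blocks */
    long long size = (long long) blocks << BLOCK_SIZE_BITS;
    printf("%u blocks -> %lld bytes\n", blocks, size);

    /* Unknown size (no blk_size[] entry): ~0U blocks yields an
     * effectively unlimited i_size, as in blkdev_size(). */
    blocks = ~0U;
    size = (long long) blocks << BLOCK_SIZE_BITS;
    printf("unknown -> %lld bytes\n", size);
    return 0;
}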
fs/buffer.c

...
@@ -52,22 +52,13 @@
 #include <asm/bitops.h>
 #include <asm/mmu_context.h>

-#define NR_SIZES 7
-static char buffersize_index[65] =
-{-1,  0,  1, -1,  2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1,
-  4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-  5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
-  6};
-
-#define BUFSIZE_INDEX(X) ((int) buffersize_index[(X)>>9])
 #define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
 #define NR_RESERVED (10*MAX_BUF_PER_PAGE)
 #define MAX_UNUSED_BUFFERS NR_RESERVED+20 /* don't ever have more than this
                          number of unused buffer heads */

 /* Anti-deadlock ordering:
- *    lru_list_lock > hash_table_lock > free_list_lock > unused_list_lock
+ *    lru_list_lock > hash_table_lock > unused_list_lock
 */

 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_inode_buffers)
...
@@ -80,6 +71,11 @@ static unsigned int bh_hash_shift;
 static struct buffer_head **hash_table;
 static rwlock_t hash_table_lock = RW_LOCK_UNLOCKED;

+#define BUF_CLEAN    0
+#define BUF_LOCKED   1    /* Buffers scheduled for write */
+#define BUF_DIRTY    2    /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST      3
+
 static struct buffer_head *lru_list[NR_LIST];
 static spinlock_t lru_list_lock = SPIN_LOCK_UNLOCKED;
 static int nr_buffers_type[NR_LIST];
...
@@ -90,14 +86,8 @@ static int nr_unused_buffer_heads;
 static spinlock_t unused_list_lock = SPIN_LOCK_UNLOCKED;
 static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);

-struct bh_free_head {
-    struct buffer_head *list;
-    spinlock_t lock;
-};
-static struct bh_free_head free_list[NR_SIZES];
-
 static void truncate_buffers(kdev_t dev);
-static int grow_buffers(kdev_t dev, int block, int size);
+static int grow_buffers(kdev_t dev, unsigned long block, int size);
 static void __refile_buffer(struct buffer_head *);

 /* This is used by some architectures to estimate available memory. */
...
@@ -482,12 +472,16 @@ asmlinkage long sys_fdatasync(unsigned int fd)
              ((block) << (bh_hash_shift - 12))))
 #define hash(dev,block) hash_table[(_hashfn(HASHDEV(dev),block) & bh_hash_mask)]

-static __inline__ void __hash_link(struct buffer_head *bh, struct buffer_head **head)
+static inline void __insert_into_hash_list(struct buffer_head *bh)
 {
-    if ((bh->b_next = *head) != NULL)
-        bh->b_next->b_pprev = &bh->b_next;
+    struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
+    struct buffer_head *next = *head;
+
     *head = bh;
     bh->b_pprev = head;
+    bh->b_next = next;
+    if (next != NULL)
+        next->b_pprev = &bh->b_next;
 }

 static __inline__ void __hash_unlink(struct buffer_head *bh)
...
@@ -504,6 +498,8 @@ static void __insert_into_lru_list(struct buffer_head * bh, int blist)
 {
     struct buffer_head **bhp = &lru_list[blist];

+    if (bh->b_prev_free || bh->b_next_free)
+        BUG();
     if (!*bhp) {
         *bhp = bh;
         bh->b_prev_free = bh;
...
@@ -531,19 +527,6 @@ static void __remove_from_lru_list(struct buffer_head * bh, int blist)
     }
 }

-static void __remove_from_free_list(struct buffer_head * bh, int index)
-{
-    if (bh->b_next_free == bh)
-        free_list[index].list = NULL;
-    else {
-        bh->b_prev_free->b_next_free = bh->b_next_free;
-        bh->b_next_free->b_prev_free = bh->b_prev_free;
-        if (free_list[index].list == bh)
-            free_list[index].list = bh->b_next_free;
-    }
-    bh->b_next_free = bh->b_prev_free = NULL;
-}
-
 /* must be called with both the hash_table_lock and the lru_list_lock
    held */
 static void __remove_from_queues(struct buffer_head *bh)
...
@@ -552,14 +535,6 @@ static void __remove_from_queues(struct buffer_head *bh)
     __remove_from_lru_list(bh, bh->b_list);
 }

-static void __insert_into_queues(struct buffer_head *bh)
-{
-    struct buffer_head **head = &hash(bh->b_dev, bh->b_blocknr);
-
-    __hash_link(bh, head);
-    __insert_into_lru_list(bh, bh->b_list);
-}
-
 struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
 {
     struct buffer_head *bh, **p = &hash(dev, block);
...
@@ -1214,6 +1189,7 @@ static __inline__ void __put_unused_buffer_head(struct buffer_head * bh)
     if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
         kmem_cache_free(bh_cachep, bh);
     } else {
         bh->b_dev = B_FREE;
+        bh->b_blocknr = -1;
         bh->b_this_page = NULL;
...
@@ -1320,7 +1296,7 @@ static struct buffer_head * create_buffers(struct page * page, unsigned long siz
         if (!bh)
             goto no_grow;

-        bh->b_dev = B_FREE;  /* Flag as unused */
+        bh->b_dev = NODEV;
         bh->b_this_page = head;
         head = bh;
...
@@ -1376,15 +1352,18 @@ static struct buffer_head * create_buffers(struct page * page, unsigned long siz
 /*
  * Called when truncating a buffer on a page completely.
+ *
+ * We can avoid IO by marking it clean.
+ * FIXME!! FIXME!! FIXME!! We need to unmap it too,
+ * so that the filesystem won't write to it. There's
+ * some bug somewhere..
  */
 static void discard_buffer(struct buffer_head * bh)
 {
     if (buffer_mapped(bh)) {
         mark_buffer_clean(bh);
         lock_buffer(bh);
         clear_bit(BH_Uptodate, &bh->b_state);
         clear_bit(BH_Mapped, &bh->b_state);
         clear_bit(BH_Req, &bh->b_state);
         clear_bit(BH_New, &bh->b_state);
         unlock_buffer(bh);
     }
 }
...
@@ -2120,7 +2099,6 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
         }
         tmp = bhs[bhind++];

-        tmp->b_dev = B_FREE;
         tmp->b_size = size;
         set_bh_page(tmp, map, offset);
         tmp->b_this_page = tmp;
...
@@ -2304,7 +2282,6 @@ static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size
     if (Page_Uptodate(page))
         uptodate |= 1 << BH_Uptodate;
-
     spin_lock(&lru_list_lock);
     write_lock(&hash_table_lock);
     do {
         if (!(bh->b_state & (1 << BH_Mapped))) {
...
@@ -2314,23 +2291,21 @@ static void hash_page_buffers(struct page *page, kdev_t dev, int block, int size
             bh->b_state = uptodate;
         }

-        /* Insert the buffer into the regular lists */
-        if (!bh->b_pprev) {
-            __insert_into_queues(bh);
-        }
-
+        /* Insert the buffer into the hash lists if necessary */
+        if (!bh->b_pprev)
+            __insert_into_hash_list(bh);
         block++;
         bh = bh->b_this_page;
     } while (bh != head);
     write_unlock(&hash_table_lock);
     spin_unlock(&lru_list_lock);
 }

 /*
  * Try to increase the number of buffers available: the size argument
  * is used to determine what kind of buffers we want.
  */
-static int grow_buffers(kdev_t dev, int block, int size)
+static int grow_buffers(kdev_t dev, unsigned long block, int size)
 {
     struct page * page;
     struct block_device *bdev;
...
@@ -2389,7 +2364,7 @@ static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
             ll_rw_block(WRITE, 1, &p);
             tryagain = 0;
         } else if (buffer_locked(p)) {
-            if (gfp_mask & __GFP_WAIT) {
+            if (gfp_mask & __GFP_WAITBUF) {
                 wait_on_buffer(p);
                 tryagain = 1;
             } else
...
@@ -2424,12 +2399,10 @@ static int sync_page_buffers(struct buffer_head *bh, unsigned int gfp_mask)
 int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
 {
     struct buffer_head * tmp, * bh = page->buffers;
-    int index = BUFSIZE_INDEX(bh->b_size);

 cleaned_buffers_try_again:
     spin_lock(&lru_list_lock);
     write_lock(&hash_table_lock);
-    spin_lock(&free_list[index].lock);
     tmp = bh;
     do {
         if (buffer_busy(tmp))
...
@@ -2443,14 +2416,10 @@ int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
         struct buffer_head * p = tmp;
         tmp = tmp->b_this_page;

-        /* The buffer can be either on the regular
-         * queues or on the free list.. */
-        if (p->b_dev != B_FREE) {
-            remove_inode_queue(p);
-            __remove_from_queues(p);
-        } else
-            __remove_from_free_list(p, index);
+        if (p->b_dev == B_FREE) BUG();
+        remove_inode_queue(p);
+        __remove_from_queues(p);
         __put_unused_buffer_head(p);
     } while (tmp != bh);
     spin_unlock(&unused_list_lock);
...
@@ -2461,14 +2430,12 @@ int try_to_free_buffers(struct page * page, unsigned int gfp_mask)
     /* And free the page */
     page->buffers = NULL;
     page_cache_release(page);
-    spin_unlock(&free_list[index].lock);
     write_unlock(&hash_table_lock);
     spin_unlock(&lru_list_lock);
     return 1;

 busy_buffer_page:
     /* Uhhuh, start writeback so that we don't end up with all dirty pages */
-    spin_unlock(&free_list[index].lock);
     write_unlock(&hash_table_lock);
     spin_unlock(&lru_list_lock);
     if (gfp_mask & __GFP_IO) {
...
@@ -2581,12 +2548,6 @@ void __init buffer_init(unsigned long mempages)
     for(i = 0; i < nr_hash; i++)
         hash_table[i] = NULL;

-    /* Setup free lists. */
-    for(i = 0; i < NR_SIZES; i++) {
-        free_list[i].list = NULL;
-        free_list[i].lock = SPIN_LOCK_UNLOCKED;
-    }
-
     /* Setup lru lists. */
     for(i = 0; i < NR_LIST; i++)
         lru_list[i] = NULL;
...
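Note: the rewritten hash insert is the heart of this file's change. With the per-size buffer free lists gone (buffers now live in page-cache pages), a buffer head is hashed directly from its (b_dev, b_blocknr) key, and __insert_into_hash_list() computes its own bucket instead of taking one from the caller. A standalone sketch of the pprev-style chain insert (userspace C with stand-in types; the kernel's hash() macro is replaced by a trivial modulo):

#include <stdio.h>
#include <stddef.h>

#define NR_BUCKETS 8

struct buffer_head {
    unsigned long b_blocknr;
    struct buffer_head *b_next;    /* next node in hash chain */
    struct buffer_head **b_pprev;  /* &(previous node's b_next) */
};

static struct buffer_head *hash_table[NR_BUCKETS];

/* Mirrors __insert_into_hash_list(): publish the new node at the
 * head of the chain, then fix up the old head's back-pointer. */
static void insert_into_hash_list(struct buffer_head *bh)
{
    struct buffer_head **head = &hash_table[bh->b_blocknr % NR_BUCKETS];
    struct buffer_head *next = *head;

    *head = bh;
    bh->b_pprev = head;
    bh->b_next = next;
    if (next != NULL)
        next->b_pprev = &bh->b_next;
}

int main(void)
{
    struct buffer_head a = { .b_blocknr = 1 }, b = { .b_blocknr = 9 };
    insert_into_hash_list(&a);
    insert_into_hash_list(&b);    /* same bucket: 9 % 8 == 1 */
    printf("bucket head is block %lu, next is block %lu\n",
           hash_table[1]->b_blocknr, hash_table[1]->b_next->b_blocknr);
    return 0;
}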
include/linux/fs.h

...
@@ -1086,11 +1086,6 @@ extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
 /* reiserfs_writepage needs this */
 extern void set_buffer_async_io(struct buffer_head *bh) ;

-#define BUF_CLEAN    0
-#define BUF_LOCKED   1    /* Buffers scheduled for write */
-#define BUF_DIRTY    2    /* Dirty buffers, not yet scheduled for write */
-#define NR_LIST      3
-
 static inline void get_bh(struct buffer_head * bh)
 {
         atomic_inc(&(bh)->b_count);
...
include/linux/list.h

...
@@ -92,7 +92,6 @@ static __inline__ void __list_del(struct list_head * prev,
 static __inline__ void list_del(struct list_head *entry)
 {
     __list_del(entry->prev, entry->next);
-    entry->next = entry->prev = 0;
 }

 /**
...
include/linux/mm.h

...
@@ -550,16 +550,17 @@ extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
 #define __GFP_IO      0x40   /* Can start low memory physical IO? */
 #define __GFP_HIGHIO  0x80   /* Can start high mem physical IO? */
 #define __GFP_FS      0x100  /* Can call down to low-level FS? */
+#define __GFP_WAITBUF 0x200  /* Can we wait for buffers to complete? */

 #define GFP_NOHIGHIO  (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
 #define GFP_NOIO      (__GFP_HIGH | __GFP_WAIT)
-#define GFP_NOFS      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define GFP_NOFS      (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF)
 #define GFP_ATOMIC    (__GFP_HIGH)
-#define GFP_USER      (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_HIGHUSER  (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
-#define GFP_KERNEL    (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_NFS       (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
-#define GFP_KSWAPD    (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_USER      (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_HIGHUSER  (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL    (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_NFS       (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)
+#define GFP_KSWAPD    (             __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)

 /* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
    platforms, used as appropriate on others */
...
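Note: __GFP_WAITBUF splits "may sleep waiting on buffer I/O" out of the generic __GFP_WAIT, which is what lets sync_page_buffers() in fs/buffer.c test gfp_mask & __GFP_WAITBUF before calling wait_on_buffer(). A standalone recomputation of the masks (userspace C; the __GFP_WAIT and __GFP_HIGH values are not shown in this hunk and are assumed from the same era's header):

#include <stdio.h>

#define __GFP_WAIT    0x10    /* assumed, not in this hunk */
#define __GFP_HIGH    0x20    /* assumed, not in this hunk */
#define __GFP_IO      0x40
#define __GFP_HIGHIO  0x80
#define __GFP_FS      0x100
#define __GFP_WAITBUF 0x200   /* new in this patch */

#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | \
                    __GFP_WAITBUF | __GFP_FS)
#define GFP_ATOMIC (__GFP_HIGH)

int main(void)
{
    printf("GFP_KERNEL = %#x, may wait on buffers: %d\n",
           GFP_KERNEL, !!(GFP_KERNEL & __GFP_WAITBUF));
    printf("GFP_ATOMIC = %#x, may wait on buffers: %d\n",
           GFP_ATOMIC, !!(GFP_ATOMIC & __GFP_WAITBUF));
    return 0;
}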
include/linux/slab.h

...
@@ -24,7 +24,7 @@ typedef struct kmem_cache_s kmem_cache_t;
 #define SLAB_NFS          GFP_NFS
 #define SLAB_DMA          GFP_DMA

-#define SLAB_LEVEL_MASK   (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_FS)
+#define SLAB_LEVEL_MASK   (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_HIGHIO|__GFP_WAITBUF|__GFP_FS)
 #define SLAB_NO_GROW      0x00001000UL  /* don't grow a cache */

 /* flags to pass to kmem_cache_create().
...
mm/filemap.c

...
@@ -873,6 +873,13 @@ struct page * find_or_create_page(struct address_space *mapping, unsigned long i
     return page;
 }

+/*
+ * Returns locked page at given index in given cache, creating it if needed.
+ */
+struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
+{
+    return find_or_create_page(mapping, index, mapping->gfp_mask);
+}
+
 #if 0
...
@@ -1005,24 +1012,6 @@ static inline int get_max_readahead(struct inode * inode)
     return max_readahead[MAJOR(inode->i_dev)][MINOR(inode->i_dev)];
 }

-static inline unsigned long calc_end_index(struct inode * inode)
-{
-    unsigned long end_index;
-
-    end_index = inode->i_size >> PAGE_CACHE_SHIFT;
-
-    return end_index;
-}
-
-static inline loff_t calc_rsize(struct inode * inode)
-{
-    loff_t rsize;
-
-    rsize = inode->i_size;
-
-    return rsize;
-}
-
 static void generic_file_readahead(int reada_ok,
     struct file * filp, struct inode * inode,
     struct page * page)
...
@@ -1033,7 +1022,7 @@ static void generic_file_readahead(int reada_ok,
     unsigned long raend;
     int max_readahead = get_max_readahead(inode);

-    end_index = calc_end_index(inode);
+    end_index = inode->i_size >> PAGE_CACHE_SHIFT;

     raend = filp->f_raend;
     max_ahead = 0;
...
@@ -1157,8 +1146,8 @@ void mark_page_accessed(struct page *page)
  */
 void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t * desc, read_actor_t actor)
 {
-    struct inode *inode = filp->f_dentry->d_inode;
-    struct address_space *mapping = inode->i_mapping;
+    struct address_space *mapping = filp->f_dentry->d_inode->i_mapping;
+    struct inode *inode = mapping->host;
     unsigned long index, offset;
     struct page *cached_page;
     int reada_ok;
...
@@ -1212,13 +1201,13 @@ void do_generic_file_read(struct file * filp, loff_t *ppos, read_descriptor_t *
         struct page *page, **hash;
         unsigned long end_index, nr, ret;

-        end_index = calc_end_index(inode);
+        end_index = inode->i_size >> PAGE_CACHE_SHIFT;

         if (index > end_index)
             break;
         nr = PAGE_CACHE_SIZE;
         if (index == end_index) {
-            nr = calc_rsize(inode) & ~PAGE_CACHE_MASK;
+            nr = inode->i_size & ~PAGE_CACHE_MASK;
             if (nr <= offset)
                 break;
         }
...
@@ -1595,7 +1584,6 @@ struct page * filemap_nopage(struct vm_area_struct * area,
     struct address_space *mapping = inode->i_mapping;
     struct page *page, **hash, *old_page;
     unsigned long size, pgoff;
-    loff_t rsize;

     pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
...
@@ -1604,8 +1592,7 @@ struct page * filemap_nopage(struct vm_area_struct * area,
      * An external ptracer can access pages that normally aren't
      * accessible..
      */
-    rsize = calc_rsize(inode);
-    size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+    size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
     if ((pgoff >= size) && (area->vm_mm == current->mm))
         return NULL;
...
@@ -2104,14 +2091,13 @@ static long madvise_willneed(struct vm_area_struct * vma,
     long error = -EBADF;
     struct file * file;
     unsigned long size, rlim_rss;
-    loff_t rsize;

     /* Doesn't work if there's no mapped file. */
     if (!vma->vm_file)
         return error;
     file = vma->vm_file;
-    rsize = calc_rsize(file->f_dentry->d_inode);
-    size = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+    size = (file->f_dentry->d_inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

     start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
     if (end > vma->vm_end)
...
@@ -2549,19 +2535,6 @@ static inline struct page * __grab_cache_page(struct address_space *mapping,
     return page;
 }

-/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
-{
-    struct page *cached_page = NULL;
-    struct page *page = __grab_cache_page(mapping, index, &cached_page);
-    if (cached_page)
-        page_cache_release(cached_page);
-    return page;
-}
-
 inline void remove_suid(struct inode *inode)
 {
     unsigned int mode;
...
@@ -2595,8 +2568,8 @@ inline void remove_suid(struct inode *inode)
 ssize_t
 generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
 {
-    struct inode    *inode = file->f_dentry->d_inode;
-    struct address_space *mapping = inode->i_mapping;
+    struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
+    struct inode    *inode = mapping->host;
     unsigned long   limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
     loff_t          pos;
     struct page     *page, *cached_page;
...
@@ -2628,8 +2601,7 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
     written = 0;

-    /* FIXME: this is for backwards compatibility with 2.4 */
-    if (!S_ISBLK(inode->i_mode) && file->f_flags & O_APPEND)
+    if (file->f_flags & O_APPEND)
         pos = inode->i_size;

     /*
...
@@ -2690,17 +2662,15 @@ generic_file_write(struct file *file,const char *buf,size_t count, loff_t *ppos)
             err = -EPERM;
             goto out;
         }
-        if (pos >= calc_rsize(inode)) {
-            if (count || pos > calc_rsize(inode)) {
+        /* FIXME: this is for backwards compatibility with 2.4 */
+        if (pos >= inode->i_size) {
+            if (count || pos > inode->i_size) {
                 err = -ENOSPC;
                 goto out;
             }
             /* zero-length writes at blkdev end are OK */
         }
-        if (pos + count > calc_rsize(inode))
-            count = calc_rsize(inode) - pos;
+        if (pos + count > inode->i_size)
+            count = inode->i_size - pos;
     }

     err = 0;
...
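Note: with calc_end_index()/calc_rsize() gone, the read path computes the last page index and the number of valid bytes in that page directly from i_size, which now holds the real size for block devices too (see fs/block_dev.c above). A standalone recomputation of the arithmetic (userspace C; PAGE_CACHE_SHIFT is assumed to be 12, i.e. 4 KiB pages):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12    /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
    unsigned long long i_size = 10000;    /* example file size in bytes */

    unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
    unsigned long nr = i_size & ~PAGE_CACHE_MASK;  /* bytes in last page */

    printf("end_index=%lu, bytes valid in last page=%lu\n", end_index, nr);
    return 0;
}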
mm/memory.c

...
@@ -319,6 +319,8 @@ static inline int zap_pte_range(mmu_gather_t *tlb, pmd_t * pmd, unsigned long ad
         if (pte_none(pte))
             continue;
         if (pte_present(pte)) {
-            freed++;
+            struct page *page = pte_page(pte);
+            if (VALID_PAGE(page) && !PageReserved(page))
+                freed++;
             /* This will eventually call __free_pte on the pte. */
             tlb_remove_page(tlb, ptep, address+offset);
...
mm/page_alloc.c

...
@@ -480,7 +480,7 @@ unsigned int nr_free_buffer_pages (void)
     zone_t **zonep, *zone;

     do {
-        zonelist = pgdat->node_zonelists + __GFP_HIGHMEM;
+        zonelist = pgdat->node_zonelists + (GFP_USER & GFP_ZONEMASK);
         zonep = zonelist->zones;

         for (zone = *zonep++; zone; zone = *zonep++)
...
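Note: node_zonelists is indexed by the zone-selection bits of a gfp mask. Block-device page cache is now allocated with GFP_USER (see the bdget() hunk above), so nr_free_buffer_pages() walks the zonelist GFP_USER actually allocates from, rather than one indexed by the raw __GFP_HIGHMEM flag value. A standalone sketch of the indexing (userspace C; GFP_ZONEMASK and __GFP_HIGHMEM values are assumptions taken from 2.4-era headers, and GFP_USER follows the mm.h hunk above):

#include <stdio.h>

#define GFP_ZONEMASK  0x0f    /* assumed low-nibble zone bits */
#define __GFP_HIGHMEM 0x02    /* assumed, as in 2.4 headers */
#define __GFP_WAIT    0x10
#define __GFP_IO      0x40
#define __GFP_HIGHIO  0x80
#define __GFP_FS      0x100
#define __GFP_WAITBUF 0x200
#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_WAITBUF | __GFP_FS)

int main(void)
{
    /* Only the zone bits select a zonelist; GFP_USER sets none of
     * them, so it picks the default lowmem list, index 0. */
    printf("zonelist index: old=%#x new=%#x\n",
           __GFP_HIGHMEM, GFP_USER & GFP_ZONEMASK);
    return 0;
}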