Commit 1463079f authored Apr 22, 2004 by Christoph Hellwig
[XFS] really kill the pagebuf vs xfs_buf confusion
SGI Modid: xfs-linux:xfs-kern:167628a
parent b2d94fa0
Showing 6 changed files with 132 additions and 141 deletions
fs/xfs/linux/xfs_buf.c    +65  -65
fs/xfs/linux/xfs_buf.h    +53  -62
fs/xfs/linux/xfs_ioctl.c  +1   -1
fs/xfs/linux/xfs_lrw.c    +2   -2
fs/xfs/linux/xfs_lrw.h    +3   -3
fs/xfs/linux/xfs_super.h  +8   -8
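The change is a mechanical rename throughout: the old pagebuf type names become the xfs_buf names the rest of XFS already uses, and the compatibility aliases in xfs_buf.h go away. As a minimal sketch of the before/after shape of the declarations (simplified here, struct bodies elided; see the xfs_buf.h hunks below for the real definitions):

	/* Before: page_buf_s / pb_target are the real struct tags, xfs_* are aliases. */
	typedef struct page_buf_s  { /* ... buffer fields ... */ } page_buf_t;
	typedef struct page_buf_s  xfs_buf_t;
	typedef struct pb_target   { /* ... target fields ... */ } pb_target_t;
	typedef struct pb_target   xfs_buftarg_t;

	/* After: xfs_buf / xfs_buftarg are the struct tags themselves; no aliases needed. */
	typedef struct xfs_buf     { /* ... buffer fields ... */ } xfs_buf_t;
	typedef struct xfs_buftarg { /* ... target fields ... */ } xfs_buftarg_t;

In the same spirit, PAGE_BUF_DADDR_NULL becomes XFS_BUF_DADDR_NULL and pb_bn is typed as xfs_daddr_t instead of the removed page_buf_daddr_t.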
fs/xfs/linux/xfs_buf.c

@@ -36,7 +36,7 @@
  * The page_buf module provides an abstract buffer cache model on top of
  * the Linux page cache.  Cached metadata blocks for a file system are
  * hashed to the inode for the block device.  The page_buf module
- * assembles buffer (page_buf_t) objects on demand to aggregate such
+ * assembles buffer (xfs_buf_t) objects on demand to aggregate such
  * cached pages for I/O.
  *
  *
@@ -71,7 +71,7 @@
 STATIC kmem_cache_t *pagebuf_cache;
 STATIC void pagebuf_daemon_wakeup(void);
-STATIC void pagebuf_delwri_queue(page_buf_t *, int);
+STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
 STATIC struct workqueue_struct *pagebuf_logio_workqueue;
 STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
@@ -82,7 +82,7 @@ STATIC struct workqueue_struct *pagebuf_dataio_workqueue;
 #ifdef PAGEBUF_TRACE
 void
 pagebuf_trace(
-	page_buf_t	*pb,
+	xfs_buf_t	*pb,
	char		*id,
	void		*data,
	void		*ra)
@@ -169,7 +169,7 @@ _bhash(
  * Mapping of multi-page buffers into contiguous virtual space
  */
-STATIC void *pagebuf_mapout_locked(page_buf_t *);
+STATIC void *pagebuf_mapout_locked(xfs_buf_t *);

 typedef struct a_list {
	void	*vm_addr;
@@ -229,8 +229,8 @@ purge_addresses(void)
 STATIC void
 _pagebuf_initialize(
-	page_buf_t		*pb,
-	pb_target_t		*target,
+	xfs_buf_t		*pb,
+	xfs_buftarg_t		*target,
	loff_t			range_base,
	size_t			range_length,
	page_buf_flags_t	flags)
@@ -240,7 +240,7 @@ _pagebuf_initialize(
	 */
	flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
-	memset(pb, 0, sizeof(page_buf_t));
+	memset(pb, 0, sizeof(xfs_buf_t));
	atomic_set(&pb->pb_hold, 1);
	init_MUTEX_LOCKED(&pb->pb_iodonesema);
	INIT_LIST_HEAD(&pb->pb_list);
@@ -256,7 +256,7 @@ _pagebuf_initialize(
	 */
	pb->pb_buffer_length = pb->pb_count_desired = range_length;
	pb->pb_flags = flags | PBF_NONE;
-	pb->pb_bn = PAGE_BUF_DADDR_NULL;
+	pb->pb_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&pb->pb_pin_count, 0);
	init_waitqueue_head(&pb->pb_waiters);
@@ -270,7 +270,7 @@ _pagebuf_initialize(
  */
 STATIC int
 _pagebuf_get_pages(
-	page_buf_t		*pb,
+	xfs_buf_t		*pb,
	int			page_count,
	page_buf_flags_t	flags)
 {
@@ -296,7 +296,7 @@ _pagebuf_get_pages(
  */
 STATIC inline void
 _pagebuf_freepages(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	int		buf_index;
@@ -318,7 +318,7 @@ _pagebuf_freepages(
  */
 void
 pagebuf_free(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	PB_TRACE(pb, "free", 0);
@@ -367,7 +367,7 @@ pagebuf_free(
  */
 STATIC int
 _pagebuf_lookup_pages(
-	page_buf_t		*pb,
+	xfs_buf_t		*pb,
	struct address_space	*aspace,
	page_buf_flags_t	flags)
 {
@@ -531,20 +531,20 @@ _pagebuf_lookup_pages(
  * which may imply that this call will block until those buffers
  * are unlocked.  No I/O is implied by this call.
  */
-STATIC page_buf_t *
+STATIC xfs_buf_t *
 _pagebuf_find(				/* find buffer for block	*/
-	pb_target_t		*target,/* target for block		*/
+	xfs_buftarg_t		*target,/* target for block		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags,	/* PBF_TRYLOCK			*/
-	page_buf_t		*new_pb)/* newly allocated buffer	*/
+	xfs_buf_t		*new_pb)/* newly allocated buffer	*/
 {
	loff_t			range_base;
	size_t			range_length;
	int			hval;
	pb_hash_t		*h;
	struct list_head	*p;
-	page_buf_t		*pb;
+	xfs_buf_t		*pb;
	int			not_locked;

	range_base = (ioff << BBSHIFT);
@@ -561,7 +561,7 @@ _pagebuf_find( /* find buffer for block */
	spin_lock(&h->pb_hash_lock);
	list_for_each(p, &h->pb_hash) {
-		pb = list_entry(p, page_buf_t, pb_hash_list);
+		pb = list_entry(p, xfs_buf_t, pb_hash_list);

		if (pb->pb_target == target &&
		    pb->pb_file_offset == range_base &&
@@ -641,10 +641,10 @@ _pagebuf_find( /* find buffer for block */
  * pages are present in the buffer, not all of every page may be
  * valid.
  */
-page_buf_t *
+xfs_buf_t *
 pagebuf_find(				/* find buffer for block	*/
					/* if the block is in memory	*/
-	pb_target_t		*target,/* target for block		*/
+	xfs_buftarg_t		*target,/* target for block		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
@@ -661,14 +661,14 @@ pagebuf_find( /* find buffer for block */
  * although backing storage may not be. If PBF_READ is set in
  * flags, pagebuf_iostart is called also.
  */
-page_buf_t *
+xfs_buf_t *
 pagebuf_get(				/* allocate a buffer		*/
-	pb_target_t		*target,/* target for buffer		*/
+	xfs_buftarg_t		*target,/* target for buffer		*/
	loff_t			ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	page_buf_flags_t	flags)	/* PBF_TRYLOCK			*/
 {
-	page_buf_t		*pb, *new_pb;
+	xfs_buf_t		*pb, *new_pb;
	int			error;

	new_pb = pagebuf_allocate(flags);
@@ -732,14 +732,14 @@ pagebuf_get( /* allocate a buffer */
 /*
  * Create a skeletal pagebuf (no pages associated with it).
  */
-page_buf_t *
+xfs_buf_t *
 pagebuf_lookup(
-	struct pb_target	*target,
+	xfs_buftarg_t		*target,
	loff_t			ioff,
	size_t			isize,
	page_buf_flags_t	flags)
 {
-	page_buf_t		*pb;
+	xfs_buf_t		*pb;

	pb = pagebuf_allocate(flags);
	if (pb) {
@@ -754,7 +754,7 @@ pagebuf_lookup(
  */
 void
 pagebuf_readahead(
-	pb_target_t		*target,
+	xfs_buftarg_t		*target,
	loff_t			ioff,
	size_t			isize,
	page_buf_flags_t	flags)
@@ -771,12 +771,12 @@ pagebuf_readahead(
	pagebuf_get(target, ioff, isize, flags);
 }

-page_buf_t *
+xfs_buf_t *
 pagebuf_get_empty(
	size_t			len,
-	pb_target_t		*target)
+	xfs_buftarg_t		*target)
 {
-	page_buf_t		*pb;
+	xfs_buf_t		*pb;

	pb = pagebuf_allocate(0);
	if (pb)
@@ -798,7 +798,7 @@ mem_to_page(
 int
 pagebuf_associate_memory(
-	page_buf_t	*pb,
+	xfs_buf_t	*pb,
	void		*mem,
	size_t		len)
 {
@@ -906,7 +906,7 @@ pagebuf_get_no_daddr(
  */
 void
 pagebuf_hold(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	atomic_inc(&pb->pb_hold);
	PB_TRACE(pb, "hold", 0);
@@ -920,7 +920,7 @@ pagebuf_hold(
  */
 void
 pagebuf_rele(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	pb_hash_t	*hash = pb_hash(pb);
@@ -979,7 +979,7 @@ pagebuf_rele(
 int
 pagebuf_cond_lock(			/* lock buffer, if not locked	*/
					/* returns -EBUSY if locked)	*/
-	page_buf_t		*pb)
+	xfs_buf_t		*pb)
 {
	int			locked;
@@ -998,7 +998,7 @@ pagebuf_cond_lock( /* lock buffer, if not locked */
  */
 int
 pagebuf_lock_value(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	return(atomic_read(&pb->pb_sema.count));
 }
@@ -1013,7 +1013,7 @@ pagebuf_lock_value(
  */
 int
 pagebuf_lock(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	PB_TRACE(pb, "lock", 0);
	if (atomic_read(&pb->pb_io_remaining))
@@ -1033,7 +1033,7 @@ pagebuf_lock(
  */
 void
 pagebuf_unlock(				/* unlock buffer		*/
-	page_buf_t	*pb)		/* buffer to unlock		*/
+	xfs_buf_t	*pb)		/* buffer to unlock		*/
 {
	PB_CLEAR_OWNER(pb);
	up(&pb->pb_sema);
@@ -1061,7 +1061,7 @@ pagebuf_unlock( /* unlock buffer */
  */
 void
 pagebuf_pin(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	atomic_inc(&pb->pb_pin_count);
	PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
@@ -1076,7 +1076,7 @@ pagebuf_pin(
  */
 void
 pagebuf_unpin(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	if (atomic_dec_and_test(&pb->pb_pin_count)) {
		wake_up_all(&pb->pb_waiters);
@@ -1086,7 +1086,7 @@ pagebuf_unpin(
 int
 pagebuf_ispin(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	return atomic_read(&pb->pb_pin_count);
 }
@@ -1100,7 +1100,7 @@ pagebuf_ispin(
  */
 static inline void
 _pagebuf_wait_unpin(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	DECLARE_WAITQUEUE	(wait, current);
@@ -1135,7 +1135,7 @@ void
 pagebuf_iodone_work(
	void		*v)
 {
-	page_buf_t	*pb = (page_buf_t *)v;
+	xfs_buf_t	*pb = (xfs_buf_t *)v;

	if (pb->pb_iodone) {
		(*(pb->pb_iodone))(pb);
@@ -1151,7 +1151,7 @@ pagebuf_iodone_work(
 void
 pagebuf_iodone(
-	page_buf_t	*pb,
+	xfs_buf_t	*pb,
	int		dataio,
	int		schedule)
 {
@@ -1182,7 +1182,7 @@ pagebuf_iodone(
  */
 void
 pagebuf_ioerror(			/* mark/clear buffer error flag */
-	page_buf_t	*pb,		/* buffer to mark		*/
+	xfs_buf_t	*pb,		/* buffer to mark		*/
	unsigned int	error)		/* error to store (0 if none)	*/
 {
	pb->pb_error = error;
@@ -1203,7 +1203,7 @@ pagebuf_ioerror( /* mark/clear buffer error flag */
  */
 int
 pagebuf_iostart(			/* start I/O on a buffer	*/
-	page_buf_t	*pb,		/* buffer to start		*/
+	xfs_buf_t	*pb,		/* buffer to start		*/
	page_buf_flags_t flags)		/* PBF_LOCK, PBF_ASYNC, PBF_READ, */
					/* PBF_WRITE, PBF_DELWRI,	*/
					/* PBF_DONT_BLOCK		*/
@@ -1224,7 +1224,7 @@ pagebuf_iostart( /* start I/O on a buffer */
	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
			PBF_READ_AHEAD | PBF_RUN_QUEUES);

-	BUG_ON(pb->pb_bn == PAGE_BUF_DADDR_NULL);
+	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);

	/* For writes allow an alternate strategy routine to precede
	 * the actual I/O request (which may not be issued at all in
@@ -1250,7 +1250,7 @@ pagebuf_iostart( /* start I/O on a buffer */
 STATIC __inline__ int
 _pagebuf_iolocked(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
	if (pb->pb_flags & PBF_READ)
@@ -1260,7 +1260,7 @@ _pagebuf_iolocked(
 STATIC __inline__ void
 _pagebuf_iodone(
-	page_buf_t	*pb,
+	xfs_buf_t	*pb,
	int		schedule)
 {
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
@@ -1275,7 +1275,7 @@ bio_end_io_pagebuf(
	unsigned int		bytes_done,
	int			error)
 {
-	page_buf_t		*pb = (page_buf_t *)bio->bi_private;
+	xfs_buf_t		*pb = (xfs_buf_t *)bio->bi_private;
	unsigned int		i, blocksize = pb->pb_target->pbr_bsize;
	unsigned int		sectorshift = pb->pb_target->pbr_sshift;
	struct bio_vec		*bvec = bio->bi_io_vec;
@@ -1316,7 +1316,7 @@ bio_end_io_pagebuf(
 void
 _pagebuf_ioapply(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	int		i, map_i, total_nr_pages, nr_pages;
	struct bio	*bio;
@@ -1435,7 +1435,7 @@ _pagebuf_ioapply(
  */
 int
 pagebuf_iorequest(			/* start real I/O		*/
-	page_buf_t	*pb)		/* buffer to convey to device	*/
+	xfs_buf_t	*pb)		/* buffer to convey to device	*/
 {
	PB_TRACE(pb, "iorequest", 0);
@@ -1471,7 +1471,7 @@ pagebuf_iorequest( /* start real I/O */
  */
 int
 pagebuf_iowait(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	PB_TRACE(pb, "iowait", 0);
	if (atomic_read(&pb->pb_io_remaining))
@@ -1483,7 +1483,7 @@ pagebuf_iowait(
 STATIC void *
 pagebuf_mapout_locked(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	void		*old_addr = NULL;
@@ -1502,7 +1502,7 @@ pagebuf_mapout_locked(
 caddr_t
 pagebuf_offset(
-	page_buf_t	*pb,
+	xfs_buf_t	*pb,
	size_t		offset)
 {
	struct page	*page;
@@ -1520,7 +1520,7 @@ pagebuf_offset(
  */
 void
 pagebuf_iomove(
-	page_buf_t		*pb,	/* buffer to process		*/
+	xfs_buf_t		*pb,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	caddr_t			data,	/* data address			*/
@@ -1564,7 +1564,7 @@ STATIC spinlock_t pbd_delwrite_lock = SPIN_LOCK_UNLOCKED;
 STATIC void
 pagebuf_delwri_queue(
-	page_buf_t	*pb,
+	xfs_buf_t	*pb,
	int		unlock)
 {
	PB_TRACE(pb, "delwri_q", (long)unlock);
@@ -1587,7 +1587,7 @@ pagebuf_delwri_queue(
 void
 pagebuf_delwri_dequeue(
-	page_buf_t	*pb)
+	xfs_buf_t	*pb)
 {
	PB_TRACE(pb, "delwri_uq", 0);
	spin_lock(&pbd_delwrite_lock);
@@ -1621,7 +1621,7 @@ STATIC int
 pagebuf_daemon(
	void			*data)
 {
-	page_buf_t		*pb;
+	xfs_buf_t		*pb;
	struct list_head	*curr, *next, tmp;

	/*  Set up the thread  */
@@ -1644,7 +1644,7 @@ pagebuf_daemon(
		spin_lock(&pbd_delwrite_lock);
		list_for_each_safe(curr, next, &pbd_delwrite_queue) {
-			pb = list_entry(curr, page_buf_t, pb_list);
+			pb = list_entry(curr, xfs_buf_t, pb_list);

			PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
@@ -1664,7 +1664,7 @@ pagebuf_daemon(
		spin_unlock(&pbd_delwrite_lock);
		while (!list_empty(&tmp)) {
-			pb = list_entry(tmp.next, page_buf_t, pb_list);
+			pb = list_entry(tmp.next, xfs_buf_t, pb_list);
			list_del_init(&pb->pb_list);

			pagebuf_iostrategy(pb);
@@ -1682,11 +1682,11 @@ pagebuf_daemon(
 void
 pagebuf_delwri_flush(
-	pb_target_t		*target,
+	xfs_buftarg_t		*target,
	u_long			flags,
	int			*pinptr)
 {
-	page_buf_t		*pb;
+	xfs_buf_t		*pb;
	struct list_head	*curr, *next, tmp;
	int			pincount = 0;
@@ -1697,7 +1697,7 @@ pagebuf_delwri_flush(
	INIT_LIST_HEAD(&tmp);
	list_for_each_safe(curr, next, &pbd_delwrite_queue) {
-		pb = list_entry(curr, page_buf_t, pb_list);
+		pb = list_entry(curr, xfs_buf_t, pb_list);

		/*
		 * Skip other targets, markers and in progress buffers
@@ -1723,7 +1723,7 @@ pagebuf_delwri_flush(
	spin_unlock(&pbd_delwrite_lock);

	list_for_each_safe(curr, next, &tmp) {
-		pb = list_entry(curr, page_buf_t, pb_list);
+		pb = list_entry(curr, xfs_buf_t, pb_list);

		if (flags & PBDF_WAIT)
			pb->pb_flags &= ~PBF_ASYNC;
@@ -1735,7 +1735,7 @@ pagebuf_delwri_flush(
	}

	while (!list_empty(&tmp)) {
-		pb = list_entry(tmp.next, page_buf_t, pb_list);
+		pb = list_entry(tmp.next, xfs_buf_t, pb_list);

		list_del_init(&pb->pb_list);
		pagebuf_iowait(pb);
@@ -1800,7 +1800,7 @@ pagebuf_init(void)
 {
	int		i;

-	pagebuf_cache = kmem_cache_create("page_buf_t", sizeof(page_buf_t), 0,
+	pagebuf_cache = kmem_cache_create("xfs_buf_t", sizeof(xfs_buf_t), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (pagebuf_cache == NULL) {
		printk("pagebuf: couldn't init pagebuf cache\n");
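The module comment at the top of xfs_buf.c above still describes the model: cached metadata pages are aggregated on demand into xfs_buf_t objects for I/O against an xfs_buftarg_t. As a hedged illustration of that calling pattern (not code from this commit; target, blkno and len are assumed to come from the caller), a locked metadata read through the renamed interfaces could look roughly like:

	xfs_buf_t	*bp;

	/* Find or assemble a locked buffer over the cached pages and read it in. */
	bp = pagebuf_get(target, blkno, len, PBF_LOCK | PBF_READ);
	if (bp) {
		if (!pagebuf_geterror(bp)) {
			caddr_t	data = pagebuf_offset(bp, 0);	/* access the cached data */
			/* ... use data ... */
		}
		xfs_buf_relse(bp);	/* unlock and drop the reference */
	}

Only the buffer and target types are renamed here; every routine in the sketch keeps its pagebuf_* name in this commit.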
fs/xfs/linux/xfs_buf.h

@@ -51,10 +51,7 @@
  * Base types
  */

 /* daddr must be signed since -1 is used for bmaps that are not yet allocated */
-typedef loff_t page_buf_daddr_t;
-
-#define PAGE_BUF_DADDR_NULL	((page_buf_daddr_t) (-1LL))
+#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

 #define page_buf_ctob(pp)	((pp) * PAGE_CACHE_SIZE)
 #define page_buf_btoc(dd)	(((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
@@ -103,17 +100,17 @@ typedef enum page_buf_flags_e { /* pb_flags values */
 #define PBF_NOT_DONE(pb)	(((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) != 0)
 #define PBF_DONE(pb)		(((pb)->pb_flags & (PBF_PARTIAL|PBF_NONE)) == 0)

-typedef struct pb_target {
+typedef struct xfs_buftarg {
	dev_t			pbr_dev;
	struct block_device	*pbr_bdev;
	struct address_space	*pbr_mapping;
	unsigned int		pbr_bsize;
	unsigned int		pbr_sshift;
	size_t			pbr_smask;
-} pb_target_t;
+} xfs_buftarg_t;

 /*
- *	page_buf_t:  Buffer structure for page cache-based buffers
+ *	xfs_buf_t:  Buffer structure for page cache-based buffers
  *
  * This buffer structure is used by the page cache buffer management routines
  * to refer to an assembly of pages forming a logical buffer.  The actual
@@ -128,16 +125,16 @@ typedef struct pb_target {
  * to indicate which disk blocks in the page are not valid.
  */

-struct page_buf_s;
-typedef void (*page_buf_iodone_t)(struct page_buf_s *);
+struct xfs_buf;
+typedef void (*page_buf_iodone_t)(struct xfs_buf *);
			/* call-back function on I/O completion */
-typedef void (*page_buf_relse_t)(struct page_buf_s *);
+typedef void (*page_buf_relse_t)(struct xfs_buf *);
			/* call-back function on I/O completion */
-typedef int (*page_buf_bdstrat_t)(struct page_buf_s *);
+typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);

 #define PB_PAGES	4

-typedef struct page_buf_s {
+typedef struct xfs_buf {
	struct semaphore	pb_sema;	/* semaphore for lockables	*/
	unsigned long		pb_flushtime;	/* time to flush pagebuf	*/
	atomic_t		pb_pin_count;	/* pin count			*/
@@ -145,9 +142,9 @@ typedef struct page_buf_s {
	struct list_head	pb_list;
	page_buf_flags_t	pb_flags;	/* status flags			*/
	struct list_head	pb_hash_list;
-	struct pb_target	*pb_target;	/* logical object		*/
+	xfs_buftarg_t		*pb_target;	/* logical object		*/
	atomic_t		pb_hold;	/* reference count		*/
-	page_buf_daddr_t	pb_bn;		/* block number for I/O	*/
+	xfs_daddr_t		pb_bn;		/* block number for I/O	*/
	loff_t			pb_file_offset;	/* offset in file		*/
	size_t			pb_buffer_length; /* size of buffer in bytes	*/
	size_t			pb_count_desired; /* desired transfer size	*/
@@ -171,52 +168,52 @@ typedef struct page_buf_s {
 #ifdef PAGEBUF_LOCK_TRACKING
	int			pb_last_holder;
 #endif
-} page_buf_t;
+} xfs_buf_t;


 /* Finding and Reading Buffers */

-extern page_buf_t *pagebuf_find(	/* find buffer for block if	*/
+extern xfs_buf_t *pagebuf_find(		/* find buffer for block if	*/
					/* the block is in memory	*/
-		struct pb_target *,	/* inode for block		*/
+		xfs_buftarg_t *,	/* inode for block		*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* PBF_LOCK			*/

-extern page_buf_t *pagebuf_get(		/* allocate a buffer		*/
-		struct pb_target *,	/* inode for buffer		*/
+extern xfs_buf_t *pagebuf_get(		/* allocate a buffer		*/
+		xfs_buftarg_t *,	/* inode for buffer		*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* PBF_LOCK, PBF_READ,		*/
					/* PBF_ASYNC			*/

-extern page_buf_t *pagebuf_lookup(
-		struct pb_target *,
+extern xfs_buf_t *pagebuf_lookup(
+		xfs_buftarg_t *,
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* PBF_READ, PBF_WRITE,		*/
					/* PBF_FORCEIO,			*/

-extern page_buf_t *pagebuf_get_empty(	/* allocate pagebuf struct with	*/
+extern xfs_buf_t *pagebuf_get_empty(	/* allocate pagebuf struct with	*/
					/*  no memory or disk address	*/
		size_t len,
-		struct pb_target *);	/* mount point "fake" inode	*/
+		xfs_buftarg_t *);	/* mount point "fake" inode	*/

-extern page_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct	*/
+extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct	*/
					/* without disk address		*/
		size_t len,
-		struct pb_target *);	/* mount point "fake" inode	*/
+		xfs_buftarg_t *);	/* mount point "fake" inode	*/

 extern int pagebuf_associate_memory(
-		page_buf_t *,
+		xfs_buf_t *,
		void *,
		size_t);

 extern void pagebuf_hold(		/* increment reference count	*/
-		page_buf_t *);		/* buffer to hold		*/
+		xfs_buf_t *);		/* buffer to hold		*/

 extern void pagebuf_readahead(		/* read ahead into cache	*/
-		struct pb_target *,	/* target for buffer (or NULL)	*/
+		xfs_buftarg_t *,	/* target for buffer (or NULL)	*/
		loff_t,			/* starting offset of range	*/
		size_t,			/* length of range		*/
		page_buf_flags_t);	/* additional read flags	*/
@@ -224,63 +221,63 @@ extern void pagebuf_readahead( /* read ahead into cache */
 /* Releasing Buffers */

 extern void pagebuf_free(		/* deallocate a buffer		*/
-		page_buf_t *);		/* buffer to deallocate		*/
+		xfs_buf_t *);		/* buffer to deallocate		*/

 extern void pagebuf_rele(		/* release hold on a buffer	*/
-		page_buf_t *);		/* buffer to release		*/
+		xfs_buf_t *);		/* buffer to release		*/

 /* Locking and Unlocking Buffers */

 extern int pagebuf_cond_lock(		/* lock buffer, if not locked	*/
					/* (returns -EBUSY if locked)	*/
-		page_buf_t *);		/* buffer to lock		*/
+		xfs_buf_t *);		/* buffer to lock		*/

 extern int pagebuf_lock_value(		/* return count on lock		*/
-		page_buf_t *);		/* buffer to check		*/
+		xfs_buf_t *);		/* buffer to check		*/

 extern int pagebuf_lock(		/* lock buffer			*/
-		page_buf_t *);		/* buffer to lock		*/
+		xfs_buf_t *);		/* buffer to lock		*/

 extern void pagebuf_unlock(		/* unlock buffer		*/
-		page_buf_t *);		/* buffer to unlock		*/
+		xfs_buf_t *);		/* buffer to unlock		*/

 /* Buffer Read and Write Routines */

 extern void pagebuf_iodone(		/* mark buffer I/O complete	*/
-		page_buf_t *,		/* buffer to mark		*/
+		xfs_buf_t *,		/* buffer to mark		*/
		int,			/* use data/log helper thread.	*/
		int);			/* run completion locally, or in
					 * a helper thread.		*/

 extern void pagebuf_ioerror(		/* mark buffer in error (or not) */
-		page_buf_t *,		/* buffer to mark		*/
+		xfs_buf_t *,		/* buffer to mark		*/
		unsigned int);		/* error to store (0 if none)	*/

 extern int pagebuf_iostart(		/* start I/O on a buffer	*/
-		page_buf_t *,		/* buffer to start		*/
+		xfs_buf_t *,		/* buffer to start		*/
		page_buf_flags_t);	/* PBF_LOCK, PBF_ASYNC,		*/
					/* PBF_READ, PBF_WRITE,		*/
					/* PBF_DELWRI			*/

 extern int pagebuf_iorequest(		/* start real I/O		*/
-		page_buf_t *);		/* buffer to convey to device	*/
+		xfs_buf_t *);		/* buffer to convey to device	*/

 extern int pagebuf_iowait(		/* wait for buffer I/O done	*/
-		page_buf_t *);		/* buffer to wait on		*/
+		xfs_buf_t *);		/* buffer to wait on		*/

 extern void pagebuf_iomove(		/* move data in/out of pagebuf	*/
-		page_buf_t *,		/* buffer to manipulate		*/
+		xfs_buf_t *,		/* buffer to manipulate		*/
		size_t,			/* starting buffer offset	*/
		size_t,			/* length in buffer		*/
		caddr_t,		/* data pointer			*/
		page_buf_rw_t);		/* direction			*/

-static inline int pagebuf_iostrategy(page_buf_t *pb)
+static inline int pagebuf_iostrategy(xfs_buf_t *pb)
 {
	return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
 }

-static inline int pagebuf_geterror(page_buf_t *pb)
+static inline int pagebuf_geterror(xfs_buf_t *pb)
 {
	return pb ? pb->pb_error : ENOMEM;
 }
@@ -288,30 +285,30 @@ static inline int pagebuf_geterror(page_buf_t *pb)
 /* Buffer Utility Routines */

 extern caddr_t pagebuf_offset(		/* pointer at offset in buffer	*/
-		page_buf_t *,		/* buffer to offset into	*/
+		xfs_buf_t *,		/* buffer to offset into	*/
		size_t);		/* offset			*/

 /* Pinning Buffer Storage in Memory */

 extern void pagebuf_pin(		/* pin buffer in memory		*/
-		page_buf_t *);		/* buffer to pin		*/
+		xfs_buf_t *);		/* buffer to pin		*/

 extern void pagebuf_unpin(		/* unpin buffered data		*/
-		page_buf_t *);		/* buffer to unpin		*/
+		xfs_buf_t *);		/* buffer to unpin		*/

 extern int pagebuf_ispin(		/* check if buffer is pinned	*/
-		page_buf_t *);		/* buffer to check		*/
+		xfs_buf_t *);		/* buffer to check		*/

 /* Delayed Write Buffer Routines */

 #define PBDF_WAIT    0x01
 extern void pagebuf_delwri_flush(
-		pb_target_t *,
+		xfs_buftarg_t *,
		unsigned long,
		int *);

 extern void pagebuf_delwri_dequeue(
-		page_buf_t *);
+		xfs_buf_t *);

 /* Buffer Daemon Setup Routines */
@@ -322,7 +319,7 @@ extern void pagebuf_terminate(void);
 #ifdef PAGEBUF_TRACE
 extern ktrace_t *pagebuf_trace_buf;
 extern void pagebuf_trace(
-		page_buf_t *,		/* buffer being traced		*/
+		xfs_buf_t *,		/* buffer being traced		*/
		char *,			/* description of operation	*/
		void *,			/* arbitrary diagnostic value	*/
		void *);		/* return address		*/
@@ -369,7 +366,7 @@ extern void pagebuf_trace(
 #define XFS_BUF_MANAGE		PBF_FS_MANAGED
 #define XFS_BUF_UNMANAGE(x)	((x)->pb_flags &= ~PBF_FS_MANAGED)

-static inline void xfs_buf_undelay(page_buf_t *pb)
+static inline void xfs_buf_undelay(xfs_buf_t *pb)
 {
	if (pb->pb_flags & PBF_DELWRI) {
		if (pb->pb_list.next != &pb->pb_list) {
@@ -423,12 +420,6 @@ static inline void xfs_buf_undelay(page_buf_t *pb)
 #define XFS_BUF_BP_ISMAPPED(bp)		1

-typedef struct page_buf_s xfs_buf_t;
-#define xfs_buf page_buf_s
-
-typedef struct pb_target xfs_buftarg_t;
-#define xfs_buftarg pb_target
-
 #define XFS_BUF_DATAIO(x)	((x)->pb_flags |= PBF_FS_DATAIOD)
 #define XFS_BUF_UNDATAIO(x)	((x)->pb_flags &= ~PBF_FS_DATAIOD)
@@ -461,7 +452,7 @@ typedef struct pb_target xfs_buftarg_t;
 #define XFS_BUF_PTR(bp)		(xfs_caddr_t)((bp)->pb_addr)

-extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset)
+extern inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
 {
	if (bp->pb_flags & PBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;
@@ -472,7 +463,7 @@ extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset)
		pagebuf_associate_memory(bp, val, count)

 #define XFS_BUF_ADDR(bp)	((bp)->pb_bn)
 #define XFS_BUF_SET_ADDR(bp, blk)		\
-			((bp)->pb_bn = (page_buf_daddr_t)(blk))
+			((bp)->pb_bn = (blk))
 #define XFS_BUF_OFFSET(bp)	((bp)->pb_file_offset)
 #define XFS_BUF_SET_OFFSET(bp, off)		\
			((bp)->pb_file_offset = (off))
@@ -517,7 +508,7 @@ extern inline xfs_caddr_t xfs_buf_offset(page_buf_t *bp, size_t offset)
 #define xfs_buf_get_flags(target, blkno, len, flags)	\
		pagebuf_get((target), (blkno), (len), (flags))

-static inline int	xfs_bawrite(void *mp, page_buf_t *bp)
+static inline int	xfs_bawrite(void *mp, xfs_buf_t *bp)
 {
	bp->pb_fspriv3 = mp;
	bp->pb_strat = xfs_bdstrat_cb;
@@ -525,7 +516,7 @@ static inline int xfs_bawrite(void *mp, page_buf_t *bp)
	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | PBF_RUN_QUEUES);
 }

-static inline void xfs_buf_relse(page_buf_t *bp)
+static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
	if (!bp->pb_relse)
		pagebuf_unlock(bp);
@@ -553,7 +544,7 @@ static inline void xfs_buf_relse(page_buf_t *bp)
	pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)

-static inline int XFS_bwrite(page_buf_t *pb)
+static inline int XFS_bwrite(xfs_buf_t *pb)
 {
	int	iowait = (pb->pb_flags & PBF_ASYNC) == 0;
	int	error = 0;
@@ -573,7 +564,7 @@ static inline int XFS_bwrite(page_buf_t *pb)
 #define XFS_bdwrite(pb)		\
	    pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)

-static inline int xfs_bdwrite(void *mp, page_buf_t *bp)
+static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
 {
	bp->pb_strat = xfs_bdstrat_cb;
	bp->pb_fspriv3 = mp;
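The header comment for xfs_buf_t above (unchanged apart from the name) notes that a buffer refers to an assembly of pages rather than one flat allocation, which is why data movement goes through helpers. A small, hedged example mirroring the XFS_bzero wrapper visible in this header (bp, boff and blen assumed to come from the caller):

	/* Zero blen bytes starting at byte offset boff within the buffer. */
	pagebuf_iomove(bp, boff, blen, NULL, PBRW_ZERO);

Passing NULL with PBRW_ZERO is exactly what the XFS_bzero macro does; the other page_buf_rw_t directions move data between the buffer's pages and a caller-supplied pointer.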
fs/xfs/linux/xfs_ioctl.c

@@ -659,7 +659,7 @@ xfs_ioctl(
	case XFS_IOC_DIOINFO: {
		struct dioattr	da;
-		pb_target_t	*target =
+		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
			mp->m_rtdev_targp : mp->m_ddev_targp;
fs/xfs/linux/xfs_lrw.c

@@ -301,7 +301,7 @@ xfs_read(
	/* END copy & waste from filemap.c */

	if (ioflags & IO_ISDIRECT) {
-		pb_target_t	*target =
+		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((*offset & target->pbr_smask) ||
@@ -689,7 +689,7 @@ xfs_write(
	}

	if (ioflags & IO_ISDIRECT) {
-		pb_target_t	*target =
+		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
fs/xfs/linux/xfs_lrw.h

@@ -38,7 +38,7 @@ struct xfs_mount;
 struct xfs_iocore;
 struct xfs_inode;
 struct xfs_bmbt_irec;
-struct page_buf_s;
+struct xfs_buf;
 struct xfs_iomap;

 #if defined(XFS_RW_TRACE)
@@ -89,8 +89,8 @@ extern void xfs_inval_cached_trace(struct xfs_iocore *,
 extern int xfs_bmap(struct bhv_desc *, xfs_off_t, ssize_t, int,
		struct xfs_iomap *, int *);
-extern int xfsbdstrat(struct xfs_mount *, struct page_buf_s *);
-extern int xfs_bdstrat_cb(struct page_buf_s *);
+extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
+extern int xfs_bdstrat_cb(struct xfs_buf *);
 extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
		xfs_fsize_t, xfs_fsize_t);
fs/xfs/linux/xfs_super.h

@@ -112,7 +112,7 @@ extern void xfs_qm_exit(void);
 struct xfs_inode;
 struct xfs_mount;
-struct pb_target;
+struct xfs_buftarg;
 struct block_device;

 extern __uint64_t xfs_max_file_offset(unsigned int);
@@ -126,12 +126,12 @@ extern int xfs_blkdev_get(struct xfs_mount *, const char *,
		struct block_device **);
 extern void xfs_blkdev_put(struct block_device *);

-extern struct pb_target *xfs_alloc_buftarg(struct block_device *);
-extern void xfs_relse_buftarg(struct pb_target *);
-extern void xfs_free_buftarg(struct pb_target *);
-extern void xfs_flush_buftarg(struct pb_target *);
-extern int xfs_readonly_buftarg(struct pb_target *);
-extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int);
-extern unsigned int xfs_getsize_buftarg(struct pb_target *);
+extern struct xfs_buftarg *xfs_alloc_buftarg(struct block_device *);
+extern void xfs_relse_buftarg(struct xfs_buftarg *);
+extern void xfs_free_buftarg(struct xfs_buftarg *);
+extern void xfs_flush_buftarg(struct xfs_buftarg *);
+extern int xfs_readonly_buftarg(struct xfs_buftarg *);
+extern void xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int, unsigned int);
+extern unsigned int xfs_getsize_buftarg(struct xfs_buftarg *);

 #endif	/* __XFS_SUPER_H__ */