Commit 193ce3be
authored Apr 12, 2004 by Linus Torvalds

Merge NFS conflicts

parents f2d816ef 78a21ca0

Showing 30 changed files with 3749 additions and 2666 deletions
fs/lockd/clntlock.c                       +0    -1
fs/lockd/clntproc.c                       +30   -16
fs/nfs/direct.c                           +3    -6
fs/nfs/inode.c                            +23   -0
fs/nfs/nfs2xdr.c                          +3    -3
fs/nfs/nfs3proc.c                         +7    -48
fs/nfs/nfs3xdr.c                          +3    -3
fs/nfs/nfs4proc.c                         +407  -714
fs/nfs/nfs4state.c                        +1    -1
fs/nfs/nfs4xdr.c                          +1677 -1094
fs/nfs/nfsroot.c                          +143  -75
fs/nfs/pagelist.c                         +2    -1
fs/nfs/proc.c                             +6    -36
fs/nfs/read.c                             +265  -76
fs/nfs/write.c                            +402  -133
include/linux/nfs4.h                      +74   -57
include/linux/nfs_fs.h                    +10   -6
include/linux/nfs_fs_sb.h                 +10   -1
include/linux/nfs_page.h                  +40   -2
include/linux/nfs_xdr.h                   +121  -155
include/linux/sunrpc/sched.h              +50   -16
include/linux/sunrpc/xdr.h                +13   -78
include/linux/sunrpc/xprt.h               +5    -4
net/sunrpc/auth_gss/auth_gss.c            +3    -6
net/sunrpc/auth_gss/gss_krb5_crypto.c     +19   -16
net/sunrpc/clnt.c                         +8    -4
net/sunrpc/sched.c                        +203  -41
net/sunrpc/sunrpc_syms.c                  +0    -3
net/sunrpc/xdr.c                          +184  -18
net/sunrpc/xprt.c                         +37   -52
fs/lockd/clntlock.c
@@ -228,7 +228,6 @@ reclaimer(void *ptr)
 	}
 	host->h_reclaiming = 0;
 	wake_up(&host->h_gracewait);
-
 	/* Now, wake up all processes that sleep on a blocked lock */
 	for (block = nlm_blocked; block; block = block->b_next) {
fs/lockd/clntproc.c
@@ -217,6 +217,21 @@ nlmclnt_alloc_call(void)
 	return NULL;
 }
 
+static int nlm_wait_on_grace(wait_queue_head_t *queue)
+{
+	DEFINE_WAIT(wait);
+	int status = -EINTR;
+
+	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
+	if (!signalled()) {
+		schedule_timeout(NLMCLNT_GRACE_WAIT);
+		if (!signalled())
+			status = 0;
+	}
+	finish_wait(queue, &wait);
+	return status;
+}
+
 /*
  * Generic NLM call
  */
@@ -241,10 +256,8 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
 	msg.rpc_cred	= nfs_file_cred(filp);
 
 	do {
-		if (host->h_reclaiming && !argp->reclaim) {
-			interruptible_sleep_on(&host->h_gracewait);
-			continue;
-		}
+		if (host->h_reclaiming && !argp->reclaim)
+			goto in_grace_period;
 		/* If we have no RPC client yet, create one. */
 		if ((clnt = nlm_bind_host(host)) == NULL)
@@ -279,22 +292,23 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
 				return -ENOLCK;
 			}
 		} else {
+			if (!argp->reclaim) {
+				/* We appear to be out of the grace period */
+				wake_up_all(&host->h_gracewait);
+			}
 			dprintk("lockd: server returns status %d\n", resp->status);
 			return 0;	/* Okay, call complete */
 		}
 
-		/* Back off a little and try again */
-		interruptible_sleep_on_timeout(&host->h_gracewait, 15*HZ);
-
-		/* When the lock requested by F_SETLKW isn't available,
-		   we will wait until the request can be satisfied. If
-		   a signal is received during wait, we should return
-		   -EINTR. */
-		if (signalled()) {
-			status = -EINTR;
-			break;
-		}
-	} while (1);
+in_grace_period:
+		/*
+		 * The server has rebooted and appears to be in the grace
+		 * period during which locks are only allowed to be
+		 * reclaimed.
+		 * We can only back off and try again later.
+		 */
+		status = nlm_wait_on_grace(&host->h_gracewait);
+	} while (status == 0);
 
 	return status;
 }
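A note on the hunk above: nlm_wait_on_grace() replaces the old interruptible_sleep_on*() calls with the open-coded prepare_to_wait / schedule_timeout / finish_wait sequence, which avoids the lost-wakeup race inherent in sleep_on. As an illustration only (not part of the patch), the same idiom in generic form, using signal_pending() in place of lockd's signalled() macro and a caller-supplied timeout, would look like:

/* Illustrative sketch of the "sleep until woken, timed out, or signalled"
 * idiom used by nlm_wait_on_grace(); queue and timeout are placeholders. */
static int wait_or_signal_sketch(wait_queue_head_t *queue, long timeout)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		schedule_timeout(timeout);	/* until wake_up*() or timeout */
		if (!signal_pending(current))
			status = 0;		/* no signal: let the caller retry */
	}
	finish_wait(queue, &wait);
	return status;
}

Callers pair this with wake_up_all(queue), as nlmclnt_call() now does when the server leaves its grace period.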
fs/nfs/direct.c
@@ -128,6 +128,7 @@ nfs_direct_read_seg(struct inode *inode, struct file *file,
 		.inode		= inode,
 		.args		= {
 			.fh		= NFS_FH(inode),
+			.lockowner	= current->files,
 		},
 		.res		= {
 			.fattr		= &rdata.fattr,
@@ -258,6 +259,7 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
 		.inode		= inode,
 		.args		= {
 			.fh		= NFS_FH(inode),
+			.lockowner	= current->files,
 		},
 		.res		= {
 			.fattr		= &wdata.fattr,
@@ -335,8 +337,7 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
 				VERF_SIZE) != 0)
 			goto sync_retry;
 	}
-	nfs_end_data_update(inode);
-	NFS_FLAGS(inode) |= NFS_INO_INVALID_DATA;
+	nfs_end_data_update_defer(inode);
 
 	return tot_bytes;
@@ -395,10 +396,6 @@ nfs_direct_write(struct inode *inode, struct file *file,
 		if (result < size)
 			break;
 	}
-
-	/* Zap the page cache if we managed to write */
-	if (tot_bytes > 0)
-		invalidate_remote_inode(inode);
 
 	return tot_bytes;
 }
fs/nfs/inode.c
@@ -1012,6 +1012,8 @@ void nfs_begin_data_update(struct inode *inode)
  * nfs_end_data_update
  * @inode - pointer to inode
  * Declare end of the operations that will update file data
+ * This will mark the inode as immediately needing revalidation
+ * of its attribute cache.
  */
 void
 nfs_end_data_update(struct inode *inode)
 {
@@ -1026,6 +1028,27 @@ void nfs_end_data_update(struct inode *inode)
 	atomic_dec(&nfsi->data_updates);
 }
 
+/**
+ * nfs_end_data_update_defer
+ * @inode - pointer to inode
+ * Declare end of the operations that will update file data
+ * This will defer marking the inode as needing revalidation
+ * unless there are no other pending updates.
+ */
+void
+nfs_end_data_update_defer(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	if (atomic_dec_and_test(&nfsi->data_updates)) {
+		/* Mark the attribute cache for revalidation */
+		nfsi->flags |= NFS_INO_INVALID_ATTR;
+		/* Directories and symlinks: invalidate page cache too */
+		if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+			nfsi->flags |= NFS_INO_INVALID_DATA;
+		nfsi->cache_change_attribute ++;
+	}
+}
+
 /**
  * nfs_refresh_inode - verify consistency of the inode attribute cache
  * @inode - pointer to inode
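The new nfs_end_data_update_defer() above leans on atomic_dec_and_test() so that only the final outstanding updater marks the attribute cache for revalidation. As an illustration only, the bare "last one out flips the flag" idiom (with placeholder field names, not the real struct nfs_inode layout) reduces to:

/* Sketch of the deferred-invalidation pattern used above. */
struct cache_state_sketch {
	atomic_t	updates;	/* outstanding data-modifying operations */
	unsigned long	flags;		/* e.g. a "needs revalidation" bit */
};

#define SKETCH_NEEDS_REVALIDATE	0x1UL

static void end_update_deferred_sketch(struct cache_state_sketch *c)
{
	/* atomic_dec_and_test() returns true only for the caller that drops
	 * the counter to zero, so the invalidation happens exactly once. */
	if (atomic_dec_and_test(&c->updates))
		c->flags |= SKETCH_NEEDS_REVALIDATE;
}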
fs/nfs/nfs2xdr.c
@@ -231,7 +231,7 @@ nfs_xdr_readargs(struct rpc_rqst *req, u32 *p, struct nfs_readargs *args)
 static int
 nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
 {
-	struct iovec *iov = req->rq_rvec;
+	struct iovec *iov = req->rq_rcv_buf.head;
 	int	status, count, recvd, hdrlen;
 
 	if ((status = ntohl(*p++)))
@@ -250,7 +250,7 @@ nfs_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
 		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
 	}
 
-	recvd = req->rq_received - hdrlen;
+	recvd = req->rq_rcv_buf.len - hdrlen;
 	if (count > recvd) {
 		printk(KERN_WARNING "NFS: server cheating in read reply: "
 			"count %d > recvd %d\n", count, recvd);
@@ -396,7 +396,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, u32 *p, void *dummy)
 	}
 
 	pglen = rcvbuf->page_len;
-	recvd = req->rq_received - hdrlen;
+	recvd = rcvbuf->len - hdrlen;
 	if (pglen > recvd)
 		pglen = recvd;
 	page = rcvbuf->pages;
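These nfs2xdr.c hunks switch the reply-length bookkeeping from req->rq_received to the receive xdr_buf itself (req->rq_rcv_buf.head and .len); the clamping logic around it is unchanged. As an illustration only, the bounds check that the read/readdir decoders apply reduces to:

/* Sketch of the sanity check used in the reply decoders above:
 * never trust a count that exceeds what was actually received. */
static unsigned int clamp_to_received_sketch(unsigned int claimed_count,
					     unsigned int buf_len,
					     unsigned int hdrlen)
{
	unsigned int recvd = buf_len - hdrlen;	/* payload actually on the wire */

	if (claimed_count > recvd)
		return recvd;			/* server claimed too much; truncate */
	return claimed_count;
}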
fs/nfs/nfs3proc.c
@@ -729,11 +729,10 @@ nfs3_read_done(struct rpc_task *task)
 }
 
 static void
-nfs3_proc_read_setup(struct nfs_read_data *data, unsigned int count)
+nfs3_proc_read_setup(struct nfs_read_data *data)
 {
 	struct rpc_task		*task = &data->task;
 	struct inode		*inode = data->inode;
-	struct nfs_page		*req;
 	int			flags;
 	struct rpc_message	msg = {
 		.rpc_proc	= &nfs3_procedures[NFS3PROC_READ],
@@ -742,26 +741,12 @@ nfs3_proc_read_setup(struct nfs_read_data *data, unsigned int count)
 		.rpc_cred	= data->cred,
 	};
 
-	req = nfs_list_entry(data->pages.next);
-	data->args.fh     = NFS_FH(inode);
-	data->args.offset = req_offset(req);
-	data->args.pgbase = req->wb_pgbase;
-	data->args.pages  = data->pagevec;
-	data->args.count  = count;
-	data->res.fattr   = &data->fattr;
-	data->res.count   = count;
-	data->res.eof     = 0;
-
 	/* N.B. Do we need to test? Never called for swapfile inode */
 	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
 
 	/* Finalize the task. */
 	rpc_init_task(task, NFS_CLIENT(inode), nfs3_read_done, flags);
-	task->tk_calldata = data;
-	/* Release requests */
-	task->tk_release = nfs_readdata_release;
-	rpc_call_setup(&data->task, &msg, 0);
+	rpc_call_setup(task, &msg, 0);
 }
 
 static void
@@ -778,11 +763,10 @@ nfs3_write_done(struct rpc_task *task)
 }
 
 static void
-nfs3_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
+nfs3_proc_write_setup(struct nfs_write_data *data, int how)
 {
 	struct rpc_task		*task = &data->task;
 	struct inode		*inode = data->inode;
-	struct nfs_page		*req;
 	int			stable;
 	int			flags;
 	struct rpc_message	msg = {
@@ -799,28 +783,14 @@ nfs3_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
 		stable = NFS_DATA_SYNC;
 	} else
 		stable = NFS_UNSTABLE;
-
-	req = nfs_list_entry(data->pages.next);
-	data->args.fh     = NFS_FH(inode);
-	data->args.offset = req_offset(req);
-	data->args.pgbase = req->wb_pgbase;
-	data->args.count  = count;
 	data->args.stable = stable;
-	data->args.pages  = data->pagevec;
-	data->res.fattr   = &data->fattr;
-	data->res.count   = count;
-	data->res.verf    = &data->verf;
 
 	/* Set the initial flags for the task. */
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
 	rpc_init_task(task, NFS_CLIENT(inode), nfs3_write_done, flags);
-	task->tk_calldata = data;
-	/* Release requests */
-	task->tk_release = nfs_writedata_release;
-	rpc_call_setup(&data->task, &msg, 0);
+	rpc_call_setup(task, &msg, 0);
 }
 
 static void
@@ -837,7 +807,7 @@ nfs3_commit_done(struct rpc_task *task)
 }
 
 static void
-nfs3_proc_commit_setup(struct nfs_write_data *data, u64 start, u32 len, int how)
+nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
 {
 	struct rpc_task		*task = &data->task;
 	struct inode		*inode = data->inode;
@@ -849,23 +819,12 @@ nfs3_proc_commit_setup(struct nfs_write_data *data, u64 start, u32 len, int how)
 		.rpc_cred	= data->cred,
 	};
 
-	data->args.fh     = NFS_FH(data->inode);
-	data->args.offset = start;
-	data->args.count  = len;
-	data->res.count   = len;
-	data->res.fattr   = &data->fattr;
-	data->res.verf    = &data->verf;
-
 	/* Set the initial flags for the task. */
 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
 
 	/* Finalize the task. */
 	rpc_init_task(task, NFS_CLIENT(inode), nfs3_commit_done, flags);
-	task->tk_calldata = data;
-	/* Release requests */
-	task->tk_release = nfs_commit_release;
-	rpc_call_setup(&data->task, &msg, 0);
+	rpc_call_setup(task, &msg, 0);
 }
 
 /*
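After these hunks the nfs3_proc_*_setup() routines no longer pull a request off data->pages and fill data->args themselves; they only choose RPC flags and bind the message to the task. Presumably the per-request argument marshalling moves into the generic read/write paths touched by this same merge (fs/nfs/read.c and fs/nfs/write.c, whose diffs are not shown here). A purely hypothetical caller-side sketch, with assumptions noted in the comments, might be:

/* Hypothetical illustration only -- not the actual fs/nfs/read.c code from
 * this merge. The generic layer fills the argument/result structures and
 * then asks the protocol-specific rpc_ops to set up and launch the task. */
static void sketch_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
				 unsigned int count)
{
	struct inode *inode = req->wb_inode;	/* field name as in this era's struct nfs_page */

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req);
	data->args.pgbase = req->wb_pgbase;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;

	/* hand off to e.g. nfs3_proc_read_setup() via the rpc_ops table */
	NFS_PROTO(inode)->read_setup(data);
}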
fs/nfs/nfs3xdr.c
@@ -515,7 +515,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, u32 *p, struct nfs3_readdirres *res)
 	}
 
 	pglen = rcvbuf->page_len;
-	recvd = req->rq_received - hdrlen;
+	recvd = rcvbuf->len - hdrlen;
 	if (pglen > recvd)
 		pglen = recvd;
 	page = rcvbuf->pages;
@@ -758,7 +758,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, u32 *p, struct nfs_fattr *fattr)
 static int
 nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
 {
-	struct iovec *iov = req->rq_rvec;
+	struct iovec *iov = req->rq_rcv_buf.head;
 	int	status, count, ocount, recvd, hdrlen;
 
 	status = ntohl(*p++);
@@ -789,7 +789,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, u32 *p, struct nfs_readres *res)
 		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
 	}
 
-	recvd = req->rq_received - hdrlen;
+	recvd = req->rq_rcv_buf.len - hdrlen;
 	if (count > recvd) {
 		printk(KERN_WARNING "NFS: server cheating in read reply: "
 			"count %d > recvd %d\n", count, recvd);
fs/nfs/nfs4proc.c
@@ -51,10 +51,7 @@
 #define NFS4_POLL_RETRY_TIME	(15*HZ)
 
-#define GET_OP(cp,name)		&cp->ops[cp->req_nops].u.name
-#define OPNUM(cp)		cp->ops[cp->req_nops].opnum
-
-static int nfs4_proc_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
+static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
 static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *);
 extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
 extern struct rpc_procinfo nfs4_procedures[];
@@ -72,102 +69,10 @@ static inline int nfs4_map_errors(int err)
 	return err;
 }
 
-static void
-nfs4_setup_compound(struct nfs4_compound *cp, struct nfs4_op *ops,
-		    struct nfs_server *server, char *tag)
-{
-	memset(cp, 0, sizeof(*cp));
-	cp->ops = ops;
-	cp->server = server;
-}
-
-static void
-nfs4_setup_access(struct nfs4_compound *cp, u32 req_access, u32 *resp_supported, u32 *resp_access)
-{
-	struct nfs4_access *access = GET_OP(cp, access);
-
-	access->ac_req_access = req_access;
-	access->ac_resp_supported = resp_supported;
-	access->ac_resp_access = resp_access;
-	OPNUM(cp) = OP_ACCESS;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_create_dir(struct nfs4_compound *cp, struct qstr *name,
-		      struct iattr *sattr, struct nfs4_change_info *info)
-{
-	struct nfs4_create *create = GET_OP(cp, create);
-
-	create->cr_ftype = NF4DIR;
-	create->cr_namelen = name->len;
-	create->cr_name = name->name;
-	create->cr_attrs = sattr;
-	create->cr_cinfo = info;
-	OPNUM(cp) = OP_CREATE;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_create_symlink(struct nfs4_compound *cp, struct qstr *name,
-			  struct qstr *linktext, struct iattr *sattr,
-			  struct nfs4_change_info *info)
-{
-	struct nfs4_create *create = GET_OP(cp, create);
-
-	create->cr_ftype = NF4LNK;
-	create->cr_textlen = linktext->len;
-	create->cr_text = linktext->name;
-	create->cr_namelen = name->len;
-	create->cr_name = name->name;
-	create->cr_attrs = sattr;
-	create->cr_cinfo = info;
-	OPNUM(cp) = OP_CREATE;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_create_special(struct nfs4_compound *cp, struct qstr *name,
-			  dev_t dev, struct iattr *sattr,
-			  struct nfs4_change_info *info)
-{
-	int mode = sattr->ia_mode;
-	struct nfs4_create *create = GET_OP(cp, create);
-
-	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
-	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
-	if (S_ISFIFO(mode))
-		create->cr_ftype = NF4FIFO;
-	else if (S_ISBLK(mode)) {
-		create->cr_ftype = NF4BLK;
-		create->cr_specdata1 = MAJOR(dev);
-		create->cr_specdata2 = MINOR(dev);
-	}
-	else if (S_ISCHR(mode)) {
-		create->cr_ftype = NF4CHR;
-		create->cr_specdata1 = MAJOR(dev);
-		create->cr_specdata2 = MINOR(dev);
-	} else
-		create->cr_ftype = NF4SOCK;
-	create->cr_namelen = name->len;
-	create->cr_name = name->name;
-	create->cr_attrs = sattr;
-	create->cr_cinfo = info;
-	OPNUM(cp) = OP_CREATE;
-	cp->req_nops++;
-}
-
 /*
  * This is our standard bitmap for GETATTR requests.
  */
-u32 nfs4_fattr_bitmap[2] = {
+const u32 nfs4_fattr_bitmap[2] = {
 	FATTR4_WORD0_TYPE
 	| FATTR4_WORD0_CHANGE
 	| FATTR4_WORD0_SIZE
@@ -184,7 +89,7 @@ u32 nfs4_fattr_bitmap[2] = {
 	| FATTR4_WORD1_TIME_MODIFY
 };
 
-u32 nfs4_statfs_bitmap[2] = {
+const u32 nfs4_statfs_bitmap[2] = {
 	FATTR4_WORD0_FILES_AVAIL
 	| FATTR4_WORD0_FILES_FREE
 	| FATTR4_WORD0_FILES_TOTAL,
@@ -199,121 +104,28 @@ u32 nfs4_pathconf_bitmap[2] = {
 	0
 };
 
-static inline void
-__nfs4_setup_getattr(struct nfs4_compound *cp, u32 *bitmap,
-		     struct nfs_fattr *fattr,
-		     struct nfs_fsstat *fsstat,
-		     struct nfs_pathconf *pathconf)
-{
-	struct nfs4_getattr *getattr = GET_OP(cp, getattr);
-
-	getattr->gt_bmval = bitmap;
-	getattr->gt_attrs = fattr;
-	getattr->gt_fsstat = fsstat;
-	getattr->gt_pathconf = pathconf;
-	OPNUM(cp) = OP_GETATTR;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_getattr(struct nfs4_compound *cp, struct nfs_fattr *fattr)
-{
-	__nfs4_setup_getattr(cp, nfs4_fattr_bitmap, fattr, NULL, NULL);
-}
-
-static void
-nfs4_setup_statfs(struct nfs4_compound *cp, struct nfs_fsstat *fsstat)
-{
-	__nfs4_setup_getattr(cp, nfs4_statfs_bitmap, NULL, fsstat, NULL);
-}
-
-static void
-nfs4_setup_pathconf(struct nfs4_compound *cp, struct nfs_pathconf *pathconf)
-{
-	__nfs4_setup_getattr(cp, nfs4_pathconf_bitmap, NULL, NULL, pathconf);
-}
-
-static void
-nfs4_setup_getfh(struct nfs4_compound *cp, struct nfs_fh *fhandle)
-{
-	struct nfs4_getfh *getfh = GET_OP(cp, getfh);
-
-	getfh->gf_fhandle = fhandle;
-	OPNUM(cp) = OP_GETFH;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_link(struct nfs4_compound *cp, struct qstr *name,
-		struct nfs4_change_info *info)
-{
-	struct nfs4_link *link = GET_OP(cp, link);
-
-	link->ln_namelen = name->len;
-	link->ln_name = name->name;
-	link->ln_cinfo = info;
-	OPNUM(cp) = OP_LINK;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_lookup(struct nfs4_compound *cp, struct qstr *q)
-{
-	struct nfs4_lookup *lookup = GET_OP(cp, lookup);
-
-	lookup->lo_name = q;
-	OPNUM(cp) = OP_LOOKUP;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_putfh(struct nfs4_compound *cp, struct nfs_fh *fhandle)
-{
-	struct nfs4_putfh *putfh = GET_OP(cp, putfh);
-
-	putfh->pf_fhandle = fhandle;
-	OPNUM(cp) = OP_PUTFH;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_putrootfh(struct nfs4_compound *cp)
-{
-	OPNUM(cp) = OP_PUTROOTFH;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
-		     struct page **pages, unsigned int bufsize, struct dentry *dentry)
+const u32 nfs4_fsinfo_bitmap[2] = { FATTR4_WORD0_MAXFILESIZE
+			| FATTR4_WORD0_MAXREAD
+			| FATTR4_WORD0_MAXWRITE
+			| FATTR4_WORD0_LEASE_TIME,
+			0
+};
+
+static void nfs4_setup_readdir(u64 cookie, u32 *verifier, struct dentry *dentry,
+		struct nfs4_readdir_arg *readdir)
 {
 	u32 *start, *p;
-	struct nfs4_readdir *readdir = GET_OP(cp, readdir);
 
-	BUG_ON(bufsize < 80);
-	readdir->rd_cookie = (cookie > 2) ? cookie : 0;
-	memcpy(&readdir->rd_req_verifier, verifier, sizeof(readdir->rd_req_verifier));
-	readdir->rd_count = bufsize;
-	readdir->rd_bmval[0] = FATTR4_WORD0_FILEID;
-	readdir->rd_bmval[1] = 0;
-	readdir->rd_pages = pages;
-	readdir->rd_pgbase = 0;
-	OPNUM(cp) = OP_READDIR;
-	cp->req_nops++;
+	BUG_ON(readdir->count < 80);
+	if (cookie > 2) {
+		readdir->cookie = (cookie > 2) ? cookie : 0;
+		memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
+		return;
+	}
+
+	if (cookie >= 2)
+		readdir->cookie = 0;
+	memset(&readdir->verifier, 0, sizeof(readdir->verifier));
+	if (cookie == 2)
+		return;
 
 	/*
@@ -323,7 +135,7 @@ nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
 	 * when talking to the server, we always send cookie 0
 	 * instead of 1 or 2.
 	 */
-	start = p = (u32 *)kmap_atomic(*pages, KM_USER0);
+	start = p = (u32 *)kmap_atomic(*readdir->pages, KM_USER0);
 
 	if (cookie == 0) {
 		*p++ = xdr_one;                                  /* next */
@@ -335,7 +147,7 @@ nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
 		*p++ = xdr_one;                         /* bitmap length */
 		*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
 		*p++ = htonl(8);              /* attribute buffer length */
-		p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
+		p = xdr_encode_hyper(p, dentry->d_inode->i_ino);
 	}
 
 	*p++ = xdr_one;                                  /* next */
@@ -347,69 +159,13 @@ nfs4_setup_readdir(struct nfs4_compound *cp, u64 cookie, u32 *verifier,
 	*p++ = xdr_one;                         /* bitmap length */
 	*p++ = htonl(FATTR4_WORD0_FILEID);             /* bitmap */
 	*p++ = htonl(8);              /* attribute buffer length */
-	p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
+	p = xdr_encode_hyper(p, dentry->d_parent->d_inode->i_ino);
 
-	readdir->rd_pgbase = (char *)p - (char *)start;
-	readdir->rd_count -= readdir->rd_pgbase;
+	readdir->pgbase = (char *)p - (char *)start;
+	readdir->count -= readdir->pgbase;
 	kunmap_atomic(start, KM_USER0);
 }
 
-static void
-nfs4_setup_readlink(struct nfs4_compound *cp, int count, struct page **pages)
-{
-	struct nfs4_readlink *readlink = GET_OP(cp, readlink);
-
-	readlink->rl_count = count;
-	readlink->rl_pages = pages;
-	OPNUM(cp) = OP_READLINK;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_remove(struct nfs4_compound *cp, struct qstr *name, struct nfs4_change_info *cinfo)
-{
-	struct nfs4_remove *remove = GET_OP(cp, remove);
-
-	remove->rm_namelen = name->len;
-	remove->rm_name = name->name;
-	remove->rm_cinfo = cinfo;
-	OPNUM(cp) = OP_REMOVE;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_rename(struct nfs4_compound *cp, struct qstr *old, struct qstr *new,
-		  struct nfs4_change_info *old_cinfo, struct nfs4_change_info *new_cinfo)
-{
-	struct nfs4_rename *rename = GET_OP(cp, rename);
-
-	rename->rn_oldnamelen = old->len;
-	rename->rn_oldname = old->name;
-	rename->rn_newnamelen = new->len;
-	rename->rn_newname = new->name;
-	rename->rn_src_cinfo = old_cinfo;
-	rename->rn_dst_cinfo = new_cinfo;
-	OPNUM(cp) = OP_RENAME;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_restorefh(struct nfs4_compound *cp)
-{
-	OPNUM(cp) = OP_RESTOREFH;
-	cp->req_nops++;
-}
-
-static void
-nfs4_setup_savefh(struct nfs4_compound *cp)
-{
-	OPNUM(cp) = OP_SAVEFH;
-	cp->req_nops++;
-}
-
 static void
 renew_lease(struct nfs_server *server, unsigned long timestamp)
 {
@@ -420,58 +176,12 @@ renew_lease(struct nfs_server *server, unsigned long timestamp)
 	spin_unlock(&clp->cl_lock);
 }
 
-static inline void
-process_lease(struct nfs4_compound *cp)
-{
-	/*
-	 * Generic lease processing: If this operation contains a
-	 * lease-renewing operation, and it succeeded, update the RENEW time
-	 * in the superblock.  Instead of the current time, we use the time
-	 * when the request was sent out. (All we know is that the lease was
-	 * renewed sometime between then and now, and we have to assume the
-	 * worst case.)
-	 *
-	 * Notes:
-	 *   (1) renewd doesn't acquire the spinlock when messing with
-	 *       server->last_renewal; this is OK since rpciod always runs
-	 *       under the BKL.
-	 *   (2) cp->timestamp was set at the end of XDR encode.
-	 */
-	if (!cp->renew_index)
-		return;
-	if (!cp->toplevel_status || cp->resp_nops > cp->renew_index)
-		renew_lease(cp->server, cp->timestamp);
-}
-
-static int
-nfs4_call_compound(struct nfs4_compound *cp, struct rpc_cred *cred, int flags)
-{
-	int status;
-	struct rpc_message msg = {
-		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMPOUND],
-		.rpc_argp = cp,
-		.rpc_resp = cp,
-		.rpc_cred = cred,
-	};
-
-	status = rpc_call_sync(cp->server->client, &msg, flags);
-	if (!status)
-		process_lease(cp);
-
-	return status;
-}
-
-static inline void
-process_cinfo(struct nfs4_change_info *info, struct nfs_fattr *fattr)
+static void update_changeattr(struct inode *inode, struct nfs4_change_info *cinfo)
 {
-	BUG_ON((fattr->valid & NFS_ATTR_FATTR) == 0);
-	BUG_ON((fattr->valid & NFS_ATTR_FATTR_V4) == 0);
+	struct nfs_inode *nfsi = NFS_I(inode);
 
-	if (fattr->change_attr == info->after) {
-		fattr->pre_change_attr = info->before;
-		fattr->valid |= NFS_ATTR_PRE_CHANGE;
-		fattr->timestamp = jiffies;
-	}
+	if (cinfo->before == nfsi->change_attr && cinfo->atomic)
+		nfsi->change_attr = cinfo->after;
 }
 
 /*
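update_changeattr(), introduced above, consumes the NFSv4 change_info4 returned by directory-modifying operations: the cached change attribute is advanced only when the pre-operation value matches what the client already holds and the server reports the change as atomic; otherwise the cache is simply left to be revalidated later. As an illustration of how the call sites later in this patch use it (not new functionality, just a usage sketch):

/* Sketch: a directory-modifying NFSv4 call consuming change_info.
 * struct nfs4_change_info carries { atomic, before, after } from XDR. */
static void sketch_post_remove(struct inode *dir, struct nfs4_change_info *cinfo,
			       int status)
{
	if (status == 0)
		update_changeattr(dir, cinfo);	/* advance cached change_attr if safe */
	/* on a mismatch or non-atomic change, the next revalidation refetches it */
}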
@@ -487,12 +197,6 @@ nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
 	struct nfs_fattr        fattr = {
 		.valid          = 0,
 	};
-	struct nfs4_change_info	d_cinfo;
-	struct nfs4_getattr     f_getattr = {
-		.gt_bmval       = nfs4_fattr_bitmap,
-		.gt_attrs       = &fattr,
-	};
-
 	struct nfs_open_reclaimargs o_arg = {
 		.fh = NFS_FH(inode),
 		.seqid = sp->so_seqid,
@@ -500,11 +204,10 @@ nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
 		.share_access = state->state,
 		.clientid = server->nfs4_state->cl_clientid,
 		.claim = NFS4_OPEN_CLAIM_PREVIOUS,
-		.f_getattr = &f_getattr,
+		.bitmask = server->attr_bitmask,
 	};
 	struct nfs_openres o_res = {
-		.cinfo = &d_cinfo,
-		.f_getattr = &f_getattr,
+		.f_attr = &fattr,
 		.server = server,	/* Grrr */
 	};
 	struct rpc_message msg = {
@@ -534,36 +237,21 @@ nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr
 	struct nfs4_state *state = NULL;
 	struct nfs_server *server = NFS_SERVER(dir);
 	struct inode *inode = NULL;
-	struct nfs4_change_info d_cinfo;
 	int status;
-	struct nfs_fattr d_attr = {
-		.valid          = 0,
-	};
 	struct nfs_fattr f_attr = {
 		.valid          = 0,
 	};
-	struct nfs4_getattr     f_getattr = {
-		.gt_bmval       = nfs4_fattr_bitmap,
-		.gt_attrs       = &f_attr,
-	};
-	struct nfs4_getattr     d_getattr = {
-		.gt_bmval       = nfs4_fattr_bitmap,
-		.gt_attrs       = &d_attr,
-	};
 	struct nfs_openargs o_arg = {
 		.fh             = NFS_FH(dir),
 		.share_access   = flags & (FMODE_READ|FMODE_WRITE),
 		.opentype       = (flags & O_CREAT) ? NFS4_OPEN_CREATE : NFS4_OPEN_NOCREATE,
 		.createmode     = (flags & O_EXCL) ? NFS4_CREATE_EXCLUSIVE : NFS4_CREATE_UNCHECKED,
 		.name           = name,
-		.f_getattr      = &f_getattr,
-		.d_getattr      = &d_getattr,
 		.server         = server,
+		.bitmask        = server->attr_bitmask,
 	};
 	struct nfs_openres o_res = {
-		.cinfo          = &d_cinfo,
-		.f_getattr      = &f_getattr,
-		.d_getattr      = &d_getattr,
+		.f_attr         = &f_attr,
 		.server         = server,
 	};
 	struct rpc_message msg = {
@@ -596,8 +284,7 @@ nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr
 	nfs4_increment_seqid(status, sp);
 	if (status)
 		goto out_up;
-	process_cinfo(&d_cinfo, &d_attr);
-	nfs_refresh_inode(dir, &d_attr);
+	update_changeattr(dir, &o_res.cinfo);
 
 	status = -ENOMEM;
 	inode = nfs_fhget(dir->i_sb, &o_res.fh, &f_attr);
@@ -679,18 +366,14 @@ nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
 		struct nfs_fh *fhandle, struct iattr *sattr,
 		struct nfs4_state *state)
 {
-	struct nfs4_getattr     getattr = {
-		.gt_bmval       = nfs4_fattr_bitmap,
-		.gt_attrs       = fattr,
-	};
         struct nfs_setattrargs  arg = {
                 .fh             = fhandle,
                 .iap            = sattr,
-		.attr           = &getattr,
 		.server		= server,
+		.bitmask	= server->attr_bitmask,
         };
         struct nfs_setattrres  res = {
-		.attr           = &getattr,
+		.fattr		= fattr,
 		.server		= server,
         };
         struct rpc_message msg = {
@@ -834,15 +517,73 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags)
 	return 0;
 }
 
-static int
-nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
+static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
+{
+	struct nfs4_server_caps_res res = {};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
+		.rpc_argp = fhandle,
+		.rpc_resp = &res,
+	};
+	int status;
+
+	status = rpc_call_sync(server->client, &msg, 0);
+	if (status == 0) {
+		memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
+		if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
+			server->caps |= NFS_CAP_ACLS;
+		if (res.has_links != 0)
+			server->caps |= NFS_CAP_HARDLINKS;
+		if (res.has_symlinks != 0)
+			server->caps |= NFS_CAP_SYMLINKS;
+		server->acl_bitmask = res.acl_bitmask;
+	}
+	return status;
+}
+
+static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fsinfo *info)
+{
+	struct nfs_fattr *	fattr = info->fattr;
+	struct nfs4_lookup_root_arg args = {
+		.bitmask = nfs4_fattr_bitmap,
+	};
+	struct nfs4_lookup_res res = {
+		.server = server,
+		.fattr = fattr,
+		.fh = fhandle,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
+	fattr->valid = 0;
+	return rpc_call_sync(server->client, &msg, 0);
+}
+
+static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
 		struct nfs_fsinfo *info)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[4];
 	struct nfs_fattr *	fattr = info->fattr;
 	unsigned char *		p;
 	struct qstr		q;
+	struct nfs4_lookup_arg args = {
+		.dir_fh = fhandle,
+		.name = &q,
+		.bitmask = nfs4_fattr_bitmap,
+	};
+	struct nfs4_lookup_res res = {
+		.server = server,
+		.fattr = fattr,
+		.fh = fhandle,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
 	int status;
 
 	/*
@@ -850,14 +591,11 @@ nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
 	 * The LOOKUPs are done separately so that we can conveniently
 	 * catch an ERR_WRONGSEC if it occurs along the way...
 	 */
-	p = server->mnt_path;
 	fattr->valid = 0;
-	nfs4_setup_compound(&compound, ops, server, "getrootfh");
-	nfs4_setup_putrootfh(&compound);
-	nfs4_setup_getattr(&compound, fattr);
-	nfs4_setup_getfh(&compound, fhandle);
-	if ((status = nfs4_call_compound(&compound, NULL, 0)))
+	status = nfs4_lookup_root(server, fhandle, info);
+	if (status)
 		goto out;
+	p = server->mnt_path;
 	for (;;) {
 		while (*p == '/')
 			p++;
@@ -869,12 +607,7 @@ nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
 		q.len = p - q.name;
 
 		fattr->valid = 0;
-		nfs4_setup_compound(&compound, ops, server, "mount");
-		nfs4_setup_putfh(&compound, fhandle);
-		nfs4_setup_lookup(&compound, &q);
-		nfs4_setup_getattr(&compound, fattr);
-		nfs4_setup_getfh(&compound, fhandle);
-		status = nfs4_call_compound(&compound, NULL, 0);
+		status = rpc_call_sync(server->client, &msg, 0);
 		if (!status)
 			continue;
 		if (status == -ENOENT) {
@@ -883,24 +616,34 @@ nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
 		}
 		break;
 	}
+	if (status == 0)
+		status = nfs4_server_capabilities(server, fhandle);
+	if (status == 0)
+		status = nfs4_do_fsinfo(server, fhandle, info);
 out:
-	if (status)
-		return nfs4_map_errors(status);
-	return nfs4_proc_fsinfo(server, fhandle, info);
+	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr)
+static int nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr)
 {
-	struct nfs4_compound compound;
-	struct nfs4_op ops[2];
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs4_getattr_arg args = {
+		.fh = NFS_FH(inode),
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_getattr_res res = {
+		.fattr = fattr,
+		.server = server,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
 
 	fattr->valid = 0;
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "getattr");
-	nfs4_setup_putfh(&compound, NFS_FH(inode));
-	nfs4_setup_getattr(&compound, fattr);
-	return nfs4_map_errors(nfs4_call_compound(&compound, NULL, 0));
+	return nfs4_map_errors(rpc_call_sync(NFS_CLIENT(inode), &msg, 0));
 }
 
 /*
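nfs4_proc_get_root() still walks server->mnt_path one component at a time; as the comment in the hunk explains, issuing the LOOKUPs separately makes it easy to catch an ERR_WRONGSEC for a particular component. Stripped of the RPC machinery, the component-splitting loop amounts to the following sketch (the lookup callback is a stand-in for the rpc_call_sync() above):

/* Sketch of the per-component walk over an NFSv4 mount path such as
 * "/export/home": each iteration performs one LOOKUP for one name. */
static int sketch_walk_mnt_path(const unsigned char *path,
				int (*lookup_one)(const char *name, unsigned int len))
{
	const unsigned char *p = path;
	int status = 0;

	for (;;) {
		const unsigned char *name;

		while (*p == '/')
			p++;			/* skip consecutive slashes */
		if (!*p)
			break;
		name = p;
		while (*p && *p != '/')
			p++;
		status = lookup_one((const char *)name, p - name);
		if (status)
			break;			/* e.g. -ENOENT, or a WRONGSEC-mapped error */
	}
	return status;
}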
@@ -962,75 +705,72 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
 	return status;
 }
 
-static int
-nfs4_proc_lookup(struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
 		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
-	struct nfs4_compound compound;
-	struct nfs4_op ops[5];
-	struct nfs_fattr dir_attr;
 	int status;
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs4_lookup_arg args = {
+		.bitmask = server->attr_bitmask,
+		.dir_fh = NFS_FH(dir),
+		.name = name,
+	};
+	struct nfs4_lookup_res res = {
+		.server = server,
+		.fattr = fattr,
+		.fh = fhandle,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+	};
 
-	dir_attr.valid = 0;
 	fattr->valid = 0;
 
 	dprintk("NFS call  lookup %s\n", name->name);
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "lookup");
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_getattr(&compound, &dir_attr);
-	nfs4_setup_lookup(&compound, name);
-	nfs4_setup_getattr(&compound, fattr);
-	nfs4_setup_getfh(&compound, fhandle);
-	status = nfs4_call_compound(&compound, NULL, 0);
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	dprintk("NFS reply lookup: %d\n", status);
-	if (status >= 0)
-		status = nfs_refresh_inode(dir, &dir_attr);
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_access(struct inode *inode, struct rpc_cred *cred, int mode)
+static int nfs4_proc_access(struct inode *inode, struct rpc_cred *cred, int mode)
 {
-	struct nfs4_compound compound;
-	struct nfs4_op ops[3];
-	struct nfs_fattr fattr;
-	u32 req_access = 0, resp_supported, resp_access;
 	int status;
-
-	fattr.valid = 0;
+	struct nfs4_accessargs args = {
+		.fh = NFS_FH(inode),
+	};
+	struct nfs4_accessres res = { 0 };
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+		.rpc_cred = cred,
+	};
 
 	/*
	 * Determine which access bits we want to ask for...
	 */
 	if (mode & MAY_READ)
-		req_access |= NFS4_ACCESS_READ;
+		args.access |= NFS4_ACCESS_READ;
 	if (S_ISDIR(inode->i_mode)) {
 		if (mode & MAY_WRITE)
-			req_access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
+			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
 		if (mode & MAY_EXEC)
-			req_access |= NFS4_ACCESS_LOOKUP;
+			args.access |= NFS4_ACCESS_LOOKUP;
 	} else {
 		if (mode & MAY_WRITE)
-			req_access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
+			args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
 		if (mode & MAY_EXEC)
-			req_access |= NFS4_ACCESS_EXECUTE;
+			args.access |= NFS4_ACCESS_EXECUTE;
 	}
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "access");
-	nfs4_setup_putfh(&compound, NFS_FH(inode));
-	nfs4_setup_getattr(&compound, &fattr);
-	nfs4_setup_access(&compound, req_access, &resp_supported, &resp_access);
-	status = nfs4_call_compound(&compound, cred, 0);
-	nfs_refresh_inode(inode, &fattr);
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
 	if (!status) {
-		if (req_access != resp_supported) {
+		if (args.access != res.supported) {
 			printk(KERN_NOTICE "NFS: server didn't support all access bits!\n");
 			status = -ENOTSUPP;
-		} else if (req_access != resp_access)
+		} else if ((args.access & res.access) != args.access)
 			status = -EACCES;
 	}
 	return nfs4_map_errors(status);
@@ -1060,16 +800,20 @@ nfs4_proc_access(struct inode *inode, struct rpc_cred *cred, int mode)
  * Both of these changes to the XDR layer would in fact be quite
  * minor, but I decided to leave them for a subsequent patch.
  */
-static int
-nfs4_proc_readlink(struct inode *inode, struct page *page)
+static int nfs4_proc_readlink(struct inode *inode, struct page *page)
 {
-	struct nfs4_compound compound;
-	struct nfs4_op ops[2];
+	struct nfs4_readlink args = {
+		.fh       = NFS_FH(inode),
+		.count    = PAGE_CACHE_SIZE,
+		.pages    = &page,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
+		.rpc_argp = &args,
+		.rpc_resp = NULL,
+	};
 
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "readlink");
-	nfs4_setup_putfh(&compound, NFS_FH(inode));
-	nfs4_setup_readlink(&compound, PAGE_CACHE_SIZE, &page);
-	return nfs4_map_errors(nfs4_call_compound(&compound, NULL, 0));
+	return nfs4_map_errors(rpc_call_sync(NFS_CLIENT(inode), &msg, 0));
 }
 
 static int
@@ -1096,10 +840,10 @@ nfs4_proc_read(struct nfs_read_data *rdata, struct file *filp)
 	if (filp) {
 		struct nfs4_state *state;
 		state = (struct nfs4_state *)filp->private_data;
-		nfs4_copy_stateid(&rdata->args.stateid, state, rdata->lockowner);
+		rdata->args.state = state;
 		msg.rpc_cred = state->owner->so_cred;
 	} else {
-		memcpy(&rdata->args.stateid, &zero_stateid, sizeof(rdata->args.stateid));
+		rdata->args.state = NULL;
 		msg.rpc_cred = NFS_I(inode)->mm_cred;
 	}
@@ -1134,10 +878,10 @@ nfs4_proc_write(struct nfs_write_data *wdata, struct file *filp)
 	if (filp) {
 		struct nfs4_state *state;
 		state = (struct nfs4_state *)filp->private_data;
-		nfs4_copy_stateid(&wdata->args.stateid, state, wdata->lockowner);
+		wdata->args.state = state;
 		msg.rpc_cred = state->owner->so_cred;
 	} else {
-		memcpy(&wdata->args.stateid, &zero_stateid, sizeof(wdata->args.stateid));
+		wdata->args.state = NULL;
 		msg.rpc_cred = NFS_I(inode)->mm_cred;
 	}
@@ -1166,15 +910,10 @@ nfs4_proc_commit(struct nfs_write_data *cdata, struct file *filp)
 	/*
 	 * Try first to use O_WRONLY, then O_RDWR stateid.
 	 */
-	if (filp) {
-		struct nfs4_state *state;
-		state = (struct nfs4_state *)filp->private_data;
-		nfs4_copy_stateid(&cdata->args.stateid, state, cdata->lockowner);
-		msg.rpc_cred = state->owner->so_cred;
-	} else {
-		memcpy(&cdata->args.stateid, &zero_stateid, sizeof(cdata->args.stateid));
+	if (filp)
+		msg.rpc_cred = ((struct nfs4_state *)filp->private_data)->owner->so_cred;
+	else
 		msg.rpc_cred = NFS_I(inode)->mm_cred;
-	}
 
 	fattr->valid = 0;
 	status = rpc_call_sync(server->client, &msg, 0);
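In the read/write/commit hunks above the proc layer stops copying a stateid into the arguments: when the I/O arrives through an open file, the nfs4_state pointer (and its owner's credential) is handed down and the stateid is chosen later, otherwise the state is left NULL so the lower layers fall back to the zero stateid. As a schematic illustration only (helper name and structure are mine, not the patch's):

/* Sketch of the credential/state selection now done in nfs4_proc_read()
 * and nfs4_proc_write(): open file => use its nfs4_state, else anonymous I/O. */
static void sketch_pick_state(struct file *filp, struct inode *inode,
			      struct nfs4_state **statep, struct rpc_cred **credp)
{
	if (filp) {
		struct nfs4_state *state = filp->private_data;

		*statep = state;			/* stateid resolved at encode time */
		*credp  = state->owner->so_cred;
	} else {
		*statep = NULL;				/* lower layer uses the zero stateid */
		*credp  = NFS_I(inode)->mm_cred;	/* credential field as used in this patch */
	}
}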
@@ -1226,301 +965,318 @@ nfs4_proc_create(struct inode *dir, struct qstr *name, struct iattr *sattr,
 	return inode;
 }
 
-static int
-nfs4_proc_remove(struct inode *dir, struct qstr *name)
+static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[3];
-	struct nfs4_change_info	dir_cinfo;
-	struct nfs_fattr	dir_attr;
+	struct nfs4_remove_arg args = {
+		.fh = NFS_FH(dir),
+		.name = name,
+	};
+	struct nfs4_change_info	res;
+	struct rpc_message msg = {
+		.rpc_proc	= &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
+		.rpc_argp	= &args,
+		.rpc_resp	= &res,
+	};
 	int			status;
 
-	dir_attr.valid = 0;
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "remove");
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_remove(&compound, name, &dir_cinfo);
-	nfs4_setup_getattr(&compound, &dir_attr);
-	status = nfs4_call_compound(&compound, NULL, 0);
-	if (!status) {
-		process_cinfo(&dir_cinfo, &dir_attr);
-		nfs_refresh_inode(dir, &dir_attr);
-	}
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (status == 0)
+		update_changeattr(dir, &res);
 	return nfs4_map_errors(status);
 }
 
 struct unlink_desc {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[3];
-	struct nfs4_change_info	cinfo;
-	struct nfs_fattr	attrs;
+	struct nfs4_remove_arg	args;
+	struct nfs4_change_info	res;
 };
 
-static int
-nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir, struct qstr *name)
+static int nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dir,
+		struct qstr *name)
 {
-	struct unlink_desc *	up;
-	struct nfs4_compound *	cp;
+	struct unlink_desc *up;
 
 	up = (struct unlink_desc *) kmalloc(sizeof(*up), GFP_KERNEL);
 	if (!up)
 		return -ENOMEM;
-	cp = &up->compound;
-
-	nfs4_setup_compound(cp, up->ops, NFS_SERVER(dir->d_inode), "unlink_setup");
-	nfs4_setup_putfh(cp, NFS_FH(dir->d_inode));
-	nfs4_setup_remove(cp, name, &up->cinfo);
-	nfs4_setup_getattr(cp, &up->attrs);
+	up->args.fh = NFS_FH(dir->d_inode);
+	up->args.name = name;
 
-	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMPOUND];
-	msg->rpc_argp = cp;
-	msg->rpc_resp = cp;
+	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
+	msg->rpc_argp = &up->args;
+	msg->rpc_resp = &up->res;
 	return 0;
 }
 
-static int
-nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
+static int nfs4_proc_unlink_done(struct dentry *dir, struct rpc_task *task)
 {
 	struct rpc_message *msg = &task->tk_msg;
 	struct unlink_desc *up;
 
-	if (msg->rpc_argp) {
-		up = (struct unlink_desc *) msg->rpc_argp;
-		process_lease(&up->compound);
-		process_cinfo(&up->cinfo, &up->attrs);
-		nfs_refresh_inode(dir->d_inode, &up->attrs);
+	if (msg->rpc_resp != NULL) {
+		up = container_of(msg->rpc_resp, struct unlink_desc, res);
+		update_changeattr(dir->d_inode, &up->res);
 		kfree(up);
+		msg->rpc_resp = NULL;
 		msg->rpc_argp = NULL;
 	}
 	return 0;
 }
 
-static int
-nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
+static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
 		struct inode *new_dir, struct qstr *new_name)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[7];
-	struct nfs4_change_info	old_cinfo, new_cinfo;
-	struct nfs_fattr	old_dir_attr, new_dir_attr;
+	struct nfs4_rename_arg arg = {
+		.old_dir = NFS_FH(old_dir),
+		.new_dir = NFS_FH(new_dir),
+		.old_name = old_name,
+		.new_name = new_name,
+	};
+	struct nfs4_rename_res res = { };
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
 	int			status;
 
-	old_dir_attr.valid = 0;
-	new_dir_attr.valid = 0;
-
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(old_dir), "rename");
-	nfs4_setup_putfh(&compound, NFS_FH(old_dir));
-	nfs4_setup_savefh(&compound);
-	nfs4_setup_putfh(&compound, NFS_FH(new_dir));
-	nfs4_setup_rename(&compound, old_name, new_name, &old_cinfo, &new_cinfo);
-	nfs4_setup_getattr(&compound, &new_dir_attr);
-	nfs4_setup_restorefh(&compound);
-	nfs4_setup_getattr(&compound, &old_dir_attr);
-	status = nfs4_call_compound(&compound, NULL, 0);
+	status = rpc_call_sync(NFS_CLIENT(old_dir), &msg, 0);
 
 	if (!status) {
-		process_cinfo(&old_cinfo, &old_dir_attr);
-		process_cinfo(&new_cinfo, &new_dir_attr);
-		nfs_refresh_inode(old_dir, &old_dir_attr);
-		nfs_refresh_inode(new_dir, &new_dir_attr);
+		update_changeattr(old_dir, &res.old_cinfo);
+		update_changeattr(new_dir, &res.new_cinfo);
 	}
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[7];
-	struct nfs4_change_info	dir_cinfo;
-	struct nfs_fattr	dir_attr, fattr;
+	struct nfs4_link_arg arg = {
+		.fh     = NFS_FH(inode),
+		.dir_fh = NFS_FH(dir),
+		.name   = name,
+	};
+	struct nfs4_change_info	cinfo = { };
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
+		.rpc_argp = &arg,
+		.rpc_resp = &cinfo,
+	};
 	int			status;
 
-	dir_attr.valid = 0;
-	fattr.valid = 0;
-
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(inode), "link");
-	nfs4_setup_putfh(&compound, NFS_FH(inode));
-	nfs4_setup_savefh(&compound);
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_link(&compound, name, &dir_cinfo);
-	nfs4_setup_getattr(&compound, &dir_attr);
-	nfs4_setup_restorefh(&compound);
-	nfs4_setup_getattr(&compound, &fattr);
-	status = nfs4_call_compound(&compound, NULL, 0);
-	if (!status) {
-		process_cinfo(&dir_cinfo, &dir_attr);
-		nfs_refresh_inode(dir, &dir_attr);
-		nfs_refresh_inode(inode, &fattr);
-	}
+	status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
+	if (!status)
+		update_changeattr(dir, &cinfo);
+
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_symlink(struct inode *dir, struct qstr *name, struct qstr *path,
-		struct iattr *sattr, struct nfs_fh *fhandle,
+static int nfs4_proc_symlink(struct inode *dir, struct qstr *name,
+		struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle,
 		struct nfs_fattr *fattr)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[7];
-	struct nfs_fattr	dir_attr;
-	struct nfs4_change_info	dir_cinfo;
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs4_create_arg arg = {
+		.dir_fh = NFS_FH(dir),
+		.server = server,
+		.name = name,
+		.attrs = sattr,
+		.ftype = NF4LNK,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_create_res res = {
+		.server = server,
+		.fh = fhandle,
+		.fattr = fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
 	int			status;
 
-	dir_attr.valid = 0;
+	arg.u.symlink = path;
 	fattr->valid = 0;
 
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "symlink");
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_savefh(&compound);
-	nfs4_setup_create_symlink(&compound, name, path, sattr, &dir_cinfo);
-	nfs4_setup_getattr(&compound, fattr);
-	nfs4_setup_getfh(&compound, fhandle);
-	nfs4_setup_restorefh(&compound);
-	nfs4_setup_getattr(&compound, &dir_attr);
-	status = nfs4_call_compound(&compound, NULL, 0);
-	if (!status) {
-		process_cinfo(&dir_cinfo, &dir_attr);
-		nfs_refresh_inode(dir, &dir_attr);
-	}
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (!status)
+		update_changeattr(dir, &res.dir_cinfo);
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_mkdir(struct inode *dir, struct qstr *name, struct iattr *sattr,
-		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
+static int nfs4_proc_mkdir(struct inode *dir, struct qstr *name, struct iattr *sattr,
+		struct nfs_fh *fhandle, struct nfs_fattr *fattr)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[7];
-	struct nfs_fattr	dir_attr;
-	struct nfs4_change_info	dir_cinfo;
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs4_create_arg arg = {
+		.dir_fh = NFS_FH(dir),
+		.server = server,
+		.name = name,
+		.attrs = sattr,
+		.ftype = NF4DIR,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_create_res res = {
+		.server = server,
+		.fh = fhandle,
+		.fattr = fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
 	int			status;
 
-	dir_attr.valid = 0;
 	fattr->valid = 0;
 
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "mkdir");
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_savefh(&compound);
-	nfs4_setup_create_dir(&compound, name, sattr, &dir_cinfo);
-	nfs4_setup_getattr(&compound, fattr);
-	nfs4_setup_getfh(&compound, fhandle);
-	nfs4_setup_restorefh(&compound);
-	nfs4_setup_getattr(&compound, &dir_attr);
-	status = nfs4_call_compound(&compound, NULL, 0);
-	if (!status) {
-		process_cinfo(&dir_cinfo, &dir_attr);
-		nfs_refresh_inode(dir, &dir_attr);
-	}
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (!status)
+		update_changeattr(dir, &res.dir_cinfo);
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
+static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                   u64 cookie, struct page *page, unsigned int count, int plus)
 {
 	struct inode		*dir = dentry->d_inode;
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[2];
+	struct nfs4_readdir_arg args = {
+		.fh = NFS_FH(dir),
+		.pages = &page,
+		.pgbase = 0,
+		.count = count,
+	};
+	struct nfs4_readdir_res res;
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
+		.rpc_argp = &args,
+		.rpc_resp = &res,
+		.rpc_cred = cred,
+	};
 	int			status;
 
 	lock_kernel();
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "readdir");
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_readdir(&compound, cookie, NFS_COOKIEVERF(dir), &page, count, dentry);
-	status = nfs4_call_compound(&compound, cred, 0);
+	nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+	res.pgbase = args.pgbase;
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	if (status == 0)
-		memcpy(NFS_COOKIEVERF(dir), ops[1].u.readdir.rd_resp_verifier.data, NFS4_VERIFIER_SIZE);
+		memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
 	unlock_kernel();
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_mknod(struct inode *dir, struct qstr *name, struct iattr *sattr,
+static int nfs4_proc_mknod(struct inode *dir, struct qstr *name, struct iattr *sattr,
 		dev_t rdev, struct nfs_fh *fh, struct nfs_fattr *fattr)
 {
-	struct nfs4_compound	compound;
-	struct nfs4_op		ops[7];
-	struct nfs_fattr	dir_attr;
-	struct nfs4_change_info	dir_cinfo;
+	struct nfs_server *server = NFS_SERVER(dir);
+	struct nfs4_create_arg arg = {
+		.dir_fh = NFS_FH(dir),
+		.server = server,
+		.name = name,
+		.attrs = sattr,
+		.bitmask = server->attr_bitmask,
+	};
+	struct nfs4_create_res res = {
+		.server = server,
+		.fh = fh,
+		.fattr = fattr,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
+		.rpc_argp = &arg,
+		.rpc_resp = &res,
+	};
 	int			status;
+	int			mode = sattr->ia_mode;
 
-	dir_attr.valid = 0;
 	fattr->valid = 0;
 
-	nfs4_setup_compound(&compound, ops, NFS_SERVER(dir), "mknod");
-	nfs4_setup_putfh(&compound, NFS_FH(dir));
-	nfs4_setup_savefh(&compound);
-	nfs4_setup_create_special(&compound, name, rdev, sattr, &dir_cinfo);
-	nfs4_setup_getattr(&compound, fattr);
-	nfs4_setup_getfh(&compound, fh);
-	nfs4_setup_restorefh(&compound);
-	nfs4_setup_getattr(&compound, &dir_attr);
-	status = nfs4_call_compound(&compound, NULL, 0);
-	if (!status) {
-		process_cinfo(&dir_cinfo, &dir_attr);
-		nfs_refresh_inode(dir, &dir_attr);
-	}
+	BUG_ON(!(sattr->ia_valid & ATTR_MODE));
+	BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
+	if (S_ISFIFO(mode))
+		arg.ftype = NF4FIFO;
+	else if (S_ISBLK(mode)) {
+		arg.ftype = NF4BLK;
+		arg.u.device.specdata1 = MAJOR(rdev);
+		arg.u.device.specdata2 = MINOR(rdev);
+	}
+	else if (S_ISCHR(mode)) {
+		arg.ftype = NF4CHR;
+		arg.u.device.specdata1 = MAJOR(rdev);
+		arg.u.device.specdata2 = MINOR(rdev);
+	}
+	else
+		arg.ftype = NF4SOCK;
+
+	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
+	if (!status)
+		update_changeattr(dir, &res.dir_cinfo);
 	return nfs4_map_errors(status);
 }
 
-static int
-nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
+static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
 		struct nfs_fsstat *fsstat)
 {
-	struct nfs4_compound compound;
-	struct nfs4_op ops[2];
+	struct nfs4_statfs_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
+		.rpc_argp = &args,
+		.rpc_resp = fsstat,
+	};
 
-	nfs4_setup_compound(&compound, ops, server, "statfs");
-	nfs4_setup_putfh(&compound, fhandle);
-	nfs4_setup_statfs(&compound, fsstat);
-	return nfs4_map_errors(nfs4_call_compound(&compound, NULL, 0));
+	fsstat->fattr->valid = 0;
+	return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0));
 }
 
-static int
-nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
 		struct nfs_fsinfo *fsinfo)
 {
+	struct nfs4_fsinfo_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
-		.rpc_argp = fhandle,
+		.rpc_argp = &args,
 		.rpc_resp = fsinfo,
 	};
 
 	return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0));
 }
 
-static int
-nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
-		struct nfs_pathconf *pathconf)
+static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_fsinfo *fsinfo)
 {
-	struct nfs4_compound compound;
-	struct nfs4_op ops[2];
-
-	nfs4_setup_compound(&compound, ops, server, "statfs");
-	nfs4_setup_putfh(&compound, fhandle);
-	nfs4_setup_pathconf(&compound, pathconf);
-	return nfs4_map_errors(nfs4_call_compound(&compound, NULL, 0));
+	fsinfo->fattr->valid = 0;
+	return nfs4_map_errors(nfs4_do_fsinfo(server, fhandle, fsinfo));
 }
 
-static void
-nfs4_restart_read(struct rpc_task *task)
+static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
+		struct nfs_pathconf *pathconf)
 {
-	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
-	struct nfs_page *req;
+	struct nfs4_pathconf_arg args = {
+		.fh = fhandle,
+		.bitmask = server->attr_bitmask,
+	};
+	struct rpc_message msg = {
+		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
+		.rpc_argp = &args,
+		.rpc_resp = pathconf,
+	};
 
-	rpc_restart_call(task);
-	req = nfs_list_entry(data->pages.next);
-	if (req->wb_state)
-		nfs4_copy_stateid(&data->args.stateid, req->wb_state, req->wb_lockowner);
-	else
-		memcpy(&data->args.stateid, &zero_stateid, sizeof(data->args.stateid));
+	/* None of the pathconf attributes are mandatory to implement */
+	if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
+		memset(pathconf, 0, sizeof(*pathconf));
+		return 0;
+	}
+
+	pathconf->fattr->valid = 0;
+	return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0));
 }
 
 static void
@@ -1530,7 +1286,7 @@ nfs4_read_done(struct rpc_task *task)
 	struct inode *inode = data->inode;
 
 	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
-		task->tk_action = nfs4_restart_read;
+		rpc_restart_call(task);
 		return;
 	}
 	if (task->tk_status > 0)
@@ -1540,7 +1296,7 @@ nfs4_read_done(struct rpc_task *task)
 }
 
 static void
-nfs4_proc_read_setup(struct nfs_read_data *data, unsigned int count)
+nfs4_proc_read_setup(struct nfs_read_data *data)
 {
 	struct rpc_task	*task = &data->task;
 	struct rpc_message msg = {
@@ -1550,51 +1306,18 @@ nfs4_proc_read_setup(struct nfs_read_data *data, unsigned int count)
 		.rpc_cred = data->cred,
 	};
 	struct inode *inode = data->inode;
-	struct nfs_page *req = nfs_list_entry(data->pages.next);
 	int flags;
 
-	data->args.fh     = NFS_FH(inode);
-	data->args.offset = req_offset(req);
-	data->args.pgbase = req->wb_pgbase;
-	data->args.pages  = data->pagevec;
-	data->args.count  = count;
-	data->res.fattr   = &data->fattr;
-	data->res.count   = count;
-	data->res.eof     = 0;
 	data->timestamp   = jiffies;
-	data->lockowner   = req->wb_lockowner;
-	if (req->wb_state)
-		nfs4_copy_stateid(&data->args.stateid, req->wb_state, req->wb_lockowner);
-	else
-		memcpy(&data->args.stateid, &zero_stateid, sizeof(data->args.stateid));
 
 	/* N.B. Do we need to test? Never called for swapfile inode */
 	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
 
 	/* Finalize the task. */
 	rpc_init_task(task, NFS_CLIENT(inode), nfs4_read_done, flags);
-	task->tk_calldata = data;
-	/* Release requests */
-	task->tk_release = nfs_readdata_release;
 	rpc_call_setup(task, &msg, 0);
 }
 
-static void
-nfs4_restart_write(struct rpc_task *task)
-{
-	struct nfs_write_data *data = (struct nfs_write_data *)task->tk_calldata;
-	struct nfs_page *req;
-
-	rpc_restart_call(task);
-	req = nfs_list_entry(data->pages.next);
-	if (req->wb_state)
-		nfs4_copy_stateid(&data->args.stateid, req->wb_state, req->wb_lockowner);
-	else
-		memcpy(&data->args.stateid, &zero_stateid, sizeof(data->args.stateid));
-}
-
 static void
 nfs4_write_done(struct rpc_task *task)
 {
@@ -1602,7 +1325,7 @@ nfs4_write_done(struct rpc_task *task)
struct
inode
*
inode
=
data
->
inode
;
if
(
nfs4_async_handle_error
(
task
,
NFS_SERVER
(
inode
))
==
-
EAGAIN
)
{
task
->
tk_action
=
nfs4_restart_write
;
rpc_restart_call
(
task
)
;
return
;
}
if
(
task
->
tk_status
>=
0
)
...
...
@@ -1612,7 +1335,7 @@ nfs4_write_done(struct rpc_task *task)
}
static
void
nfs4_proc_write_setup
(
struct
nfs_write_data
*
data
,
unsigned
int
count
,
int
how
)
nfs4_proc_write_setup
(
struct
nfs_write_data
*
data
,
int
how
)
{
struct
rpc_task
*
task
=
&
data
->
task
;
struct
rpc_message
msg
=
{
...
...
@@ -1622,7 +1345,6 @@ nfs4_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
.
rpc_cred
=
data
->
cred
,
};
struct
inode
*
inode
=
data
->
inode
;
struct
nfs_page
*
req
=
nfs_list_entry
(
data
->
pages
.
next
);
int
stable
;
int
flags
;
...
...
@@ -1633,33 +1355,15 @@ nfs4_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
stable
=
NFS_DATA_SYNC
;
}
else
stable
=
NFS_UNSTABLE
;
data
->
args
.
fh
=
NFS_FH
(
inode
);
data
->
args
.
offset
=
req_offset
(
req
);
data
->
args
.
pgbase
=
req
->
wb_pgbase
;
data
->
args
.
count
=
count
;
data
->
args
.
stable
=
stable
;
data
->
args
.
pages
=
data
->
pagevec
;
data
->
res
.
fattr
=
&
data
->
fattr
;
data
->
res
.
count
=
count
;
data
->
res
.
verf
=
&
data
->
verf
;
data
->
timestamp
=
jiffies
;
data
->
lockowner
=
req
->
wb_lockowner
;
if
(
req
->
wb_state
)
nfs4_copy_stateid
(
&
data
->
args
.
stateid
,
req
->
wb_state
,
req
->
wb_lockowner
);
else
memcpy
(
&
data
->
args
.
stateid
,
&
zero_stateid
,
sizeof
(
data
->
args
.
stateid
));
data
->
timestamp
=
jiffies
;
/* Set the initial flags for the task. */
flags
=
(
how
&
FLUSH_SYNC
)
?
0
:
RPC_TASK_ASYNC
;
/* Finalize the task. */
rpc_init_task
(
task
,
NFS_CLIENT
(
inode
),
nfs4_write_done
,
flags
);
task
->
tk_calldata
=
data
;
/* Release requests */
task
->
tk_release
=
nfs_writedata_release
;
rpc_call_setup
(
task
,
&
msg
,
0
);
}
...
@@ -1670,7 +1374,7 @@ nfs4_commit_done(struct rpc_task *task)
	struct inode *inode = data->inode;

	if (nfs4_async_handle_error(task, NFS_SERVER(inode)) == -EAGAIN) {
		task->tk_action = nfs4_restart_write;
		rpc_restart_call(task);
		return;
	}
	/* Call back common NFS writeback processing */
...
@@ -1678,7 +1382,7 @@ nfs4_commit_done(struct rpc_task *task)
}

static void
nfs4_proc_commit_setup(struct nfs_write_data *data, u64 start, u32 len, int how)
nfs4_proc_commit_setup(struct nfs_write_data *data, int how)
{
	struct rpc_task *task = &data->task;
	struct rpc_message msg = {
...
@@ -1690,22 +1394,11 @@ nfs4_proc_commit_setup(struct nfs_write_data *data, u64 start, u32 len, int how)
	struct inode *inode = data->inode;
	int flags;

	data->args.fh     = NFS_FH(data->inode);
	data->args.offset = start;
	data->args.count  = len;
	data->res.count   = len;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;

	/* Set the initial flags for the task. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
	rpc_init_task(task, NFS_CLIENT(inode), nfs4_commit_done, flags);
	task->tk_calldata = data;
	/* Release requests */
	task->tk_release = nfs_commit_release;

	rpc_call_setup(task, &msg, 0);
}
...
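Illustrative note (not part of the commit): the nfs4proc.c hunks above drop the per-operation `nfs4_restart_read`/`nfs4_restart_write` actions in favour of a plain `rpc_restart_call()`, and strip the argument marshalling out of the `*_setup()` routines. The following is a minimal standalone C sketch of that "rewind the task to its call step" pattern; every name in it (task, restart_call, do_call) is invented for the example and it is not kernel code.

#include <stdio.h>

struct task;
typedef void (*task_fn)(struct task *);

struct task {
	task_fn next;      /* next step the scheduler will run */
	int     attempts;  /* how many times the call has been issued */
	int     status;    /* result of the last call */
};

static void do_call(struct task *t);

/* Generic equivalent of "restart the call": rewind to the call step. */
static void restart_call(struct task *t)
{
	t->next = do_call;
}

static void call_done(struct task *t)
{
	if (t->status == -1 /* stand-in for a retryable error */ && t->attempts < 3) {
		restart_call(t);        /* retry without a per-op restart handler */
		return;
	}
	t->next = NULL;                 /* finished */
	printf("done, status %d after %d attempts\n", t->status, t->attempts);
}

static void do_call(struct task *t)
{
	t->attempts++;
	/* pretend the first two calls fail with a retryable error */
	t->status = (t->attempts < 3) ? -1 : 0;
	t->next = call_done;
}

int main(void)
{
	struct task t = { .next = do_call };

	while (t.next)                  /* trivial "scheduler" loop */
		t.next(&t);
	return 0;
}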
fs/nfs/nfs4state.c
View file @ 193ce3be
...
@@ -105,7 +105,7 @@ nfs4_alloc_client(struct in_addr *addr)
		INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
		INIT_LIST_HEAD(&clp->cl_superblocks);
		init_waitqueue_head(&clp->cl_waitq);
		INIT_RPC_WAITQ(&clp->cl_rpcwaitq, "NFS4 client");
		rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
		clp->cl_state = 1 << NFS4CLNT_NEW;
	}
	return clp;
...
fs/nfs/nfs4xdr.c
View file @ 193ce3be
This source diff could not be displayed because it is too large. You can view the blob instead.
fs/nfs/nfsroot.c
View file @ 193ce3be
...
@@ -66,7 +66,8 @@
 *	is NOT for the length of the hostname.
 *	Hua Qin		:	Support for mounting root file system via
 *				NFS over TCP.
 */
 *	Fabian Frederick:	Option parser rebuilt (using parser lib)
 */

#include <linux/config.h>
#include <linux/types.h>
...
@@ -85,6 +86,7 @@
#include <linux/inet.h>
#include <linux/root_dev.h>
#include <net/ipconfig.h>
#include <linux/parser.h>

/* Define this to allow debugging output */
#undef NFSROOT_DEBUG
...
@@ -114,92 +116,158 @@ static int mount_port __initdata = 0;		/* Mount daemon port number	*/
 ***************************************************************************/

/*
 *  The following integer options are recognized
 */
static struct nfs_int_opts {
	char *name;
	int  *val;
} root_int_opts[] __initdata = {
	{ "port",	&nfs_port },
	{ "rsize",	&nfs_data.rsize },
	{ "wsize",	&nfs_data.wsize },
	{ "timeo",	&nfs_data.timeo },
	{ "retrans",	&nfs_data.retrans },
	{ "acregmin",	&nfs_data.acregmin },
	{ "acregmax",	&nfs_data.acregmax },
	{ "acdirmin",	&nfs_data.acdirmin },
	{ "acdirmax",	&nfs_data.acdirmax },
	{ NULL,		NULL }
enum {
	Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans,
	Opt_acregmin, Opt_acregmax, Opt_acdirmin, Opt_acdirmax,
	Opt_soft, Opt_hard, Opt_intr, Opt_nointr, Opt_posix, Opt_noposix,
	Opt_cto, Opt_nocto, Opt_ac, Opt_noac, Opt_lock, Opt_nolock,
	Opt_v2, Opt_v3, Opt_udp, Opt_tcp, Opt_broken_suid,
	Opt_err,
};
static match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rsize, "rsize=%u"},
	{Opt_wsize, "wsize=%u"},
	{Opt_timeo, "timeo=%u"},
	{Opt_retrans, "retrans=%u"},
	{Opt_acregmin, "acregmin=%u"},
	{Opt_acregmax, "acregmax=%u"},
	{Opt_acdirmin, "acdirmin=%u"},
	{Opt_acdirmax, "acdirmax=%u"},
	{Opt_soft, "soft"},
	{Opt_hard, "hard"},
	{Opt_intr, "intr"},
	{Opt_nointr, "nointr"},
	{Opt_posix, "posix"},
	{Opt_noposix, "noposix"},
	{Opt_cto, "cto"},
	{Opt_nocto, "nocto"},
	{Opt_ac, "ac"},
	{Opt_noac, "noac"},
	{Opt_lock, "lock"},
	{Opt_nolock, "nolock"},
	{Opt_v2, "v2"},
	{Opt_v3, "v3"},
	{Opt_udp, "udp"},
	{Opt_tcp, "tcp"},
	{Opt_broken_suid, "broken_suid"},
	{Opt_err, NULL}

/*
 *  And now the flag options
 */
static struct nfs_bool_opts {
	char *name;
	int  and_mask;
	int  or_mask;
} root_bool_opts[] __initdata = {
	{ "soft",	~NFS_MOUNT_SOFT,	NFS_MOUNT_SOFT },
	{ "hard",	~NFS_MOUNT_SOFT,	0 },
	{ "intr",	~NFS_MOUNT_INTR,	NFS_MOUNT_INTR },
	{ "nointr",	~NFS_MOUNT_INTR,	0 },
	{ "posix",	~NFS_MOUNT_POSIX,	NFS_MOUNT_POSIX },
	{ "noposix",	~NFS_MOUNT_POSIX,	0 },
	{ "cto",	~NFS_MOUNT_NOCTO,	0 },
	{ "nocto",	~NFS_MOUNT_NOCTO,	NFS_MOUNT_NOCTO },
	{ "ac",		~NFS_MOUNT_NOAC,	0 },
	{ "noac",	~NFS_MOUNT_NOAC,	NFS_MOUNT_NOAC },
	{ "lock",	~NFS_MOUNT_NONLM,	0 },
	{ "nolock",	~NFS_MOUNT_NONLM,	NFS_MOUNT_NONLM },
#ifdef CONFIG_NFS_V3
	{ "v2",		~NFS_MOUNT_VER3,	0 },
	{ "v3",		~NFS_MOUNT_VER3,	NFS_MOUNT_VER3 },
#endif
	{ "udp",	~NFS_MOUNT_TCP,		0 },
	{ "tcp",	~NFS_MOUNT_TCP,		NFS_MOUNT_TCP },
	{ "broken_suid",~NFS_MOUNT_BROKEN_SUID,	NFS_MOUNT_BROKEN_SUID },
	{ NULL,		0,			0 }
};
/*
 *  Parse option string.
 */
static void __init root_nfs_parse(char *name, char *buf)
static int __init root_nfs_parse(char *name, char *buf)
{
	char *options, *val, *cp;

	if ((options = strchr(name, ','))) {
		*options++ = 0;
		while ((cp = strsep(&options, ",")) != NULL) {
			if (!*cp)
				continue;
			if ((val = strchr(cp, '='))) {
				struct nfs_int_opts *opts = root_int_opts;
				*val++ = '\0';
				while (opts->name && strcmp(opts->name, cp))
					opts++;
				if (opts->name)
					*(opts->val) = (int) simple_strtoul(val, NULL, 10);
			} else {
				struct nfs_bool_opts *opts = root_bool_opts;
				while (opts->name && strcmp(opts->name, cp))
					opts++;
				if (opts->name) {
					nfs_data.flags &= opts->and_mask;
					nfs_data.flags |= opts->or_mask;
				}
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;

	if (!name)
		return 1;

	if (name[0] && strcmp(name, "default")){
		strlcpy(buf, name, NFS_MAXPATHLEN);
		return 1;
	}

	while ((p = strsep(&name, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);

		/* %u tokens only */
		if (match_int(&args[0], &option))
			return 0;
		switch (token) {
			case Opt_port: nfs_port = option; break;
			case Opt_rsize: nfs_data.rsize = option; break;
			case Opt_wsize: nfs_data.wsize = option; break;
			case Opt_timeo: nfs_data.timeo = option; break;
			case Opt_retrans: nfs_data.retrans = option; break;
			case Opt_acregmin: nfs_data.acregmin = option; break;
			case Opt_acregmax: nfs_data.acregmax = option; break;
			case Opt_acdirmin: nfs_data.acdirmin = option; break;
			case Opt_acdirmax: nfs_data.acdirmax = option; break;
			case Opt_soft: nfs_data.flags |= NFS_MOUNT_SOFT; break;
			case Opt_hard: nfs_data.flags &= ~NFS_MOUNT_SOFT; break;
			case Opt_intr: nfs_data.flags |= NFS_MOUNT_INTR; break;
			case Opt_nointr: nfs_data.flags &= ~NFS_MOUNT_INTR; break;
			case Opt_posix: nfs_data.flags |= NFS_MOUNT_POSIX; break;
			case Opt_noposix: nfs_data.flags &= ~NFS_MOUNT_POSIX; break;
			case Opt_cto: nfs_data.flags &= ~NFS_MOUNT_NOCTO; break;
			case Opt_nocto: nfs_data.flags |= NFS_MOUNT_NOCTO; break;
			case Opt_ac: nfs_data.flags &= ~NFS_MOUNT_NOAC; break;
			case Opt_noac: nfs_data.flags |= NFS_MOUNT_NOAC; break;
			case Opt_lock: nfs_data.flags &= ~NFS_MOUNT_NONLM; break;
			case Opt_nolock: nfs_data.flags |= NFS_MOUNT_NONLM; break;
			case Opt_v2: nfs_data.flags &= ~NFS_MOUNT_VER3; break;
			case Opt_v3: nfs_data.flags |= NFS_MOUNT_VER3; break;
			case Opt_udp: nfs_data.flags &= ~NFS_MOUNT_TCP; break;
			case Opt_tcp: nfs_data.flags |= NFS_MOUNT_TCP; break;
			case Opt_broken_suid: nfs_data.flags |= NFS_MOUNT_BROKEN_SUID; break;
			default: return 0;
		}
	}
	if (name[0] && strcmp(name, "default"))
		strlcpy(buf, name, NFS_MAXPATHLEN);
	return 1;
}

/*
 *  Prepare the NFS data structure and parse all options.
 */
...
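Illustrative note (not part of the commit): the nfsroot.c hunk above replaces two parallel option arrays with a token table and one switch. Below is a small, self-contained C program that mirrors the shape of that approach; it does not use the kernel parser library, and every name (match_opt, tokens, the option names and defaults) is invented for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { Opt_port, Opt_rsize, Opt_tcp, Opt_udp, Opt_err };

static const struct { int token; const char *pattern; } tokens[] = {
	{ Opt_port,  "port=" },   /* takes an unsigned value */
	{ Opt_rsize, "rsize=" },  /* takes an unsigned value */
	{ Opt_tcp,   "tcp" },
	{ Opt_udp,   "udp" },
	{ Opt_err,   NULL }
};

static int match_opt(const char *opt, unsigned long *val)
{
	for (int i = 0; tokens[i].pattern; i++) {
		size_t n = strlen(tokens[i].pattern);
		int takes_value = tokens[i].pattern[n - 1] == '=';

		if (takes_value && strncmp(opt, tokens[i].pattern, n) == 0) {
			*val = strtoul(opt + n, NULL, 10);
			return tokens[i].token;
		}
		if (!takes_value && strcmp(opt, tokens[i].pattern) == 0)
			return tokens[i].token;
	}
	return Opt_err;
}

int main(void)
{
	char opts[] = "port=2049,rsize=8192,tcp";
	unsigned port = 0, rsize = 0, use_tcp = 0;

	for (char *p = strtok(opts, ","); p; p = strtok(NULL, ",")) {
		unsigned long val = 0;

		switch (match_opt(p, &val)) {
		case Opt_port:  port = val;  break;
		case Opt_rsize: rsize = val; break;
		case Opt_tcp:   use_tcp = 1; break;
		case Opt_udp:   use_tcp = 0; break;
		default:
			fprintf(stderr, "unknown option: %s\n", p);
			return 1;
		}
	}
	printf("port=%u rsize=%u tcp=%u\n", port, rsize, use_tcp);
	return 0;
}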
fs/nfs/pagelist.c
View file @ 193ce3be
...
@@ -32,7 +32,7 @@ static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page	*p;
	p = kmem_cache_alloc(nfs_page_cachep, SLAB_NOFS);
	p = kmem_cache_alloc(nfs_page_cachep, SLAB_KERNEL);
	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->wb_list);
...
@@ -88,6 +88,7 @@ nfs_create_request(struct file *file, struct inode *inode,
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	atomic_set(&req->wb_complete, 0);
	req->wb_index   = page->index;
	page_cache_get(page);
	req->wb_offset  = offset;
...
fs/nfs/proc.c
View file @ 193ce3be
...
@@ -559,11 +559,10 @@ nfs_read_done(struct rpc_task *task)
}

static void
nfs_proc_read_setup(struct nfs_read_data *data, unsigned int count)
nfs_proc_read_setup(struct nfs_read_data *data)
{
	struct rpc_task	*task = &data->task;
	struct inode *inode = data->inode;
	struct nfs_page	*req;
	int flags;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_READ],
...
@@ -572,26 +571,12 @@ nfs_proc_read_setup(struct nfs_read_data *data, unsigned int count)
		.rpc_cred	= data->cred,
	};

	req = nfs_list_entry(data->pages.next);
	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req);
	data->args.pgbase = req->wb_pgbase;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;

	/* N.B. Do we need to test? Never called for swapfile inode */
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);

	/* Finalize the task. */
	rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
	task->tk_calldata = data;
	/* Release requests */
	task->tk_release = nfs_readdata_release;
	rpc_call_setup(&data->task, &msg, 0);
	rpc_call_setup(task, &msg, 0);
}

static void
...
@@ -605,11 +590,10 @@ nfs_write_done(struct rpc_task *task)
}

static void
nfs_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
nfs_proc_write_setup(struct nfs_write_data *data, int how)
{
	struct rpc_task	*task = &data->task;
	struct inode *inode = data->inode;
	struct nfs_page	*req;
	int flags;
	struct rpc_message msg = {
		.rpc_proc	= &nfs_procedures[NFSPROC_WRITE],
...
@@ -619,32 +603,18 @@ nfs_proc_write_setup(struct nfs_write_data *data, unsigned int count, int how)
	};

	/* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
	req = nfs_list_entry(data->pages.next);
	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req);
	data->args.pgbase = req->wb_pgbase;
	data->args.count  = count;
	data->args.stable = NFS_FILE_SYNC;
	data->args.pages  = data->pagevec;
	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;

	/* Set the initial flags for the task. */
	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;

	/* Finalize the task. */
	rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
	task->tk_calldata = data;
	/* Release requests */
	task->tk_release = nfs_writedata_release;
	rpc_call_setup(&data->task, &msg, 0);
	rpc_call_setup(task, &msg, 0);
}

static void
nfs_proc_commit_setup(struct nfs_write_data *data, u64 start, u32 len, int how)
nfs_proc_commit_setup(struct nfs_write_data *data, int how)
{
	BUG();
}
...
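Illustrative note (not part of the commit): the proc.c hunks above move the argument marshalling out of the per-version setup routines, leaving them to pick only the wire procedure and task flags. The standalone C sketch below shows that division of labour with an ops table; all names, fields and numeric values are invented for illustration and are not the kernel's API.

#include <stdio.h>

struct read_args { unsigned long offset; unsigned int count; };

struct read_task {
	struct read_args args;
	int proc;          /* wire procedure chosen by the version */
	int async;         /* task flag chosen by the version */
};

struct nfs_rpc_ops {
	const char *version;
	void (*read_setup)(struct read_task *t);
};

/* arbitrary example procedure numbers */
static void v2_read_setup(struct read_task *t) { t->proc = 2; t->async = 1; }
static void v3_read_setup(struct read_task *t) { t->proc = 3; t->async = 1; }

static const struct nfs_rpc_ops v2_ops = { "v2", v2_read_setup };
static const struct nfs_rpc_ops v3_ops = { "v3", v3_read_setup };

/* Generic setup: argument marshalling lives here, not in the version callback. */
static void read_rpcsetup(const struct nfs_rpc_ops *ops, struct read_task *t,
			  unsigned long offset, unsigned int count)
{
	t->args.offset = offset;
	t->args.count = count;
	ops->read_setup(t);     /* the version only finalizes the task */
}

int main(void)
{
	struct read_task t = { {0, 0}, 0, 0 };

	read_rpcsetup(&v3_ops, &t, 4096, 8192);
	printf("proc=%d async=%d offset=%lu count=%u\n",
	       t.proc, t.async, t.args.offset, t.args.count);
	return 0;
}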
fs/nfs/read.c
View file @ 193ce3be
...
@@ -35,6 +35,8 @@
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static void nfs_readpage_result_partial(struct nfs_read_data *, int);
static void nfs_readpage_result_full(struct nfs_read_data *, int);

static kmem_cache_t *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;
...
@@ -57,12 +59,37 @@ static __inline__ void nfs_readdata_free(struct nfs_read_data *p)
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(struct rpc_task *task)
static void nfs_readdata_release(struct rpc_task *task)
{
	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
	nfs_readdata_free(data);
}

static
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
	loff_t i_size = i_size_read(inode);
	unsigned long idx;

	if (i_size <= 0)
		return 0;
	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (page->index > idx)
		return 0;
	if (page->index != idx)
		return PAGE_CACHE_SIZE;
	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}

static int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}

/*
 * Read a page synchronously.
 */
...
@@ -78,6 +105,7 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
		.inode		= inode,
		.args		= {
			.fh		= NFS_FH(inode),
			.lockowner	= current->files,
			.pages		= &page,
			.pgbase		= 0UL,
			.count		= rsize,
...
@@ -146,89 +174,209 @@ nfs_readpage_async(struct file *file, struct inode *inode, struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	new = nfs_create_request(file, inode, page, 0, PAGE_CACHE_SIZE);
	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(file, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_lock_request(new);
	nfs_list_add_request(new, &one_request);
	nfs_pagein_one(&one_request, inode);
	return 0;
}

static void nfs_readpage_release(struct nfs_page *req)
{
	unlock_page(req->wb_page);
	nfs_clear_request(req);
	nfs_release_request(req);
	nfs_unlock_request(req);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_inode),
			req->wb_bytes,
			(long long)req_offset(req));
}

/*
 * Set up the NFS read request struct
 */
static void
nfs_read_rpcsetup(struct list_head *head, struct nfs_read_data *data)
nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		unsigned int count, unsigned int offset)
{
	struct inode		*inode;
	struct nfs_page		*req;
	struct page		**pages;
	unsigned int		count;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->req	  = req;
	data->inode	  = inode = req->wb_inode;
	data->cred	  = req->wb_cred;

	NFS_PROTO(inode)->read_setup(data, count);

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.lockowner = req->wb_lockowner;
	data->args.state  = req->wb_state;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.eof     = 0;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu.\n",
	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_readdata_release;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)req_offset(req));
			(unsigned long long)data->args.offset);
}

static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;
	struct page	*page;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		page = req->wb_page;
		nfs_list_remove_request(req);
		SetPageError(page);
		unlock_page(page);
		nfs_clear_request(req);
		nfs_release_request(req);
		nfs_unlock_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}

static int
nfs_pagein_one(struct list_head *head, struct inode *inode)
/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(inode);
	struct nfs_read_data	*data;
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	data = nfs_readdata_alloc();
	if (!data)
		goto out_bad;

	nfs_read_rpcsetup(head, data);

	/* Start the async call */
	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_readdata_alloc();
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= rsize)
			break;
		nbytes -= rsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;
		data->complete = nfs_readpage_result_partial;

		if (nbytes > rsize) {
			nfs_read_rpcsetup(req, data, rsize, offset);
			offset += rsize;
			nbytes -= rsize;
		} else {
			nfs_read_rpcsetup(req, data, nbytes, offset);
			nbytes = 0;
		}
		nfs_execute_read(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}

static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_read_data	*data;
	unsigned int		count;

	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(head, inode);

	data = nfs_readdata_alloc();
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->complete = nfs_readpage_result_full;
	nfs_read_rpcsetup(req, data, count, 0);

	nfs_execute_read(data);
	return 0;
out_bad:
	nfs_async_read_error(head);
...
@@ -257,56 +405,96 @@ nfs_pagein_list(struct list_head *head, int rpages)
	return error;
}

/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct nfs_read_data *data, int status)
{
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	if (status >= 0) {
		unsigned int request = data->args.count;
		unsigned int result = data->res.count;

		if (result < request) {
			memclear_highpage_flush(page,
					data->args.pgbase + result,
					request - result);
		}
	} else
		SetPageError(page);

	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
}

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
void nfs_readpage_result(struct rpc_task *task)
static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
{
	struct nfs_read_data	*data = (struct nfs_read_data *)task->tk_calldata;
	unsigned int count = data->res.count;

	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
		task->tk_pid, task->tk_status);

	NFS_FLAGS(data->inode) |= NFS_INO_INVALID_ATIME;
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);
		struct page *page = req->wb_page;
		nfs_list_remove_request(req);

		if (task->tk_status >= 0) {
		if (status >= 0) {
			if (count < PAGE_CACHE_SIZE) {
				if (count < req->wb_bytes)
					memclear_highpage_flush(page,
							req->wb_pgbase + count,
							req->wb_bytes - count);
				count = 0;
			} else
				count -= PAGE_CACHE_SIZE;
			SetPageUptodate(page);
		} else
			SetPageError(page);
		unlock_page(page);
		nfs_readpage_release(req);
	}
}
		dprintk("NFS: read (%s/%Ld %d@%Ld)\n",
			req->wb_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		nfs_clear_request(req);
		nfs_release_request(req);
		nfs_unlock_request(req);

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
void nfs_readpage_result(struct rpc_task *task)
{
	struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;
	int status = task->tk_status;

	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
		task->tk_pid, status);

	/* Is this a short read? */
	if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Yes, so retry the read at the end of the data */
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
			rpc_restart_call(task);
			return;
		}
		task->tk_status = -EIO;
	}
	NFS_FLAGS(data->inode) |= NFS_INO_INVALID_ATIME;
	data->complete(data, status);
}

/*
 * Read a page over NFS.
 * We read the page synchronously in the following cases:
 *  -	The NFS rsize is smaller than PAGE_CACHE_SIZE. We could kludge our way
 *	around this by creating several consecutive read requests, but
 *	that's hardly worth it.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
...
@@ -329,7 +517,7 @@ nfs_readpage(struct file *file, struct page *page)
	if (error)
		goto out_error;

	if (!PageError(page) && NFS_SERVER(inode)->rsize >= PAGE_CACHE_SIZE) {
	if (!IS_SYNC(inode)) {
		error = nfs_readpage_async(file, inode, page);
		goto out;
	}
...
@@ -350,27 +538,26 @@ struct nfs_readdesc {
	struct file *filp;
};

static int
readpage_sync_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	return nfs_readpage_sync(desc->filp, page->mapping->host, page);
}

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;

	nfs_wb_page(inode, page);
	new = nfs_create_request(desc->filp, inode, page, 0, PAGE_CACHE_SIZE);
	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(desc->filp, inode, page, 0, len);
	if (IS_ERR(new)) {
		SetPageError(page);
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
	nfs_lock_request(new);
	nfs_list_add_request(new, desc->head);
	return 0;
...
@@ -385,14 +572,16 @@ nfs_readpages(struct file *filp, struct address_space *mapping,
		.filp		= filp,
		.head		= &head,
	};
	struct nfs_server *server = NFS_SERVER(mapping->host);
	int is_sync = server->rsize < PAGE_CACHE_SIZE;
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	ret = read_cache_pages(mapping, pages,
			is_sync ? readpage_sync_filler : readpage_async_filler,
			&desc);
	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	if (!list_empty(&head)) {
		int err = nfs_pagein_list(&head, server->rpages);
		if (!ret)
...
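Illustrative note (not part of the commit): the read.c hunks above add nfs_pagein_multi(), which splits one page-sized request into several rsize-sized reads and only releases the page once all of them complete. The standalone C sketch below shows just that chunking arithmetic; the names and the fixed page size are invented for the example and no kernel APIs are used.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static void issue_read(unsigned int offset, unsigned int count,
			unsigned int *outstanding)
{
	printf("read %u bytes @ page offset %u\n", count, offset);
	(*outstanding)++;
}

static unsigned int split_page_read(unsigned int bytes, unsigned int rsize)
{
	unsigned int offset = 0, outstanding = 0;

	while (bytes > 0) {
		unsigned int count = bytes > rsize ? rsize : bytes;

		issue_read(offset, count, &outstanding);
		offset += count;
		bytes -= count;
	}
	return outstanding;	/* completions to wait for before unlocking the page */
}

int main(void)
{
	/* e.g. rsize=1024 splits one 4 KiB page into four reads */
	unsigned int n = split_page_read(EXAMPLE_PAGE_SIZE, 1024);

	printf("waiting for %u completions\n", n);
	return 0;
}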
fs/nfs/write.c
View file @ 193ce3be
...
@@ -74,11 +74,17 @@
static struct nfs_page * nfs_update_request(struct file*, struct inode *,
					    struct page *,
					    unsigned int, unsigned int);
static void nfs_writeback_done_partial(struct nfs_write_data *, int);
static void nfs_writeback_done_full(struct nfs_write_data *, int);
static int nfs_wait_on_write_congestion(struct address_space *, int);
static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);

static kmem_cache_t *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static mempool_t *nfs_commit_mempool;

static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);

static __inline__ struct nfs_write_data *nfs_writedata_alloc(void)
{
	struct nfs_write_data	*p;
...
@@ -95,7 +101,7 @@ static __inline__ void nfs_writedata_free(struct nfs_write_data *p)
	mempool_free(p, nfs_wdata_mempool);
}

void nfs_writedata_release(struct rpc_task *task)
static void nfs_writedata_release(struct rpc_task *task)
{
	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_writedata_free(wdata);
...
@@ -117,12 +123,6 @@ static __inline__ void nfs_commit_free(struct nfs_write_data *p)
	mempool_free(p, nfs_commit_mempool);
}

void nfs_commit_release(struct rpc_task *task)
{
	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_commit_free(wdata);
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
...
@@ -173,19 +173,19 @@ static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int
nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
		   unsigned int offset, unsigned int count)
nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
		   unsigned int offset, unsigned int count, int how)
{
	unsigned int	wsize = NFS_SERVER(inode)->wsize;
	int		result, written = 0;
	int		swapfile = IS_SWAPFILE(inode);
	struct nfs_write_data	wdata = {
		.flags		= swapfile ? NFS_RPC_SWAPFLAGS : 0,
		.flags		= how,
		.cred		= NULL,
		.inode		= inode,
		.args		= {
			.fh		= NFS_FH(inode),
			.lockowner	= current->files,
			.pages		= &page,
			.stable		= NFS_FILE_SYNC,
			.pgbase		= offset,
...
@@ -204,7 +204,7 @@ nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
	nfs_begin_data_update(inode);
	do {
		if (count < wsize && !swapfile)
		if (count < wsize)
			wdata.args.count = count;
		wdata.args.offset = page_offset(page) + wdata.args.pgbase;
...
@@ -233,7 +233,7 @@ nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
		ClearPageError(page);

io_error:
	nfs_end_data_update(inode);
	nfs_end_data_update_defer(inode);
	if (wdata.cred)
		put_rpccred(wdata.cred);
...
@@ -259,17 +259,26 @@ static int nfs_writepage_async(struct file *file, struct inode *inode,
	return status;
}

static int wb_priority(struct writeback_control *wbc)
{
	if (wbc->for_reclaim)
		return FLUSH_HIGHPRI;
	if (wbc->for_kupdate)
		return FLUSH_LOWPRI;
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned long end_index;
	unsigned offset = PAGE_CACHE_SIZE;
	loff_t i_size = i_size_read(inode);
	int inode_referenced = 0;
	int priority = wb_priority(wbc);
	int err;

	/*
...
@@ -285,7 +294,7 @@ nfs_writepage(struct page *page, struct writeback_control *wbc)
	end_index = i_size >> PAGE_CACHE_SHIFT;

	/* Ensure we've flushed out any previous writes */
	nfs_wb_page(inode, page);
	nfs_wb_page_priority(inode, page, priority);

	/* easy case */
	if (page->index < end_index)
...
@@ -299,44 +308,60 @@ nfs_writepage(struct page *page, struct writeback_control *wbc)
		goto out;
do_it:
	lock_kernel();
	if (NFS_SERVER(inode)->wsize >= PAGE_CACHE_SIZE && !IS_SYNC(inode) && inode_referenced) {
	if (!IS_SYNC(inode) && inode_referenced) {
		err = nfs_writepage_async(NULL, inode, page, 0, offset);
		if (err >= 0)
		if (err >= 0) {
			err = 0;
			if (wbc->for_reclaim)
				err = WRITEPAGE_ACTIVATE;
		}
	} else {
		err = nfs_writepage_sync(NULL, inode, page, 0, offset);
		err = nfs_writepage_sync(NULL, inode, page, 0, offset, priority);
		if (err == offset)
			err = 0;
	}
	unlock_kernel();
out:
	if (err != WRITEPAGE_ACTIVATE)
		unlock_page(page);
	if (inode_referenced)
		iput(inode);
	return err;
}

/*
 * Note: causes nfs_update_request() to block on the assumption
 * that the writeback is generated due to memory pressure.
 */
int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	struct inode *inode = mapping->host;
	int is_sync = !wbc->nonblocking;
	int err;

	err = generic_writepages(mapping, wbc);
	if (err)
		goto out;
	err = nfs_flush_inode(inode, 0, 0, 0);
	return err;
	while (test_and_set_bit(BDI_write_congested, &bdi->state) != 0) {
		if (wbc->nonblocking)
			return 0;
		nfs_wait_on_write_congestion(mapping, 0);
	}
	err = nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
	if (err < 0)
		goto out;
	if (wbc->sync_mode == WB_SYNC_HOLD)
		wbc->nr_to_write -= err;
	if (!wbc->nonblocking && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wait_on_requests(inode, 0, 0);
		if (err < 0)
			goto out;
	if (is_sync && wbc->sync_mode == WB_SYNC_ALL) {
		err = nfs_wb_all(inode);
	} else
		nfs_commit_inode(inode, 0, 0, 0);
	}
	err = nfs_commit_inode(inode, 0, 0, wb_priority(wbc));
	if (err > 0)
		wbc->nr_to_write -= err;
out:
	clear_bit(BDI_write_congested, &bdi->state);
	wake_up_all(&nfs_write_congestion);
	return err;
}
...
@@ -365,7 +390,7 @@ nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
/*
 * Insert a write request into an inode
 */
static inline void
static void
nfs_inode_remove_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi;
...
@@ -379,7 +404,7 @@ nfs_inode_remove_request(struct nfs_page *req)
	nfsi->npages--;
	if (!nfsi->npages) {
		spin_unlock(&nfs_wreq_lock);
		nfs_end_data_update(inode);
		nfs_end_data_update_defer(inode);
		iput(inode);
	} else
		spin_unlock(&nfs_wreq_lock);
...
@@ -416,7 +441,7 @@ nfs_find_request(struct inode *inode, unsigned long index)
/*
 * Add a request to the inode's dirty list.
 */
static inline void
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	struct inode *inode = req->wb_inode;
...
@@ -444,7 +469,7 @@ nfs_dirty_request(struct nfs_page *req)
/*
 * Add a request to the inode's commit list.
 */
static inline void
static void
nfs_mark_request_commit(struct nfs_page *req)
{
	struct inode *inode = req->wb_inode;
...
@@ -548,6 +573,38 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, unsigned long idx_st
}
#endif

static int nfs_wait_on_write_congestion(struct address_space *mapping, int intr)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	DEFINE_WAIT(wait);
	int ret = 0;

	might_sleep();

	if (!bdi_write_congested(bdi))
		return 0;
	if (intr) {
		struct rpc_clnt *clnt = NFS_CLIENT(mapping->host);
		sigset_t oldset;

		rpc_clnt_sigmask(clnt, &oldset);
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_INTERRUPTIBLE);
		if (bdi_write_congested(bdi)) {
			if (signalled())
				ret = -ERESTARTSYS;
			else
				schedule();
		}
		rpc_clnt_sigunmask(clnt, &oldset);
	} else {
		prepare_to_wait(&nfs_write_congestion, &wait, TASK_UNINTERRUPTIBLE);
		if (bdi_write_congested(bdi))
			schedule();
	}
	finish_wait(&nfs_write_congestion, &wait);
	return ret;
}

/*
 * Try to update any existing write request, or create one if there is none.
...
@@ -560,11 +617,14 @@ static struct nfs_page *
nfs_update_request(struct file* file, struct inode *inode, struct page *page,
		   unsigned int offset, unsigned int bytes)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_page	*req, *new = NULL;
	unsigned long	rqend, end;

	end = offset + bytes;

	if (nfs_wait_on_write_congestion(page->mapping, server->flags & NFS_MOUNT_INTR))
		return ERR_PTR(-ERESTARTSYS);
	for (;;) {
		/* Loop over all inode entries and see if we find
		 * A request for the page we wish to update
...
@@ -668,8 +728,8 @@ nfs_flush_incompatible(struct file *file, struct page *page)
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsigned int count)
{
	struct dentry	*dentry = file->f_dentry;
	struct inode	*inode = page->mapping->host;
...
@@ -680,12 +740,8 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign
		dentry->d_parent->d_name.name, dentry->d_name.name,
		count, (long long)(page_offset(page) + offset));

	/*
	 * If wsize is smaller than page size, update and write
	 * page synchronously.
	 */
	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE || IS_SYNC(inode)) {
		status = nfs_writepage_sync(file, inode, page, offset, count);
	if (IS_SYNC(inode)) {
		status = nfs_writepage_sync(file, inode, page, offset, count, 0);
		if (status > 0) {
			if (offset == 0 && status == PAGE_CACHE_SIZE)
				SetPageUptodate(page);
...
@@ -747,43 +803,162 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign
	return status;
}

static void nfs_writepage_release(struct nfs_page *req)
{
	end_page_writeback(req->wb_page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (!PageError(req->wb_page)) {
		if (NFS_NEED_RESCHED(req)) {
			nfs_mark_request_dirty(req);
			goto out;
		} else if (NFS_NEED_COMMIT(req)) {
			nfs_mark_request_commit(req);
			goto out;
		}
	}
	nfs_inode_remove_request(req);
out:
	nfs_clear_commit(req);
	nfs_clear_reschedule(req);
#else
	nfs_inode_remove_request(req);
#endif
	nfs_unlock_request(req);
}

static inline int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void
nfs_write_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how)
nfs_write_rpcsetup(struct nfs_page *req, struct nfs_write_data *data,
		unsigned int count, unsigned int offset, int how)
{
	struct rpc_task		*task = &data->task;
	struct inode		*inode;
	struct nfs_page		*req;
	struct page		**pages;
	unsigned int		count;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		set_page_writeback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->req	  = req;
	data->inode	  = inode = req->wb_inode;
	data->cred	  = req->wb_cred;

	NFS_PROTO(inode)->write_setup(data, count, how);

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.lockowner = req->wb_lockowner;
	data->args.state  = req->wb_state;

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;

	NFS_PROTO(inode)->write_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_writedata_release;

	dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
		task->tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)req_offset(req));
		(unsigned long long)data->args.offset);
}

static void nfs_execute_write(struct nfs_write_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	unsigned int wsize = NFS_SERVER(inode)->wsize;
	unsigned int nbytes, offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	for (;;) {
		data = nfs_writedata_alloc();
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		if (nbytes <= wsize)
			break;
		nbytes -= wsize;
	}
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	SetPageWriteback(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;
		data->complete = nfs_writeback_done_partial;

		if (nbytes > wsize) {
			nfs_write_rpcsetup(req, data, wsize, offset, how);
			offset += wsize;
			nbytes -= wsize;
		} else {
			nfs_write_rpcsetup(req, data, nbytes, offset, how);
			nbytes = 0;
		}
		nfs_execute_write(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_free(data);
	}
	nfs_mark_request_dirty(req);
	nfs_unlock_request(req);
	return -ENOMEM;
}

/*
...
@@ -794,25 +969,38 @@ nfs_write_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how)
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
{
	struct rpc_clnt		*clnt = NFS_CLIENT(inode);
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;
	sigset_t		oldset;
	unsigned int		count;

	if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
		return nfs_flush_multi(head, inode, how);

	data = nfs_writedata_alloc();
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	count = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		SetPageWriteback(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	data->complete = nfs_writeback_done_full;

	/* Set up the argument struct */
	nfs_write_rpcsetup(head, data, how);
	nfs_write_rpcsetup(req, data, count, 0, how);

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
...
@@ -851,59 +1039,59 @@ nfs_flush_list(struct list_head *head, int wpages, int how)
	return error;
}

/*
 * This function is called when the WRITE call is complete.
 * Handle a write reply that flushed part of a page.
 */
void nfs_writeback_done(struct rpc_task *task)
static void nfs_writeback_done_partial(struct nfs_write_data *data, int status)
{
	struct nfs_write_data	*data = (struct nfs_write_data *)task->tk_calldata;
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;
	struct nfs_page		*req;
	struct page		*page;
	struct nfs_page		*req = data->req;
	struct page		*page = req->wb_page;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);
	dprintk("NFS: write (%s/%Ld %d@%Ld)",
		req->wb_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_inode),
		req->wb_bytes,
		(long long)req_offset(req));

	/* We can't handle that yet but we check for it nevertheless */
	if (resp->count < argp->count && task->tk_status >= 0) {
		static unsigned long complain;
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING "NFS: Server wrote less than requested.\n");
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it right now except throw
		 * an error. */
		task->tk_status = -EIO;
	}
	if (status < 0) {
		ClearPageUptodate(page);
		SetPageError(page);
		if (req->wb_file)
			req->wb_file->f_error = status;
		dprintk(", error = %d\n", status);
	} else {
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (data->verf.committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->hostname,
				data->verf.committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
		if (data->verf.committed < NFS_FILE_SYNC) {
			if (!NFS_NEED_COMMIT(req)) {
				nfs_defer_commit(req);
				memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
				dprintk(" defer commit\n");
			} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
				nfs_defer_reschedule(req);
				dprintk(" server reboot detected\n");
			}
		} else
#endif
			dprintk(" OK\n");
	}

	/*
	 * Process the nfs_page list
	if (atomic_dec_and_test(&req->wb_complete))
		nfs_writepage_release(req);
}

/*
 * Handle a write reply that flushes a whole page.
 *
 * FIXME: There is an inherent race with invalidate_inode_pages and
 *	  writebacks since the page->count is kept > 1 for as long
 *	  as the page has a write request pending.
 */
static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
{
	struct nfs_page		*req;
	struct page		*page;

	/* Update attributes as result of writeback. */
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
...
@@ -915,20 +1103,20 @@ nfs_writeback_done(struct rpc_task *task)
			req->wb_bytes,
			(long long)req_offset(req));

		if (task->tk_status < 0) {
		if (status < 0) {
			ClearPageUptodate(page);
			SetPageError(page);
			if (req->wb_file)
				req->wb_file->f_error = task->tk_status;
				req->wb_file->f_error = status;
			end_page_writeback(page);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", task->tk_status);
			dprintk(", error = %d\n", status);
			goto next;
		}
		end_page_writeback(page);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
		if (argp->stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
		if (data->args.stable != NFS_UNSTABLE || data->verf.committed == NFS_FILE_SYNC) {
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
...
@@ -944,13 +1132,88 @@ nfs_writeback_done(struct rpc_task *task)
	}
}

/*
 * This function is called when the WRITE call is complete.
 */
void nfs_writeback_done(struct rpc_task *task)
{
	struct nfs_write_data	*data = (struct nfs_write_data *)task->tk_calldata;
	struct nfs_writeargs	*argp = &data->args;
	struct nfs_writeres	*resp = &data->res;

	dprintk("NFS: %4d nfs_writeback_done (status %d)\n",
		task->tk_pid, task->tk_status);

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(data->inode)->hostname,
				resp->verf->committed, argp->stable);
			complain = jiffies + 300 * HZ;
		}
	}
#endif
	/* Is this a short write? */
	if (task->tk_status >= 0 && resp->count < argp->count) {
		static unsigned long complain;

		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Was this an NFSv2 write or an NFSv3 stable write? */
			if (resp->verf->committed != NFS_UNSTABLE) {
				/* Resend from where the server left off */
				argp->offset += resp->count;
				argp->pgbase += resp->count;
				argp->count -= resp->count;
			} else {
				/* Resend as a stable write in order to avoid
				 * headaches in the case of a server crash.
				 */
				argp->stable = NFS_FILE_SYNC;
			}
			rpc_restart_call(task);
			return;
		}
		if (time_before(complain, jiffies)) {
			printk(KERN_WARNING "NFS: Server wrote less than requested.\n");
			complain = jiffies + 300 * HZ;
		}
		/* Can't do anything about it except throw an error. */
		task->tk_status = -EIO;
	}

	/*
	 * Process the nfs_page list
	 */
	data->complete(data, task->tk_status);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_commit_release(struct rpc_task *task)
{
	struct nfs_write_data	*wdata = (struct nfs_write_data *)task->tk_calldata;
	nfs_commit_free(wdata);
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
static void nfs_commit_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how)
{
	struct rpc_task		*task = &data->task;
	struct nfs_page		*first, *last;
...
@@ -979,7 +1242,20 @@ nfs_commit_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how
	data->inode	  = inode;
	data->cred	  = first->wb_cred;

	NFS_PROTO(inode)->commit_setup(data, start, len, how);

	data->args.fh     = NFS_FH(data->inode);
	data->args.offset = start;
	data->args.count  = len;
	data->res.count   = len;
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;

	NFS_PROTO(inode)->commit_setup(data, how);

	data->task.tk_priority = flush_task_priority(how);
	data->task.tk_cookie = (unsigned long)inode;
	data->task.tk_calldata = data;
	/* Release requests */
	data->task.tk_release = nfs_commit_release;

	dprintk("NFS: %4d initiated commit call\n", task->tk_pid);
}
...
@@ -990,10 +1266,8 @@ nfs_commit_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how
int
nfs_commit_list(struct list_head *head, int how)
{
	struct rpc_clnt		*clnt;
	struct nfs_write_data	*data;
	struct nfs_page		*req;
	sigset_t		oldset;

	data = nfs_commit_alloc();
...
@@ -1002,13 +1276,8 @@ nfs_commit_list(struct list_head *head, int how)
	/* Set up the argument struct */
	nfs_commit_rpcsetup(head, data, how);

	clnt = NFS_CLIENT(data->inode);
	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
	nfs_execute_write(data);
	return 0;
out_bad:
	while (!list_empty(head)) {
...
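Illustrative note (not part of the commit): the new nfs_writeback_done() above handles short writes by advancing the arguments past what the server accepted and reissuing, and falls back to a stable write when the partial reply was unstable. The standalone C sketch below shows that control flow in isolation; the type and constant names are invented and the numbers are arbitrary example values.

#include <stdio.h>

enum stable_how { UNSTABLE = 0, FILE_SYNC = 2 };

struct write_args {
	unsigned long long offset;
	unsigned int count;
	enum stable_how stable;
};

/* Returns 1 if the caller should resend the (adjusted) request,
 * 0 on completion, -1 when no progress was made. */
static int handle_write_reply(struct write_args *argp,
			      unsigned int res_count, enum stable_how committed)
{
	if (res_count >= argp->count)
		return 0;			/* complete write */
	if (res_count == 0)
		return -1;			/* no progress: give up */

	if (committed != UNSTABLE) {
		/* resend from where the server left off */
		argp->offset += res_count;
		argp->count  -= res_count;
	} else {
		/* partial unstable reply: resend the whole range stably */
		argp->stable = FILE_SYNC;
	}
	return 1;
}

int main(void)
{
	struct write_args args = { 0, 32768, UNSTABLE };

	/* pretend the server only handled the first 16 KiB, stably */
	while (handle_write_reply(&args, 16384, FILE_SYNC) == 1)
		printf("resend %u bytes @ %llu\n", args.count, args.offset);
	return 0;
}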
include/linux/nfs4.h
View file @ 193ce3be
...
@@ -47,6 +47,11 @@
#define NFS4_ACE_SYSTEM_AUDIT_ACE_TYPE 2
#define NFS4_ACE_SYSTEM_ALARM_ACE_TYPE 3

#define ACL4_SUPPORT_ALLOW_ACL 0x01
#define ACL4_SUPPORT_DENY_ACL  0x02
#define ACL4_SUPPORT_AUDIT_ACL 0x04
#define ACL4_SUPPORT_ALARM_ACL 0x08

typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
typedef struct { char data[16]; } nfs4_stateid;
...
@@ -217,64 +222,64 @@ enum lock_type4 {
/* Mandatory Attributes */
#define FATTR4_WORD0_SUPPORTED_ATTRS    (1)
#define FATTR4_WORD0_TYPE               (1 << 1)
#define FATTR4_WORD0_FH_EXPIRE_TYPE     (1 << 2)
#define FATTR4_WORD0_CHANGE             (1 << 3)
#define FATTR4_WORD0_SIZE               (1 << 4)
#define FATTR4_WORD0_LINK_SUPPORT       (1 << 5)
#define FATTR4_WORD0_SYMLINK_SUPPORT    (1 << 6)
#define FATTR4_WORD0_NAMED_ATTR         (1 << 7)
#define FATTR4_WORD0_FSID               (1 << 8)
#define FATTR4_WORD0_UNIQUE_HANDLES     (1 << 9)
#define FATTR4_WORD0_LEASE_TIME         (1 << 10)
#define FATTR4_WORD0_RDATTR_ERROR       (1 << 11)
#define FATTR4_WORD0_SUPPORTED_ATTRS    (1UL << 0)
#define FATTR4_WORD0_TYPE               (1UL << 1)
#define FATTR4_WORD0_FH_EXPIRE_TYPE     (1UL << 2)
#define FATTR4_WORD0_CHANGE             (1UL << 3)
#define FATTR4_WORD0_SIZE               (1UL << 4)
#define FATTR4_WORD0_LINK_SUPPORT       (1UL << 5)
#define FATTR4_WORD0_SYMLINK_SUPPORT    (1UL << 6)
#define FATTR4_WORD0_NAMED_ATTR         (1UL << 7)
#define FATTR4_WORD0_FSID               (1UL << 8)
#define FATTR4_WORD0_UNIQUE_HANDLES     (1UL << 9)
#define FATTR4_WORD0_LEASE_TIME         (1UL << 10)
#define FATTR4_WORD0_RDATTR_ERROR       (1UL << 11)
/* Recommended Attributes */
#define FATTR4_WORD0_ACL                (1 << 12)
#define FATTR4_WORD0_ACLSUPPORT         (1 << 13)
#define FATTR4_WORD0_ARCHIVE            (1 << 14)
#define FATTR4_WORD0_CANSETTIME         (1 << 15)
#define FATTR4_WORD0_CASE_INSENSITIVE   (1 << 16)
#define FATTR4_WORD0_CASE_PRESERVING    (1 << 17)
#define FATTR4_WORD0_CHOWN_RESTRICTED   (1 << 18)
#define FATTR4_WORD0_FILEHANDLE         (1 << 19)
#define FATTR4_WORD0_FILEID             (1 << 20)
#define FATTR4_WORD0_FILES_AVAIL        (1 << 21)
#define FATTR4_WORD0_FILES_FREE         (1 << 22)
#define FATTR4_WORD0_FILES_TOTAL        (1 << 23)
#define FATTR4_WORD0_FS_LOCATIONS       (1 << 24)
#define FATTR4_WORD0_HIDDEN             (1 << 25)
#define FATTR4_WORD0_HOMOGENEOUS        (1 << 26)
#define FATTR4_WORD0_MAXFILESIZE        (1 << 27)
#define FATTR4_WORD0_MAXLINK            (1 << 28)
#define FATTR4_WORD0_MAXNAME            (1 << 29)
#define FATTR4_WORD0_MAXREAD            (1 << 30)
#define FATTR4_WORD0_MAXWRITE           (1 << 31)
#define FATTR4_WORD1_MIMETYPE           (1)
#define FATTR4_WORD1_MODE               (1 << 1)
#define FATTR4_WORD1_NO_TRUNC           (1 << 2)
#define FATTR4_WORD1_NUMLINKS           (1 << 3)
#define FATTR4_WORD1_OWNER              (1 << 4)
#define FATTR4_WORD1_OWNER_GROUP        (1 << 5)
#define FATTR4_WORD1_QUOTA_HARD         (1 << 6)
#define FATTR4_WORD1_QUOTA_SOFT         (1 << 7)
#define FATTR4_WORD1_QUOTA_USED         (1 << 8)
#define FATTR4_WORD1_RAWDEV             (1 << 9)
#define FATTR4_WORD1_SPACE_AVAIL        (1 << 10)
#define FATTR4_WORD1_SPACE_FREE         (1 << 11)
#define FATTR4_WORD1_SPACE_TOTAL        (1 << 12)
#define FATTR4_WORD1_SPACE_USED         (1 << 13)
#define FATTR4_WORD1_SYSTEM             (1 << 14)
#define FATTR4_WORD1_TIME_ACCESS        (1 << 15)
#define FATTR4_WORD1_TIME_ACCESS_SET    (1 << 16)
#define FATTR4_WORD1_TIME_BACKUP        (1 << 17)
#define FATTR4_WORD1_TIME_CREATE        (1 << 18)
#define FATTR4_WORD1_TIME_DELTA         (1 << 19)
#define FATTR4_WORD1_TIME_METADATA      (1 << 20)
#define FATTR4_WORD1_TIME_MODIFY        (1 << 21)
#define FATTR4_WORD1_TIME_MODIFY_SET    (1 << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID  (1 << 23)
#define FATTR4_WORD0_ACL                (1UL << 12)
#define FATTR4_WORD0_ACLSUPPORT         (1UL << 13)
#define FATTR4_WORD0_ARCHIVE            (1UL << 14)
#define FATTR4_WORD0_CANSETTIME         (1UL << 15)
#define FATTR4_WORD0_CASE_INSENSITIVE   (1UL << 16)
#define FATTR4_WORD0_CASE_PRESERVING    (1UL << 17)
#define FATTR4_WORD0_CHOWN_RESTRICTED   (1UL << 18)
#define FATTR4_WORD0_FILEHANDLE         (1UL << 19)
#define FATTR4_WORD0_FILEID             (1UL << 20)
#define FATTR4_WORD0_FILES_AVAIL        (1UL << 21)
#define FATTR4_WORD0_FILES_FREE         (1UL << 22)
#define FATTR4_WORD0_FILES_TOTAL        (1UL << 23)
#define FATTR4_WORD0_FS_LOCATIONS       (1UL << 24)
#define FATTR4_WORD0_HIDDEN             (1UL << 25)
#define FATTR4_WORD0_HOMOGENEOUS        (1UL << 26)
#define FATTR4_WORD0_MAXFILESIZE        (1UL << 27)
#define FATTR4_WORD0_MAXLINK            (1UL << 28)
#define FATTR4_WORD0_MAXNAME            (1UL << 29)
#define FATTR4_WORD0_MAXREAD            (1UL << 30)
#define FATTR4_WORD0_MAXWRITE           (1UL << 31)
#define FATTR4_WORD1_MIMETYPE           (1UL << 0)
#define FATTR4_WORD1_MODE               (1UL << 1)
#define FATTR4_WORD1_NO_TRUNC           (1UL << 2)
#define FATTR4_WORD1_NUMLINKS           (1UL << 3)
#define FATTR4_WORD1_OWNER              (1UL << 4)
#define FATTR4_WORD1_OWNER_GROUP        (1UL << 5)
#define FATTR4_WORD1_QUOTA_HARD         (1UL << 6)
#define FATTR4_WORD1_QUOTA_SOFT         (1UL << 7)
#define FATTR4_WORD1_QUOTA_USED         (1UL << 8)
#define FATTR4_WORD1_RAWDEV             (1UL << 9)
#define FATTR4_WORD1_SPACE_AVAIL        (1UL << 10)
#define FATTR4_WORD1_SPACE_FREE         (1UL << 11)
#define FATTR4_WORD1_SPACE_TOTAL        (1UL << 12)
#define FATTR4_WORD1_SPACE_USED         (1UL << 13)
#define FATTR4_WORD1_SYSTEM             (1UL << 14)
#define FATTR4_WORD1_TIME_ACCESS        (1UL << 15)
#define FATTR4_WORD1_TIME_ACCESS_SET    (1UL << 16)
#define FATTR4_WORD1_TIME_BACKUP        (1UL << 17)
#define FATTR4_WORD1_TIME_CREATE        (1UL << 18)
#define FATTR4_WORD1_TIME_DELTA         (1UL << 19)
#define FATTR4_WORD1_TIME_METADATA      (1UL << 20)
#define FATTR4_WORD1_TIME_MODIFY        (1UL << 21)
#define FATTR4_WORD1_TIME_MODIFY_SET    (1UL << 22)
#define FATTR4_WORD1_MOUNTED_ON_FILEID  (1UL << 23)

#define NFSPROC4_NULL 0
#define NFSPROC4_COMPOUND 1
...
@@ -287,7 +292,6 @@ enum lock_type4 {
enum {
	NFSPROC4_CLNT_NULL = 0,		/* Unused */
	NFSPROC4_CLNT_COMPOUND,		/* Soon to be unused */
	NFSPROC4_CLNT_READ,
	NFSPROC4_CLNT_WRITE,
	NFSPROC4_CLNT_COMMIT,
...
@@ -304,6 +308,19 @@ enum {
	NFSPROC4_CLNT_LOCK,
	NFSPROC4_CLNT_LOCKT,
	NFSPROC4_CLNT_LOCKU,
	NFSPROC4_CLNT_ACCESS,
	NFSPROC4_CLNT_GETATTR,
	NFSPROC4_CLNT_LOOKUP,
	NFSPROC4_CLNT_LOOKUP_ROOT,
	NFSPROC4_CLNT_REMOVE,
	NFSPROC4_CLNT_RENAME,
	NFSPROC4_CLNT_LINK,
	NFSPROC4_CLNT_CREATE,
	NFSPROC4_CLNT_PATHCONF,
	NFSPROC4_CLNT_STATFS,
	NFSPROC4_CLNT_READLINK,
	NFSPROC4_CLNT_READDIR,
	NFSPROC4_CLNT_SERVER_CAPS,
};

#endif
...
include/linux/nfs_fs.h
View file @
193ce3be
...
...
@@ -69,6 +69,8 @@
#define FLUSH_SYNC		1	/* file being synced, or contention */
#define FLUSH_WAIT		2	/* wait for completion */
#define FLUSH_STABLE		4	/* commit to stable storage */
#define FLUSH_LOWPRI		8	/* low priority background flush */
#define FLUSH_HIGHPRI		16	/* high priority memory reclaim flush */
#ifdef __KERNEL__
...
...
@@ -275,6 +277,7 @@ extern void nfs_begin_attr_update(struct inode *);
extern void nfs_end_attr_update(struct inode *);
extern void nfs_begin_data_update(struct inode *);
extern void nfs_end_data_update(struct inode *);
extern void nfs_end_data_update_defer(struct inode *);

/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
extern u32 root_nfs_parse_addr(char *name); /*__init*/
...
...
@@ -335,10 +338,8 @@ extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern void nfs_writeback_done(struct rpc_task *task);
extern void nfs_writedata_release(struct rpc_task *task);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern void nfs_commit_release(struct rpc_task *task);
extern void nfs_commit_done(struct rpc_task *);
#endif
...
...
@@ -376,14 +377,18 @@ nfs_wb_all(struct inode *inode)
/*
 * Write back all requests on one page - we do this before reading it.
 */
static inline int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
{
	int error = nfs_sync_inode(inode, page->index, 1,
			how | FLUSH_WAIT | FLUSH_STABLE);
	return (error < 0) ? error : 0;
}

static inline int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, 0);
}

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode) (0)
...
...
@@ -397,7 +402,6 @@ extern int nfs_readpages(struct file *, struct address_space *,
		struct list_head *, unsigned);
extern int nfs_pagein_list(struct list_head *, int);
extern void nfs_readpage_result(struct rpc_task *);
extern void nfs_readdata_release(struct rpc_task *);
/*
* linux/fs/mount_clnt.c
...
...
include/linux/nfs_fs_sb.h
View file @
193ce3be
...
...
@@ -38,10 +38,19 @@ struct nfs_server {
	struct list_head	nfs4_siblings;	/* List of other nfs_server structs
						 * that share the same clientid */
	u32			attr_bitmask[2];/* V4 bitmask representing the set
						   of attributes supported on this
						   filesystem */
	u32			acl_bitmask;	/* V4 bitmask representing the ACEs
						   that are supported on this
						   filesystem */
#endif
};

/* Server capabilities */
#define NFS_CAP_READDIRPLUS (1)
#define NFS_CAP_READDIRPLUS (1U << 0)
#define NFS_CAP_HARDLINKS (1U << 1)
#define NFS_CAP_SYMLINKS (1U << 2)
#define NFS_CAP_ACLS (1U << 3)
#endif
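A hedged illustration of how the reworked single-bit NFS_CAP_* flags are intended to be tested; the caps variable and helper name are placeholders, not code from this commit.

/* Sketch only: capability test against the NFS_CAP_* bits above. */
static inline int example_server_supports_acls(u32 caps)
{
	return (caps & NFS_CAP_ACLS) != 0;
}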
include/linux/nfs_page.h
View file @
193ce3be
...
...
@@ -17,10 +17,14 @@
#include <linux/sunrpc/auth.h>
#include <linux/nfs_xdr.h>
#include <asm/atomic.h>
/*
* Valid flags for a dirty buffer
*/
#define PG_BUSY 0
#define PG_NEED_COMMIT 1
#define PG_NEED_RESCHED 2
struct nfs_page {
	struct list_head	wb_list,	/* Defines state of page: */
...
...
@@ -31,6 +35,7 @@ struct nfs_page {
	struct rpc_cred		*wb_cred;
	struct nfs4_state	*wb_state;
	struct page		*wb_page;	/* page to read in/write out */
	atomic_t		wb_complete;	/* i/os we're waiting for */
	wait_queue_head_t	wb_wait;	/* wait queue */
	unsigned long		wb_index;	/* Offset >> PAGE_CACHE_SHIFT */
	unsigned int		wb_offset,	/* Offset & ~PAGE_CACHE_MASK */
...
...
@@ -42,6 +47,8 @@ struct nfs_page {
};
#define NFS_WBACK_BUSY(req)	(test_bit(PG_BUSY,&(req)->wb_flags))
#define NFS_NEED_COMMIT(req)	(test_bit(PG_NEED_COMMIT,&(req)->wb_flags))
#define NFS_NEED_RESCHED(req)	(test_bit(PG_NEED_RESCHED,&(req)->wb_flags))

extern struct nfs_page *nfs_create_request(struct file *, struct inode *,
					   struct page *,
...
...
@@ -93,7 +100,6 @@ nfs_unlock_request(struct nfs_page *req)
	smp_mb__before_clear_bit();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_clear_bit();
	if (waitqueue_active(&req->wb_wait))
		wake_up_all(&req->wb_wait);
	nfs_release_request(req);
}
...
...
@@ -115,6 +121,38 @@ nfs_list_remove_request(struct nfs_page *req)
	req->wb_list_head = NULL;
}

static inline int
nfs_defer_commit(struct nfs_page *req)
{
	if (test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags))
		return 0;
	return 1;
}

static inline void
nfs_clear_commit(struct nfs_page *req)
{
	smp_mb__before_clear_bit();
	clear_bit(PG_NEED_COMMIT, &req->wb_flags);
	smp_mb__after_clear_bit();
}

static inline int
nfs_defer_reschedule(struct nfs_page *req)
{
	if (test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags))
		return 0;
	return 1;
}

static inline void
nfs_clear_reschedule(struct nfs_page *req)
{
	smp_mb__before_clear_bit();
	clear_bit(PG_NEED_RESCHED, &req->wb_flags);
	smp_mb__after_clear_bit();
}
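As a usage sketch (not part of the patch), the test_and_set_bit() pattern in nfs_defer_commit()/nfs_defer_reschedule() lets exactly one caller win the right to schedule the deferred work:

/* Sketch only: "req" is an already-locked nfs_page; the scheduling step
 * is a placeholder for whatever the write path does with it. */
static void example_mark_needs_commit(struct nfs_page *req)
{
	/* Only the first caller to set PG_NEED_COMMIT gets a non-zero
	 * return and should queue the actual COMMIT; later callers can
	 * rely on the bit already being set. */
	if (nfs_defer_commit(req))
		/* queue the COMMIT for this request (placeholder) */ ;
}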
static inline struct nfs_page *
nfs_list_entry(struct list_head *head)
{
...
...
include/linux/nfs_xdr.h
View file @
193ce3be
...
...
@@ -3,6 +3,11 @@
#include <linux/sunrpc/xprt.h>
struct nfs4_fsid {
	__u64	major;
	__u64	minor;
};

struct nfs_fattr {
	unsigned short		valid;		/* which fields are valid */
	__u64			pre_size;	/* pre_op_attr.size */
...
...
@@ -26,10 +31,7 @@ struct nfs_fattr {
	dev_t			rdev;
	union {
		__u64		nfs3;		/* also nfs2 */
		struct nfs4_fsid nfs4;
	} fsid_u;
	__u64			fileid;
	struct timespec		atime;
...
...
@@ -87,6 +89,12 @@ struct nfs_pathconf {
	__u32			max_namelen;	/* max name length */
};

struct nfs4_change_info {
	u32			atomic;
	u64			before;
	u64			after;
};
/*
* Arguments to the open call.
*/
...
...
@@ -102,20 +110,18 @@ struct nfs_openargs {
		struct iattr *  attrs;    /* UNCHECKED, GUARDED */
		nfs4_verifier   verifier; /* EXCLUSIVE */
	} u;
	const struct qstr *	name;
	const struct nfs_server *server;	/* Needed for ID mapping */
	const u32 *		bitmask;
};

struct nfs_openres {
	nfs4_stateid		stateid;
	struct nfs_fh		fh;
	struct nfs4_change_info	cinfo;
	__u32			rflags;
	struct nfs_fattr *	f_attr;
	const struct nfs_server *server;
};

/*
...
...
@@ -141,7 +147,7 @@ struct nfs_open_reclaimargs {
	__u32		id;
	__u32		share_access;
	__u32		claim;
	const __u32 *	bitmask;
};

/*
...
...
@@ -215,7 +221,7 @@ struct nfs_lockres {
		nfs4_stateid		stateid;/* LOCK success, LOCKU */
		struct nfs_lock_denied	denied; /* LOCK failed, LOCKT success */
	} u;
	const struct nfs_server *	server;
};

/*
...
...
@@ -229,7 +235,8 @@ struct nfs_lockres {
struct nfs_readargs {
	struct nfs_fh *		fh;
	nfs4_stateid		stateid;
	fl_owner_t		lockowner;
	struct nfs4_state *	state;
	__u64			offset;
	__u32			count;
	unsigned int		pgbase;
...
...
@@ -252,7 +259,8 @@ struct nfs_readres {
struct nfs_writeargs {
	struct nfs_fh *		fh;
	nfs4_stateid		stateid;
	fl_owner_t		lockowner;
	struct nfs4_state *	state;
	__u64			offset;
	__u32			count;
	enum nfs3_stable_how	stable;
...
...
@@ -319,13 +327,13 @@ struct nfs_setattrargs {
	struct nfs_fh *			fh;
	nfs4_stateid			stateid;
	struct iattr *			iap;
	const struct nfs_server *	server;	/* Needed for name mapping */
	const u32 *			bitmask;
};

struct nfs_setattrres {
	struct nfs_fattr *		fattr;
	const struct nfs_server *	server;
};

struct nfs_linkargs {
...
...
@@ -476,124 +484,116 @@ struct nfs3_readdirres {

typedef u64 clientid4;

struct nfs4_accessargs {
	const struct nfs_fh *		fh;
	u32				access;
};

struct nfs4_accessres {
	u32				supported;
	u32				access;
};

struct nfs4_create_arg {
	u32				ftype;
	union {
		struct qstr *		symlink;    /* NF4LNK */
		struct {
			u32		specdata1;
			u32		specdata2;
		} device;    /* NF4BLK, NF4CHR */
	} u;
	const struct qstr *		name;
	const struct nfs_server *	server;
	const struct iattr *		attrs;
	const struct nfs_fh *		dir_fh;
	const u32 *			bitmask;
};

struct nfs4_create_res {
	const struct nfs_server *	server;
	struct nfs_fh *			fh;
	struct nfs_fattr *		fattr;
	struct nfs4_change_info		dir_cinfo;
};

struct nfs4_fsinfo_arg {
	const struct nfs_fh *		fh;
	const u32 *			bitmask;
};

struct nfs4_getattr_arg {
	const struct nfs_fh *		fh;
	const u32 *			bitmask;
};

struct nfs4_getattr_res {
	const struct nfs_server *	server;
	struct nfs_fattr *		fattr;
};

struct nfs4_link_arg {
	const struct nfs_fh *		fh;
	const struct nfs_fh *		dir_fh;
	const struct qstr *		name;
};

struct nfs4_lookup_arg {
	const struct nfs_fh *		dir_fh;
	const struct qstr *		name;
	const u32 *			bitmask;
};

struct nfs4_lookup_res {
	const struct nfs_server *	server;
	struct nfs_fattr *		fattr;
	struct nfs_fh *			fh;
};

struct nfs4_lookup_root_arg {
	const u32 *			bitmask;
};

struct nfs4_pathconf_arg {
	const struct nfs_fh *		fh;
	const u32 *			bitmask;
};

struct nfs4_readdir_arg {
	const struct nfs_fh *		fh;
	u64				cookie;
	nfs4_verifier			verifier;
	u32				count;
	struct page **			pages;	/* zero-copy data */
	unsigned int			pgbase;	/* zero-copy data */
};

struct nfs4_readdir_res {
	nfs4_verifier			verifier;
	unsigned int			pgbase;
};

struct nfs4_readlink {
	const struct nfs_fh *		fh;
	u32				count;	/* zero-copy data */
	struct page **			pages;	/* zero-copy data */
};

struct nfs4_remove_arg {
	const struct nfs_fh *		fh;
	const struct qstr *		name;
};

struct nfs4_rename_arg {
	const struct nfs_fh *		old_dir;
	const struct nfs_fh *		new_dir;
	const struct qstr *		old_name;
	const struct qstr *		new_name;
};

struct nfs4_rename_res {
	struct nfs4_change_info		old_cinfo;
	struct nfs4_change_info		new_cinfo;
};

struct nfs4_setclientid {
...
...
@@ -606,70 +606,37 @@ struct nfs4_setclientid {
	struct nfs4_client *		sc_state;	/* response */
};

struct nfs4_statfs_arg {
	const struct nfs_fh *		fh;
	const u32 *			bitmask;
};

struct nfs4_server_caps_res {
	u32				attr_bitmask[2];
	u32				acl_bitmask;
	u32				has_links;
	u32				has_symlinks;
};

#endif /* CONFIG_NFS_V4 */

struct nfs_page;

struct nfs_read_data {
	int			flags;
	struct rpc_task		task;
	struct inode		*inode;
	struct rpc_cred		*cred;
	fl_owner_t		lockowner;
	struct nfs_fattr	fattr;	/* fattr storage */
	struct list_head	pages;	/* Coalesced read requests */
	struct nfs_page		*req;	/* multi ops per nfs_page */
	struct page		*pagevec[NFS_READ_MAXIOV];
	struct nfs_readargs	args;
	struct nfs_readres	res;
#ifdef CONFIG_NFS_V4
	unsigned long		timestamp;	/* For lease renewal */
#endif
	void (*complete) (struct nfs_read_data *, int);
};

struct nfs_write_data {
...
...
@@ -677,20 +644,19 @@ struct nfs_write_data {
	struct rpc_task		task;
	struct inode		*inode;
	struct rpc_cred		*cred;
	fl_owner_t		lockowner;
	struct nfs_fattr	fattr;
	struct nfs_writeverf	verf;
	struct list_head	pages;		/* Coalesced requests we wish to flush */
	struct nfs_page		*req;		/* multi ops per nfs_page */
	struct page		*pagevec[NFS_WRITE_MAXIOV];
	struct nfs_writeargs	args;		/* argument struct */
	struct nfs_writeres	res;		/* result struct */
#ifdef CONFIG_NFS_V4
	unsigned long		timestamp;	/* For lease renewal */
#endif
	void (*complete) (struct nfs_write_data *, int);
};

/*
 * RPC procedure vector for NFSv2/NFSv3 demuxing
 */
...
...
@@ -737,9 +703,9 @@ struct nfs_rpc_ops {
	int	(*pathconf) (struct nfs_server *, struct nfs_fh *,
			     struct nfs_pathconf *);
	u32 *	(*decode_dirent)(u32 *, struct nfs_entry *, int plus);
	void	(*read_setup)   (struct nfs_read_data *);
	void	(*write_setup)  (struct nfs_write_data *, int how);
	void	(*commit_setup) (struct nfs_write_data *, int how);
	int	(*file_open)   (struct inode *, struct file *);
	int	(*file_release) (struct inode *, struct file *);
	void	(*request_init)(struct nfs_page *, struct file *);
...
...
include/linux/sunrpc/sched.h
View file @
193ce3be
...
...
@@ -49,6 +49,8 @@ struct rpc_task {
				tk_cred_retry,
				tk_suid_retry;

	unsigned long		tk_cookie;	/* Cookie for batching tasks */

	/*
	 * timeout_fn to be executed by timer bottom half
	 * callback to be executed after waking up
...
...
@@ -72,7 +74,9 @@ struct rpc_task {
	unsigned long		tk_timeout;	/* timeout for rpc_sleep() */
	unsigned short		tk_flags;	/* misc flags */
	unsigned char		tk_active   : 1;/* Task has been activated */
	unsigned char		tk_priority : 2;/* Task priority */
	unsigned long		tk_runstate;	/* Task run status */
	struct list_head	tk_links;	/* links to related tasks */
#ifdef RPC_DEBUG
	unsigned short		tk_pid;		/* debugging aid */
#endif
...
...
@@ -137,29 +141,59 @@ typedef void (*rpc_action)(struct rpc_task *);
		smp_mb__after_clear_bit(); \
	} while(0)

/*
 * Task priorities.
 * Note: if you change these, you must also change
 * the task initialization definitions below.
 */
#define RPC_PRIORITY_LOW	0
#define RPC_PRIORITY_NORMAL	1
#define RPC_PRIORITY_HIGH	2
#define RPC_NR_PRIORITY		(RPC_PRIORITY_HIGH+1)

/*
 * RPC synchronization objects
 */
struct rpc_wait_queue {
	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
	unsigned long		cookie;			/* cookie of last task serviced */
	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
	unsigned char		priority;		/* current priority */
	unsigned char		count;			/* # task groups remaining serviced so far */
	unsigned char		nr;			/* # tasks remaining for cookie */
#ifdef RPC_DEBUG
	const char *		name;
#endif
};

/*
 * This is the # requests to send consecutively
 * from a single cookie.  The aim is to improve
 * performance of NFS operations such as read/write.
 */
#define RPC_BATCH_COUNT			16

#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
		}, \
	}
#else
# define RPC_WAITQ_INIT(var,qname) { \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
		}, \
		.name = qname, \
	}
#endif
# define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)

#define RPC_IS_PRIORITY(q)	((q)->maxpriority > 0)
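To make the new queue flavours concrete, here is a small sketch (assumptions, not code from this commit) of the two ways a wait queue can now be set up: statically via RPC_WAITQ(), or at run time as a three-level priority queue via rpc_init_priority_wait_queue().

/* Sketch only: a plain FIFO queue declared statically ... */
static RPC_WAITQ(example_fifo_queue, "example_fifo");

/* ... and a priority queue initialized at run time. */
static void example_init_queues(struct rpc_wait_queue *q)
{
	rpc_init_priority_wait_queue(q, "example_priority");
}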
/*
* Function prototypes
...
...
@@ -175,6 +209,8 @@ void rpc_run_child(struct rpc_task *parent, struct rpc_task *child,
					rpc_action action);
int		rpc_add_wait_queue(struct rpc_wait_queue *, struct rpc_task *);
void		rpc_remove_wait_queue(struct rpc_task *);
void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
					rpc_action action, rpc_action timer);
void		rpc_add_timer(struct rpc_task *, rpc_action);
...
...
@@ -194,16 +230,14 @@ void rpc_show_tasks(void);
int		rpc_init_mempool(void);
void		rpc_destroy_mempool(void);

static inline void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = NULL;
}

#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
	return ((q && q->name) ? q->name : "unknown");
}
...
...
include/linux/sunrpc/xdr.h
View file @
193ce3be
...
...
@@ -55,7 +55,8 @@ struct xdr_buf {
	unsigned int	page_base,	/* Start of page data */
			page_len;	/* Length of page data */

	unsigned int	buflen,		/* Total length of storage buffer */
			len;		/* Length of XDR encoded message */
};
...
...
@@ -87,7 +88,8 @@ struct xdr_buf {
/*
 * Miscellaneous XDR helper functions
 */
u32 *	xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int len);
u32 *	xdr_encode_opaque(u32 *p, const void *ptr, unsigned int len);
u32 *	xdr_encode_string(u32 *p, const char *s);
u32 *	xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen);
u32 *	xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen);
...
...
@@ -100,6 +102,11 @@ void xdr_encode_pages(struct xdr_buf *, struct page **, unsigned int,
void	xdr_inline_pages(struct xdr_buf *, unsigned int,
			 struct page **, unsigned int, unsigned int);

static inline u32 *xdr_encode_array(u32 *p, const void *s, unsigned int len)
{
	return xdr_encode_opaque(p, s, len);
}

/*
 * Decode 64bit quantities (NFSv3 support)
 */
...
...
@@ -178,86 +185,14 @@ struct xdr_stream {
	struct iovec *iov;	/* pointer to the current iovec */
};

extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p);
extern uint32_t *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
		unsigned int base, unsigned int len);
extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p);
extern uint32_t *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);

#endif /* __KERNEL__ */

#endif /* _SUNRPC_XDR_H_ */
include/linux/sunrpc/xprt.h
View file @
193ce3be
...
...
@@ -120,8 +120,6 @@ struct rpc_rqst {
};
#define rq_svec rq_snd_buf.head
#define rq_slen rq_snd_buf.len
#define rq_rvec rq_rcv_buf.head
#define rq_rlen rq_rcv_buf.len
#define XPRT_LAST_FRAG (1 << 0)
#define XPRT_COPY_RECM (1 << 1)
...
...
@@ -218,12 +216,15 @@ void xprt_connect(struct rpc_task *);
int	xprt_clear_backlog(struct rpc_xprt *);
void	xprt_sock_setbufsize(struct rpc_xprt *);

#define XPRT_CONNECT 0
#define XPRT_LOCKED 1
#define XPRT_LOCKED 0
#define XPRT_CONNECT 1
#define XPRT_CONNECTING 2
#define xprt_connected(xp) (test_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_set_connected(xp) (set_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_test_and_clear_connected(xp) \
(test_and_clear_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate))
#endif
/* __KERNEL__*/
...
...
net/sunrpc/auth_gss/auth_gss.c
View file @
193ce3be
...
...
@@ -365,7 +365,7 @@ gss_upcall(struct rpc_clnt *clnt, struct rpc_task *task, struct rpc_cred *cred)
		gss_msg = gss_new;
		memset(gss_new, 0, sizeof(*gss_new));
		INIT_LIST_HEAD(&gss_new->list);
		rpc_init_wait_queue(&gss_new->waitq, "RPCSEC_GSS upcall waitq");
		atomic_set(&gss_new->count, 2);
		msg = &gss_new->msg;
		msg->data = &gss_new->uid;
...
...
@@ -721,8 +721,7 @@ gss_marshal(struct rpc_task *task, u32 *p, int ruid)
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
...
...
@@ -857,9 +856,7 @@ gss_wrap_req(struct rpc_task *task,
	status = -EIO; /* XXX? */
	if (maj_stat)
		goto out;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *) q - (u8 *) p;
	iov->iov_len += offset;
...
...
net/sunrpc/auth_gss/gss_krb5_crypto.c
View file @
193ce3be
...
...
@@ -40,6 +40,7 @@
#include <asm/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/gss_krb5.h>
#ifdef RPC_DEBUG
...
...
@@ -171,22 +172,24 @@ krb5_make_checksum(s32 cksumtype, char *header, struct xdr_buf *body,
	}
	len = body->page_len;
	if (len != 0) {
		offset = body->page_base & (PAGE_CACHE_SIZE - 1);
		i = body->page_base >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - offset;
		do {
			if (thislen > len)
				thislen = len;
			sg->page = body->pages[i];
			sg->offset = offset;
			sg->length = thislen;
			kmap(sg->page); /* XXX kmap_atomic? */
			crypto_digest_update(tfm, sg, 1);
			kunmap(sg->page);
			len -= thislen;
			i++;
			offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (len != 0);
	}
	if (body->tail[0].iov_len) {
		buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len);
...
...
net/sunrpc/clnt.c
View file @
193ce3be
...
...
@@ -144,7 +144,7 @@ rpc_create_client(struct rpc_xprt *xprt, char *servname,
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;
...
...
@@ -605,11 +605,13 @@ call_encode(struct rpc_task *task)
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base  = (void *)((char *)task->tk_buffer + bufsiz);
	rcvbuf->head[0].iov_len   = bufsiz;
	rcvbuf->tail[0].iov_len   = 0;
	rcvbuf->page_len	  = 0;
	rcvbuf->len		  = 0;
	rcvbuf->buflen		  = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
...
...
@@ -849,6 +851,8 @@ call_decode(struct rpc_task *task)
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);
...
...
@@ -884,7 +888,7 @@ call_decode(struct rpc_task *task)
			task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}
...
...
@@ -956,7 +960,7 @@ call_header(struct rpc_task *task)
static u32 *
call_verify(struct rpc_task *task)
{
	u32	*p = task->tk_rqstp->rq_rcv_buf.head[0].iov_base, n;

	p += 1;	/* skip XID */
...
...
net/sunrpc/sched.c
View file @
193ce3be
...
...
@@ -161,6 +161,26 @@ rpc_delete_timer(struct rpc_task *task)
	dprintk("RPC: %4d deleting timer\n", task->tk_pid);
}

/*
 * Add new request to a priority queue.
 */
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	struct list_head *q;
	struct rpc_task *t;

	q = &queue->tasks[task->tk_priority];
	if (unlikely(task->tk_priority > queue->maxpriority))
		q = &queue->tasks[queue->maxpriority];
	list_for_each_entry(t, q, tk_list) {
		if (t->tk_cookie == task->tk_cookie) {
			list_add_tail(&task->tk_list, &t->tk_links);
			return;
		}
	}
	list_add_tail(&task->tk_list, q);
}

/*
 * Add new request to wait queue.
 *
...
...
@@ -169,8 +189,7 @@ rpc_delete_timer(struct rpc_task *task)
 * improve overall performance.
 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
 */
static int __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
	if (task->tk_rpcwait == queue)
		return 0;
...
...
@@ -179,10 +198,12 @@ __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
		printk(KERN_WARNING "RPC: doubly enqueued task!\n");
		return -EWOULDBLOCK;
	}
	if (RPC_IS_PRIORITY(queue))
		__rpc_add_wait_queue_priority(queue, task);
	else if (RPC_IS_SWAPPER(task))
		list_add(&task->tk_list, &queue->tasks[0]);
	else
		list_add_tail(&task->tk_list, &queue->tasks[0]);
	task->tk_rpcwait = queue;

	dprintk("RPC: %4d added to queue %p \"%s\"\n",
...
...
@@ -191,8 +212,7 @@ __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
	return 0;
}

int rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
{
	int		result;
...
...
@@ -202,18 +222,35 @@ rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
	return result;
}

/*
 * Remove request from a priority queue.
 */
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
	struct rpc_task *t;

	if (!list_empty(&task->tk_links)) {
		t = list_entry(task->tk_links.next, struct rpc_task, tk_list);
		list_move(&t->tk_list, &task->tk_list);
		list_splice_init(&task->tk_links, &t->tk_links);
	}
	list_del(&task->tk_list);
}

/*
 * Remove request from queue.
 * Note: must be called with spin lock held.
 */
static void __rpc_remove_wait_queue(struct rpc_task *task)
{
	struct rpc_wait_queue *queue = task->tk_rpcwait;

	if (!queue)
		return;

	if (RPC_IS_PRIORITY(queue))
		__rpc_remove_wait_queue_priority(task);
	else
		list_del(&task->tk_list);
	task->tk_rpcwait = NULL;
...
...
@@ -231,6 +268,48 @@ rpc_remove_wait_queue(struct rpc_task *task)
	spin_unlock_bh(&rpc_queue_lock);
}

static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
	queue->priority = priority;
	queue->count = 1 << (priority * 2);
}

static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
{
	queue->cookie = cookie;
	queue->nr = RPC_BATCH_COUNT;
}

static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
	rpc_set_waitqueue_priority(queue, queue->maxpriority);
	rpc_set_waitqueue_cookie(queue, 0);
}

static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
		INIT_LIST_HEAD(&queue->tasks[i]);
	queue->maxpriority = maxprio;
	rpc_reset_waitqueue_priority(queue);
#ifdef RPC_DEBUG
	queue->name = qname;
#endif
}

void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
}

void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
	__rpc_init_priority_wait_queue(queue, qname, 0);
}
EXPORT_SYMBOL(rpc_init_wait_queue);
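A brief aside on the weighting above (an observation, not part of the patch): rpc_set_waitqueue_priority() gives each priority level 1 << (priority * 2) task groups per scheduling pass, so with the RPC_PRIORITY_* values from sched.h the split works out as the user-space sketch below prints.

/* Illustration only: arithmetic mirroring the kernel expression. */
#include <stdio.h>

int main(void)
{
	int priority;

	for (priority = 0; priority <= 2; priority++)	/* LOW, NORMAL, HIGH */
		printf("priority %d -> %d task group(s) per pass\n",
		       priority, 1 << (priority * 2));
	return 0;
}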
/*
* Make an RPC task runnable.
*
...
...
@@ -255,12 +334,10 @@ rpc_make_runnable(struct rpc_task *task)
			return;
		}
		rpc_clear_sleeping(task);
		if (waitqueue_active(&rpciod_idle))
			wake_up(&rpciod_idle);
	} else {
		rpc_clear_sleeping(task);
		if (waitqueue_active(&task->tk_wait))
			wake_up(&task->tk_wait);
	}
}
...
...
@@ -287,7 +364,6 @@ void rpciod_wake_up(void)
{
	if (rpciod_pid == 0)
		printk(KERN_ERR "rpciod: wot no daemon?\n");
	wake_up(&rpciod_idle);
}
...
...
@@ -405,18 +481,73 @@ rpc_wake_up_task(struct rpc_task *task)
	spin_unlock_bh(&rpc_queue_lock);
}

/*
 * Wake up the next task on a priority queue.
 */
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
	struct list_head *q;
	struct rpc_task *task;

	/*
	 * Service a batch of tasks from a single cookie.
	 */
	q = &queue->tasks[queue->priority];
	if (!list_empty(q)) {
		task = list_entry(q->next, struct rpc_task, tk_list);
		if (queue->cookie == task->tk_cookie) {
			if (--queue->nr)
				goto out;
			list_move_tail(&task->tk_list, q);
		}
		/*
		 * Check if we need to switch queues.
		 */
		if (--queue->count)
			goto new_cookie;
	}

	/*
	 * Service the next queue.
	 */
	do {
		if (q == &queue->tasks[0])
			q = &queue->tasks[queue->maxpriority];
		else
			q = q - 1;
		if (!list_empty(q)) {
			task = list_entry(q->next, struct rpc_task, tk_list);
			goto new_queue;
		}
	} while (q != &queue->tasks[queue->priority]);

	rpc_reset_waitqueue_priority(queue);
	return NULL;

new_queue:
	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_cookie:
	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
out:
	__rpc_wake_up_task(task);
	return task;
}

/*
 * Wake up the next task on the wait queue.
 */
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
	struct rpc_task	*task = NULL;

	dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
	spin_lock_bh(&rpc_queue_lock);
	if (RPC_IS_PRIORITY(queue))
		task = __rpc_wake_up_next_priority(queue);
	else {
		task_for_first(task, &queue->tasks[0])
			__rpc_wake_up_task(task);
	}
	spin_unlock_bh(&rpc_queue_lock);

	return task;
...
...
@@ -428,15 +559,22 @@ rpc_wake_up_next(struct rpc_wait_queue *queue)
 *
 * Grabs rpc_queue_lock
 */
void rpc_wake_up(struct rpc_wait_queue *queue)
{
	struct rpc_task *task;
	struct list_head *head;

	spin_lock_bh(&rpc_queue_lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, tk_list);
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&rpc_queue_lock);
}
...
...
@@ -447,17 +585,22 @@ rpc_wake_up(struct rpc_wait_queue *queue)
 *
 * Grabs rpc_queue_lock
 */
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
	struct list_head *head;
	struct rpc_task *task;

	spin_lock_bh(&rpc_queue_lock);
	head = &queue->tasks[queue->maxpriority];
	for (;;) {
		while (!list_empty(head)) {
			task = list_entry(head->next, struct rpc_task, tk_list);
			task->tk_status = status;
			__rpc_wake_up_task(task);
		}
		if (head == &queue->tasks[0])
			break;
		head--;
	}
	spin_unlock_bh(&rpc_queue_lock);
}
...
...
@@ -643,21 +786,22 @@ __rpc_schedule(void)
	dprintk("RPC: rpc_schedule enter\n");
	while (1) {
		task_for_first(task, &schedq.tasks[0]) {
			__rpc_remove_wait_queue(task);
			spin_unlock_bh(&rpc_queue_lock);

			__rpc_execute(task);
			spin_lock_bh(&rpc_queue_lock);
		} else
			break;

		if (++count >= 200 || need_resched()) {
			count = 0;
			spin_unlock_bh(&rpc_queue_lock);
			schedule();
			spin_lock_bh(&rpc_queue_lock);
		}
	}
	dprintk("RPC: rpc_schedule leave\n");
...
...
@@ -709,9 +853,7 @@ rpc_free(struct rpc_task *task)
/*
 * Creation and deletion of RPC task structures
 */
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, rpc_action callback, int flags)
{
	memset(task, 0, sizeof(*task));
	init_timer(&task->tk_timer);
...
...
@@ -729,6 +871,10 @@ rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
	task->tk_cred_retry = 2;
	task->tk_suid_retry = 1;

	task->tk_priority = RPC_PRIORITY_NORMAL;
	task->tk_cookie = (unsigned long)current;
	INIT_LIST_HEAD(&task->tk_links);

	/* Add to global list of all tasks */
	spin_lock(&rpc_sched_lock);
	list_add(&task->tk_task, &all_tasks);
...
...
@@ -866,7 +1012,7 @@ rpc_find_parent(struct rpc_task *child)
	struct list_head *le;

	parent = (struct rpc_task *) child->tk_calldata;
	task_for_each(task, le, &childq.tasks[0])
		if (task == parent)
			return parent;
...
...
@@ -946,7 +1092,7 @@ static DECLARE_MUTEX_LOCKED(rpciod_running);
static inline int
rpciod_task_pending(void)
{
	return !list_empty(&schedq.tasks[0]);
}
...
...
@@ -969,27 +1115,41 @@ rpciod(void *ptr)
	allow_signal(SIGKILL);

	dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
	spin_lock_bh(&rpc_queue_lock);
	while (rpciod_users) {
		DEFINE_WAIT(wait);
		if (signalled()) {
			spin_unlock_bh(&rpc_queue_lock);
			rpciod_killall();
			flush_signals(current);
			spin_lock_bh(&rpc_queue_lock);
		}
		__rpc_schedule();
		if (current->flags & PF_FREEZE) {
			spin_unlock_bh(&rpc_queue_lock);
			refrigerator(PF_IOTHREAD);
			spin_lock_bh(&rpc_queue_lock);
		}

		if (++rounds >= 64) {	/* safeguard */
			spin_unlock_bh(&rpc_queue_lock);
			schedule();
			rounds = 0;
			spin_lock_bh(&rpc_queue_lock);
		}

		if (!rpciod_task_pending()) {
			dprintk("RPC: rpciod back to sleep\n");
			prepare_to_wait(&rpciod_idle, &wait, TASK_INTERRUPTIBLE);
			if (!rpciod_task_pending() && !signalled()) {
				spin_unlock_bh(&rpc_queue_lock);
				schedule();
				rounds = 0;
				spin_lock_bh(&rpc_queue_lock);
			}
			finish_wait(&rpciod_idle, &wait);
			dprintk("RPC: switch to rpciod\n");
		}
	}
	spin_unlock_bh(&rpc_queue_lock);

	dprintk("RPC: rpciod shutdown commences\n");
	if (!list_empty(&all_tasks)) {
...
...
@@ -1013,7 +1173,9 @@ rpciod_killall(void)
	while (!list_empty(&all_tasks)) {
		clear_thread_flag(TIF_SIGPENDING);
		rpc_killall_tasks(NULL);
		spin_lock_bh(&rpc_queue_lock);
		__rpc_schedule();
		spin_unlock_bh(&rpc_queue_lock);
		if (!list_empty(&all_tasks)) {
			dprintk("rpciod_killall: waiting for tasks to exit\n");
			yield();
...
...
net/sunrpc/sunrpc_syms.c
View file @
193ce3be
...
...
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(svcauth_unix_purge);
EXPORT_SYMBOL(unix_domain_find);

/* Generic XDR */
EXPORT_SYMBOL(xdr_encode_string);
EXPORT_SYMBOL(xdr_decode_string);
EXPORT_SYMBOL(xdr_decode_string_inplace);
...
...
@@ -129,8 +128,6 @@ EXPORT_SYMBOL(xdr_encode_netobj);
EXPORT_SYMBOL(xdr_encode_pages);
EXPORT_SYMBOL(xdr_inline_pages);
EXPORT_SYMBOL(xdr_shift_buf);
EXPORT_SYMBOL(xdr_buf_from_iov);
EXPORT_SYMBOL(xdr_buf_subsegment);
EXPORT_SYMBOL(xdr_buf_read_netobj);
...
...
net/sunrpc/xdr.c
View file @
193ce3be
...
...
@@ -53,16 +53,50 @@ xdr_decode_netobj(u32 *p, struct xdr_netobj *obj)
	return p + XDR_QUADLEN(len);
}

/**
 * xdr_encode_opaque_fixed - Encode fixed length opaque data
 * @p - pointer to current position in XDR buffer.
 * @ptr - pointer to data to encode (or NULL)
 * @nbytes - size of data.
 *
 * Copy the array of data of length nbytes at ptr to the XDR buffer
 * at position p, then align to the next 32-bit boundary by padding
 * with zero bytes (see RFC1832).
 * Note: if ptr is NULL, only the padding is performed.
 *
 * Returns the updated current XDR buffer position
 *
 */
u32 *xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int nbytes)
{
	if (likely(nbytes != 0)) {
		unsigned int quadlen = XDR_QUADLEN(nbytes);
		unsigned int padding = (quadlen << 2) - nbytes;

		if (ptr != NULL)
			memcpy(p, ptr, nbytes);
		if (padding != 0)
			memset((char *)p + nbytes, 0, padding);
		p += quadlen;
	}
	return p;
}
EXPORT_SYMBOL(xdr_encode_opaque_fixed);

/**
 * xdr_encode_opaque - Encode variable length opaque data
 * @p - pointer to current position in XDR buffer.
 * @ptr - pointer to data to encode (or NULL)
 * @nbytes - size of data.
 *
 * Returns the updated current XDR buffer position
 */
u32 *xdr_encode_opaque(u32 *p, const void *ptr, unsigned int nbytes)
{
	*p++ = htonl(nbytes);
	return xdr_encode_opaque_fixed(p, ptr, nbytes);
}
EXPORT_SYMBOL(xdr_encode_opaque);
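The padding rule documented above can be pictured with a short, self-contained user-space sketch that mimics xdr_encode_opaque(); XDR_QUADLEN is re-derived locally and everything else here is an assumption for illustration only.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)	/* same rounding rule as the kernel macro */

int main(void)
{
	unsigned int buf[4] = { 0 };
	const char data[] = "abcde";		/* 5 bytes, padded to 8 on the wire */
	unsigned int *p = buf;

	*p++ = htonl(5);			/* length word, as written by xdr_encode_opaque() */
	memcpy(p, data, 5);			/* body, as copied by xdr_encode_opaque_fixed() */
	p += XDR_QUADLEN(5);			/* bytes 5..7 stay zero: the RFC 1832 padding */
	printf("consumed %ld 32-bit words\n", (long)(p - buf));
	return 0;
}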
u32 *
xdr_encode_string(u32 *p, const char *string)
...
...
@@ -126,6 +160,7 @@ xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		tail->iov_len = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
...
...
@@ -147,7 +182,7 @@ xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
	tail->iov_base = buf + offset;
	tail->iov_len = buflen - offset;

	xdr->buflen += len;
}

/*
...
...
@@ -641,7 +676,10 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
			copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
/*
...
...
@@ -671,7 +709,7 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
		copy = tail->iov_len - len;
		memmove(p, tail->iov_base, copy);
	} else
		buf->buflen -= len;
	/* Copy from the inlined pages into the tail */
	copy = len;
	if (copy > tail->iov_len)
...
...
@@ -681,7 +719,10 @@ xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
			copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
void
...
...
@@ -690,8 +731,67 @@ xdr_shift_buf(struct xdr_buf *buf, size_t len)
	xdr_shrink_bufhead(buf, len);
}

/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *	 scratch buffer in the xdr_buf's header iovec. Previously this
 *	 meant we needed to call xdr_adjust_iovec() after encoding the
 *	 data. With the new scheme, the xdr_stream manages the details
 *	 of the buffer length, and takes care of adjusting the iovec
 *	 length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct iovec *iov = buf->head;

	xdr->buf = buf;
	xdr->iov = iov;
	xdr->end = (uint32_t *)((char *)iov->iov_base + iov->iov_len);
	buf->len = iov->iov_len = (char *)p - (char *)iov->iov_base;
	xdr->p = p;
}
EXPORT_SYMBOL(xdr_init_encode);

/**
 * xdr_reserve_space - Reserve buffer space for sending
 * @xdr: pointer to xdr_stream
 * @nbytes: number of bytes to reserve
 *
 * Checks that we have enough buffer space to encode 'nbytes' more
 * bytes of data. If so, update the total xdr_buf length, and
 * adjust the length of the current iovec.
 */
uint32_t * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q;

	/* align nbytes on the next 32-bit boundary */
	nbytes += 3;
	nbytes &= ~3;
	q = p + (nbytes >> 2);
	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	xdr->iov->iov_len += nbytes;
	xdr->buf->len += nbytes;
	return p;
}
EXPORT_SYMBOL(xdr_reserve_space);
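For orientation, a sketch of how a caller is expected to drive the encode side of the stream API above; encode_example_args() and the three-word payload are invented, only xdr_init_encode() and xdr_reserve_space() come from this file.

/* Sketch only: encode three 32-bit words into an RPC send buffer. */
static int encode_example_args(struct rpc_rqst *req, uint32_t *p)
{
	struct xdr_stream xdr;
	uint32_t *q;

	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
	q = xdr_reserve_space(&xdr, 3 * sizeof(uint32_t));
	if (q == NULL)
		return -EMSGSIZE;	/* not enough room in the scratch buffer */
	*q++ = htonl(1);
	*q++ = htonl(2);
	*q = htonl(3);
	return 0;
}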
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
...
...
@@ -713,15 +813,69 @@ xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL(xdr_write_pages);

/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p)
{
	struct iovec *iov = buf->head;
	unsigned int len = iov->iov_len;

	if (len > buf->len)
		len = buf->len;
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = p;
	xdr->end = (uint32_t *)((char *)iov->iov_base + len);
}
EXPORT_SYMBOL(xdr_init_decode);

/**
 * xdr_inline_decode - Retrieve non-page XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
uint32_t * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	uint32_t *p = xdr->p;
	uint32_t *q = p + XDR_QUADLEN(nbytes);

	if (unlikely(q > xdr->end || q < p))
		return NULL;
	xdr->p = q;
	return p;
}
EXPORT_SYMBOL(xdr_inline_decode);

/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the XDR tail.
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct iovec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
...
...
@@ -735,9 +889,21 @@ xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
	xdr_shrink_pagelen(buf, buf->page_len - len);
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (uint32_t *)((char *)iov->iov_base + padding);
	xdr->end = (uint32_t *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL(xdr_read_pages);
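And the matching decode side, again as a hedged sketch: only xdr_init_decode() and xdr_inline_decode() are real, the reply layout and helper name are made up for illustration.

/* Sketch only: pull two 32-bit words out of an RPC receive buffer. */
static int decode_example_res(struct rpc_rqst *req, uint32_t *p, uint32_t *value)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
	p = xdr_inline_decode(&xdr, 2 * sizeof(uint32_t));
	if (p == NULL)
		return -EIO;		/* reply shorter than expected */
	if (ntohl(*p++) != 0)		/* hypothetical status word */
		return -EIO;
	*value = ntohl(*p);		/* hypothetical payload word */
	return 0;
}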
static struct iovec empty_iov = {.iov_base = NULL, .iov_len = 0};
...
...
@@ -747,7 +913,7 @@ xdr_buf_from_iov(struct iovec *iov, struct xdr_buf *buf)
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}

/* Sets subiov to the intersection of iov with the buffer of length len
...
...
@@ -777,7 +943,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
{
	int i;

	subbuf->buflen = subbuf->len = len;
	iov_subsegment(buf->head, subbuf->head, &base, &len);

	if (base < buf->page_len) {
...
...
net/sunrpc/xprt.c
View file @
193ce3be
...
...
@@ -405,7 +405,6 @@ xprt_close(struct rpc_xprt *xprt)
	sk->sk_write_space = xprt->old_write_space;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_no_check = 0;

	sock_release(sock);
...
...
@@ -416,6 +415,7 @@ xprt_socket_autoclose(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;

	xprt_disconnect(xprt);
	xprt_close(xprt);
	xprt_release_write(xprt, NULL);
}
...
...
@@ -448,6 +448,9 @@ xprt_init_autodisconnect(unsigned long data)
		goto out_abort;
	spin_unlock(&xprt->sock_lock);
	/* Let keventd close the socket */
	if (test_bit(XPRT_CONNECTING, &xprt->sockstate) != 0)
		xprt_release_write(xprt, NULL);
	else
		schedule_work(&xprt->task_cleanup);
	return;
out_abort:
...
...
@@ -460,12 +463,8 @@ static void xprt_socket_connect(void *args)
	struct socket *sock = xprt->sock;
	int status = -EIO;

	if (xprt->shutdown || xprt->addr.sin_port == 0)
		goto out;

	/*
	 * Start by resetting any existing state
...
...
@@ -475,12 +474,12 @@ static void xprt_socket_connect(void *args)
	if (sock == NULL) {
		/* couldn't create socket or bind to reserved port;
		 * this is likely a permanent error, so cause an abort */
		goto out;
	}
	xprt_bind_socket(xprt, sock);
	xprt_sock_setbufsize(xprt);

	status = 0;
	if (!xprt->stream)
		goto out;
...
...
@@ -491,28 +490,22 @@ static void xprt_socket_connect(void *args)
			sizeof(xprt->addr), O_NONBLOCK);
	dprintk("RPC: %p  connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt), sock->sk->sk_state);
	if (status < 0) {
		switch (status) {
			case -EINPROGRESS:
			case -EALREADY:
				goto out_clear;
		}
	}
out:
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
out_clear:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CONNECTING, &xprt->sockstate);
	smp_mb__after_clear_bit();
}
/*
...
...
@@ -544,6 +537,7 @@ void xprt_connect(struct rpc_task *task)
	task->tk_timeout = RPC_CONNECT_TIMEOUT;
	rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
	if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate))
		schedule_work(&xprt->sock_connect);
	return;
 out_write:
...
...
@@ -647,8 +641,8 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
#endif
	dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
	list_del_init(&req->rq_list);
	req->rq_received = req->rq_private_buf.len = copied;

	/* ... and wake up the process. */
	rpc_wake_up_task(task);
...
...
@@ -765,7 +759,7 @@ udp_data_ready(struct sock *sk, int len)
	dprintk("RPC: %4d received reply\n", task->tk_pid);

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
...
...
@@ -782,8 +776,6 @@ udp_data_ready(struct sock *sk, int len)
 dropit:
	skb_free_datagram(sk, skb);
 out:
	read_unlock(&sk->sk_callback_lock);
}
...
...
@@ -910,7 +902,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
	xprt->tcp_copied += len;
	xprt->tcp_offset += len;

	if (xprt->tcp_copied == req->rq_private_buf.buflen)
		xprt->tcp_flags &= ~XPRT_COPY_DATA;
	else if (xprt->tcp_offset == xprt->tcp_reclen) {
		if (xprt->tcp_flags & XPRT_LAST_FRAG)
...
...
@@ -1028,9 +1020,6 @@ tcp_state_change(struct sock *sk)
			xprt->tcp_reclen = 0;
			xprt->tcp_copied = 0;
			xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
			rpc_wake_up(&xprt->pending);
		}
		spin_unlock_bh(&xprt->sock_lock);
...
...
@@ -1039,12 +1028,11 @@ tcp_state_change(struct sock *sk)
	case TCP_SYN_RECV:
		break;
	default:
		if (xprt_test_and_clear_connected(xprt))
			rpc_wake_up_status(&xprt->pending, -ENOTCONN);
		break;
	}
 out:
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	read_unlock(&sk->sk_callback_lock);
}
...
...
@@ -1084,8 +1072,6 @@ xprt_write_space(struct sock *sk)
	if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
		rpc_wake_up_task(xprt->snd_task);
	spin_unlock_bh(&xprt->sock_lock);
 out:
	read_unlock(&sk->sk_callback_lock);
}
...
...
@@ -1464,10 +1450,10 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
	} else
		xprt_default_timeout(&xprt->timeout, xprt->prot);

	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[entries-1]; req >= &xprt->slot[0]; req--)
...
...
@@ -1626,7 +1612,6 @@ xprt_shutdown(struct rpc_xprt *xprt)
	rpc_wake_up(&xprt->resend);
	rpc_wake_up(&xprt->pending);
	rpc_wake_up(&xprt->backlog);
	if (waitqueue_active(&xprt->cong_wait))
		wake_up(&xprt->cong_wait);
	del_timer_sync(&xprt->timer);
}
...
...
@@ -1637,7 +1622,6 @@ xprt_shutdown(struct rpc_xprt *xprt)
int
xprt_clear_backlog(struct rpc_xprt *xprt) {
	rpc_wake_up_next(&xprt->backlog);
	if (waitqueue_active(&xprt->cong_wait))
		wake_up(&xprt->cong_wait);
	return 1;
}
...
...
@@ -1650,6 +1634,7 @@ xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	xprt_shutdown(xprt);
	xprt_disconnect(xprt);
	xprt_close(xprt);
	kfree(xprt->slot);
	kfree(xprt);
...
...