Commit 913d239d authored by Paul Mackerras

Merge samba.org:/home/paulus/kernel/linux-2.5

into samba.org:/home/paulus/kernel/for-linus-ppc
parents 77225995 23a29063
drivers/char/Kconfig
@@ -6,7 +6,7 @@ menu "Character devices"
 config VT
 	bool "Virtual terminal" if EMBEDDED
-	requires INPUT=y
+	select INPUT
 	default y
 	---help---
 	  If you say Y here, you will get support for terminal devices with
......
drivers/isdn/hisax/sedlbauer_cs.c
@@ -647,7 +647,6 @@ static void __exit exit_sedlbauer_cs(void)
 	/* XXX: this really needs to move into generic code.. */
 	while (dev_list != NULL) {
-		del_timer(&dev_list->release);
 		if (dev_list->state & DEV_CONFIG)
 			sedlbauer_release(dev_list);
 		sedlbauer_detach(dev_list);
......
drivers/usb/storage/Kconfig
@@ -6,7 +6,8 @@ comment "SCSI support is needed for USB Storage"
 config USB_STORAGE
 	tristate "USB Mass Storage support"
-	depends on USB && SCSI
+	depends on USB
+	select SCSI
 	---help---
 	  Say Y here if you want to connect USB mass storage devices to your
 	  computer's USB port. This is the driver you need for USB floppy drives,
......
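Both Kconfig hunks above swap a hard prerequisite for a select: with the old "requires INPUT=y" and "depends on USB && SCSI" the option stayed invisible until the user had already enabled the prerequisite, whereas "select" turns the prerequisite on automatically whenever the option itself is chosen. A hypothetical fragment showing the two idioms side by side (config FOO is made up, not from the tree):

	config FOO
		tristate "Foo driver"
		depends on USB		# FOO is only offered once USB is enabled
		select SCSI		# choosing FOO forces SCSI on as well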
fs/nfs/dir.c
@@ -557,7 +557,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	/* Force a full look up iff the parent directory has changed */
 	if (nfs_check_verifier(dir, dentry)) {
 		if (nfs_lookup_verify_inode(inode, isopen))
-			goto out_bad;
+			goto out_zap_parent;
 		goto out_valid;
 	}
@@ -566,7 +566,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 		if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh)) != 0)
 			goto out_bad;
 		if (nfs_lookup_verify_inode(inode, isopen))
-			goto out_bad;
+			goto out_zap_parent;
 		goto out_valid_renew;
 	}
@@ -587,6 +587,8 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	unlock_kernel();
 	dput(parent);
 	return 1;
+out_zap_parent:
+	nfs_zap_caches(dir);
 out_bad:
 	NFS_CACHEINV(dir);
 	if (inode && S_ISDIR(inode->i_mode)) {
@@ -670,36 +672,29 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
 	error = -ENOMEM;
 	dentry->d_op = &nfs_dentry_operations;
+	lock_kernel();
 	/* If we're doing an exclusive create, optimize away the lookup */
 	if (nfs_is_exclusive_create(dir, nd))
-		return NULL;
+		goto no_entry;
-	lock_kernel();
 	error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr);
-	if (!error) {
-		error = -EACCES;
-		inode = nfs_fhget(dentry, &fhandle, &fattr);
-		if (inode) {
-			d_add(dentry, inode);
-			nfs_renew_times(dentry);
-			error = 0;
-		}
-		goto out_unlock;
-	}
-	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
-	if (error == -ENOENT)
-		goto no_entry;
-	if (!error) {
-		error = -EACCES;
-		inode = nfs_fhget(dentry, &fhandle, &fattr);
-		if (inode) {
-no_entry:
-			d_add(dentry, inode);
-			error = 0;
-		}
-		nfs_renew_times(dentry);
+	if (error != 0) {
+		error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name,
+				&fhandle, &fattr);
+		if (error == -ENOENT)
+			goto no_entry;
+		if (error != 0)
+			goto out_unlock;
 	}
+	error = -EACCES;
+	inode = nfs_fhget(dentry, &fhandle, &fattr);
+	if (!inode)
+		goto out_unlock;
+no_entry:
+	error = 0;
+	d_add(dentry, inode);
+	nfs_renew_times(dentry);
 out_unlock:
 	unlock_kernel();
 out:
......
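Reassembled from the added lines of the hunk above, the patched body of nfs_lookup() reads roughly as follows (declarations, blank lines, and the surrounding function elided; inode starts out NULL, so the no_entry path taken for an exclusive create or an -ENOENT reply instantiates a negative dentry):

	error = -ENOMEM;
	dentry->d_op = &nfs_dentry_operations;
	lock_kernel();
	/* If we're doing an exclusive create, optimize away the lookup */
	if (nfs_is_exclusive_create(dir, nd))
		goto no_entry;
	error = nfs_cached_lookup(dir, dentry, &fhandle, &fattr);
	if (error != 0) {
		error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name,
				&fhandle, &fattr);
		if (error == -ENOENT)
			goto no_entry;
		if (error != 0)
			goto out_unlock;
	}
	error = -EACCES;
	inode = nfs_fhget(dentry, &fhandle, &fattr);
	if (!inode)
		goto out_unlock;
no_entry:
	error = 0;
	d_add(dentry, inode);
	nfs_renew_times(dentry);
out_unlock:
	unlock_kernel();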
fs/nfs/nfsroot.c
@@ -437,6 +437,7 @@ static int __init root_nfs_ports(void)
  */
 static int __init root_nfs_get_handle(void)
 {
+	struct nfs_fh fh;
 	struct sockaddr_in sin;
 	int status;
 	int protocol = (nfs_data.flags & NFS_MOUNT_TCP) ?
@@ -445,11 +446,14 @@ static int __init root_nfs_get_handle(void)
 			NFS_MNT3_VERSION : NFS_MNT_VERSION;
 	set_sockaddr(&sin, servaddr, mount_port);
-	status = nfsroot_mount(&sin, nfs_path, &nfs_data.root,
-			version, protocol);
+	status = nfsroot_mount(&sin, nfs_path, &fh, version, protocol);
 	if (status < 0)
 		printk(KERN_ERR "Root-NFS: Server returned error %d "
 		       "while mounting %s\n", status, nfs_path);
+	else {
+		nfs_data.root.size = fh.size;
+		memcpy(nfs_data.root.data, fh.data, fh.size);
+	}
 	return status;
 }
......
include/linux/sunrpc/timer.h
@@ -15,7 +15,6 @@ struct rpc_rtt {
 	unsigned long timeo;	/* default timeout value */
 	unsigned long srtt[5];	/* smoothed round trip time << 3 */
 	unsigned long sdrtt[5];	/* smoothed medium deviation of RTT */
-	atomic_t ntimeouts;	/* Global count of the number of timeouts */
 };
@@ -23,19 +22,4 @@ extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
 extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
 extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
-static inline void rpc_inc_timeo(struct rpc_rtt *rt)
-{
-	atomic_inc(&rt->ntimeouts);
-}
-static inline void rpc_clear_timeo(struct rpc_rtt *rt)
-{
-	atomic_set(&rt->ntimeouts, 0);
-}
-static inline int rpc_ntimeo(struct rpc_rtt *rt)
-{
-	return atomic_read(&rt->ntimeouts);
-}
 #endif /* _LINUX_SUNRPC_TIMER_H */
include/linux/sunrpc/xprt.h
@@ -98,6 +98,10 @@ struct rpc_rqst {
 	struct list_head	rq_list;
+	struct xdr_buf		rq_private_buf;	/* The receive buffer
+						 * used in the softirq.
+						 */
 	/*
 	 * For authentication (e.g. auth_des)
 	 */
@@ -111,7 +115,7 @@ struct rpc_rqst {
 	unsigned long		rq_xtime;	/* when transmitted */
-	int			rq_ntimeo;
-	int			rq_nresend;
+	int			rq_ntrans;
 };
 #define rq_svec		rq_snd_buf.head
 #define rq_slen		rq_snd_buf.len
......
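The rq_ntimeo/rq_nresend pair gives way to a single transmission counter, rq_ntrans, which also carries the round-trip sampling rule applied in xprt_complete_rqst() further down: only a reply to a request that went out exactly once yields an RTT sample, since a retransmitted request's reply cannot be attributed to a particular send (Karn's algorithm). A minimal sketch of that rule, with maybe_sample_rtt() as a hypothetical wrapper around the document's own rpc_update_rtt():

	/* Hypothetical helper; mirrors the rq_ntrans == 1 test added in
	 * xprt_complete_rqst() below (Karn: never sample retransmissions). */
	static void maybe_sample_rtt(struct rpc_rtt *rtt, unsigned timer,
				     int ntrans, long rtt_sample)
	{
		if (ntrans == 1)
			rpc_update_rtt(rtt, timer, rtt_sample);
	}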
kernel/kallsyms.c
@@ -252,6 +252,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
 	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
 	if (!iter)
 		return -ENOMEM;
+	reset_iter(iter);
 	ret = seq_open(file, &kallsyms_op);
 	if (ret == 0)
......
net/sunrpc/clnt.c
@@ -659,7 +659,7 @@ call_transmit(struct rpc_task *task)
 	if (task->tk_status < 0)
 		return;
 	task->tk_status = xprt_prepare_transmit(task);
-	if (task->tk_status < 0)
+	if (task->tk_status != 0)
 		return;
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
 	if (!task->tk_rqstp->rq_bytes_sent)
@@ -685,7 +685,7 @@ call_status(struct rpc_task *task)
 	struct rpc_rqst	*req = task->tk_rqstp;
 	int		status;
-	if (req->rq_received != 0)
+	if (req->rq_received > 0 && !req->rq_bytes_sent)
 		task->tk_status = req->rq_received;
 	dprintk("RPC: %4d call_status (status %d)\n",
@@ -744,14 +744,14 @@ call_timeout(struct rpc_task *task)
 	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
 	if (clnt->cl_softrtry) {
-		if (clnt->cl_chatty && !task->tk_exit)
+		if (clnt->cl_chatty)
 			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
 				clnt->cl_protname, clnt->cl_server);
 		rpc_exit(task, -EIO);
 		return;
 	}
-	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN) && rpc_ntimeo(&clnt->cl_rtt) > 7) {
+	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
 		task->tk_flags |= RPC_CALL_MAJORSEEN;
 		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
 			clnt->cl_protname, clnt->cl_server);
@@ -787,19 +787,26 @@ call_decode(struct rpc_task *task)
 	if (task->tk_status < 12) {
 		if (!clnt->cl_softrtry) {
-			task->tk_action = call_transmit;
+			task->tk_action = call_bind;
 			clnt->cl_stats->rpcretrans++;
-		} else {
-			printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
-				clnt->cl_protname, task->tk_status);
-			rpc_exit(task, -EIO);
+			goto out_retry;
 		}
+		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
+			clnt->cl_protname, task->tk_status);
+		rpc_exit(task, -EIO);
 		return;
 	}
+	/* Check that the softirq receive buffer is valid */
+	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
+				sizeof(req->rq_rcv_buf)) != 0);
 	/* Verify the RPC header */
-	if (!(p = call_verify(task)))
-		return;
+	if (!(p = call_verify(task))) {
+		if (task->tk_action == NULL)
+			return;
+		goto out_retry;
+	}
 	/*
	 * The following is an NFS-specific hack to cater for setuid
@@ -812,7 +819,7 @@ call_decode(struct rpc_task *task)
 			task->tk_flags ^= RPC_CALL_REALUID;
 			task->tk_action = call_bind;
 			task->tk_suid_retry--;
-			return;
+			goto out_retry;
 		}
 	}
@@ -822,6 +829,10 @@ call_decode(struct rpc_task *task)
 	task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
 	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
 					task->tk_status);
+	return;
+out_retry:
+	req->rq_received = 0;
+	task->tk_status = 0;
 }
 /*
......
net/sunrpc/timer.c
@@ -25,7 +25,7 @@
 #define RPC_RTO_MAX (60*HZ)
 #define RPC_RTO_INIT (HZ/5)
-#define RPC_RTO_MIN (2)
+#define RPC_RTO_MIN (HZ/10)
 void
 rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
@@ -41,8 +41,6 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 		rt->srtt[i] = init;
 		rt->sdrtt[i] = RPC_RTO_INIT;
 	}
-	atomic_set(&rt->ntimeouts, 0);
 }
@@ -52,7 +50,7 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 void
 rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
 {
-	unsigned long *srtt, *sdrtt;
+	long *srtt, *sdrtt;
 	if (timer-- == 0)
 		return;
@@ -64,14 +62,14 @@ rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
 	if (m == 0)
 		m = 1L;
-	srtt = &rt->srtt[timer];
+	srtt = (long *)&rt->srtt[timer];
 	m -= *srtt >> 3;
 	*srtt += m;
 	if (m < 0)
 		m = -m;
-	sdrtt = &rt->sdrtt[timer];
+	sdrtt = (long *)&rt->sdrtt[timer];
 	m -= *sdrtt >> 2;
 	*sdrtt += m;
@@ -101,7 +99,7 @@ rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
 	if (timer-- == 0)
 		return rt->timeo;
-	res = (rt->srtt[timer] >> 3) + rt->sdrtt[timer];
+	res = ((rt->srtt[timer] + 7) >> 3) + rt->sdrtt[timer];
 	if (res > RPC_RTO_MAX)
 		res = RPC_RTO_MAX;
......
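The estimator touched here is a Van Jacobson-style smoothed RTT in fixed point: srtt[] holds the mean scaled by 8 and sdrtt[] the mean deviation scaled by 4, so the error term m can go negative. That is why the pointers must be long rather than unsigned long (an unsigned right shift would corrupt negative intermediates), and why rpc_calc_rto() now rounds the scaled mean up ("+ 7") instead of truncating. A self-contained user-space rendering of the patched arithmetic (HZ and the sample values are made up; the kernel's clamping of sdrtt at init time is elided):

	#include <stdio.h>

	#define HZ          100			/* assumed, for illustration */
	#define RPC_RTO_MAX (60 * HZ)
	#define RPC_RTO_MIN (HZ / 10)

	struct rtt { long srtt, sdrtt; };	/* srtt = SRTT << 3, sdrtt = mdev << 2 */

	static void update_rtt(struct rtt *rt, long m)	/* m: measured RTT, jiffies */
	{
		if (m == 0)
			m = 1;
		m -= rt->srtt >> 3;	/* error term; signed shift matters here */
		rt->srtt += m;		/* SRTT <- 7/8 SRTT + 1/8 m, in fixed point */
		if (m < 0)
			m = -m;
		m -= rt->sdrtt >> 2;
		rt->sdrtt += m;		/* mdev <- 3/4 mdev + 1/4 |err| */
	}

	static long calc_rto(const struct rtt *rt)
	{
		/* "+ 7" rounds the scaled mean up instead of truncating */
		long res = ((rt->srtt + 7) >> 3) + rt->sdrtt;
		if (res > RPC_RTO_MAX)
			res = RPC_RTO_MAX;
		if (res < RPC_RTO_MIN)
			res = RPC_RTO_MIN;
		return res;
	}

	int main(void)
	{
		struct rtt rt = { 20 << 3, HZ / 5 };	/* arbitrary starting state */
		long samples[] = { 12, 10, 30, 8, 9 };
		for (int i = 0; i < 5; i++) {
			update_rtt(&rt, samples[i]);
			printf("m=%2ld  rto=%ld jiffies\n", samples[i], calc_rto(&rt));
		}
		return 0;
	}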
net/sunrpc/xprt.c
@@ -138,15 +138,22 @@ xprt_from_sock(struct sock *sk)
 static int
 __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
+	struct rpc_rqst *req = task->tk_rqstp;
 	if (!xprt->snd_task) {
-		if (xprt->nocong || __xprt_get_cong(xprt, task))
+		if (xprt->nocong || __xprt_get_cong(xprt, task)) {
 			xprt->snd_task = task;
+			if (req) {
+				req->rq_bytes_sent = 0;
+				req->rq_ntrans++;
+			}
+		}
 	}
 	if (xprt->snd_task != task) {
 		dprintk("RPC: %4d TCP write queue full\n", task->tk_pid);
 		task->tk_timeout = 0;
 		task->tk_status = -EAGAIN;
-		if (task->tk_rqstp && task->tk_rqstp->rq_nresend)
+		if (req && req->rq_ntrans)
 			rpc_sleep_on(&xprt->resend, task, NULL, NULL);
 		else
 			rpc_sleep_on(&xprt->sending, task, NULL, NULL);
@@ -181,8 +188,14 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
 		if (!task)
 			return;
 	}
-	if (xprt->nocong || __xprt_get_cong(xprt, task))
+	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+		struct rpc_rqst *req = task->tk_rqstp;
 		xprt->snd_task = task;
+		if (req) {
+			req->rq_bytes_sent = 0;
+			req->rq_ntrans++;
+		}
+	}
 }
 /*
@@ -422,6 +435,9 @@ xprt_connect(struct rpc_task *task)
 	if (xprt_connected(xprt))
 		goto out_write;
+	if (task->tk_rqstp)
+		task->tk_rqstp->rq_bytes_sent = 0;
 	/*
 	 * We're here because the xprt was marked disconnected.
 	 * Start by resetting any existing state.
@@ -566,14 +582,13 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
 	if (!xprt->nocong) {
 		xprt_adjust_cwnd(xprt, copied);
 		__xprt_put_cong(xprt, req);
-		if (!req->rq_nresend) {
+		if (req->rq_ntrans == 1) {
 			unsigned timer =
 				task->tk_msg.rpc_proc->p_timer;
 			if (timer)
 				rpc_update_rtt(&clnt->cl_rtt, timer,
 						(long)jiffies - req->rq_xtime);
 		}
-		rpc_clear_timeo(&clnt->cl_rtt);
 	}
 #ifdef RPC_PROFILE
@@ -714,11 +729,11 @@ udp_data_ready(struct sock *sk, int len)
 	dprintk("RPC: %4d received reply\n", task->tk_pid);
-	if ((copied = rovr->rq_rlen) > repsize)
+	if ((copied = rovr->rq_private_buf.len) > repsize)
 		copied = repsize;
 	/* Suck it into the iovec, verify checksum if not done by hw. */
-	if (csum_partial_copy_to_xdr(&rovr->rq_rcv_buf, skb))
+	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
 		goto out_unlock;
 	/* Something worked... */
@@ -841,7 +856,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		return;
 	}
-	rcvbuf = &req->rq_rcv_buf;
+	rcvbuf = &req->rq_private_buf;
 	len = desc->count;
 	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
 		skb_reader_t my_desc;
@@ -859,7 +874,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 	xprt->tcp_copied += len;
 	xprt->tcp_offset += len;
-	if (xprt->tcp_copied == req->rq_rlen)
+	if (xprt->tcp_copied == req->rq_private_buf.len)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	else if (xprt->tcp_offset == xprt->tcp_reclen) {
 		if (xprt->tcp_flags & XPRT_LAST_FRAG)
@@ -1039,21 +1054,6 @@ xprt_write_space(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 }
-/*
- * Exponential backoff for UDP retries
- */
-static inline int
-xprt_expbackoff(struct rpc_task *task, struct rpc_rqst *req)
-{
-	int backoff;
-	req->rq_ntimeo++;
-	backoff = min(rpc_ntimeo(&task->tk_client->cl_rtt), XPRT_MAX_BACKOFF);
-	if (req->rq_ntimeo < (1 << backoff))
-		return 1;
-	return 0;
-}
 /*
  * RPC receive timeout handler.
  */
@@ -1067,15 +1067,8 @@ xprt_timer(struct rpc_task *task)
 	if (req->rq_received)
 		goto out;
-	if (!xprt->nocong) {
-		if (xprt_expbackoff(task, req)) {
-			rpc_add_timer(task, xprt_timer);
-			goto out_unlock;
-		}
-		rpc_inc_timeo(&task->tk_client->cl_rtt);
-		xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
-	}
-	req->rq_nresend++;
+	xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
+	__xprt_put_cong(xprt, req);
 	dprintk("RPC: %4d xprt_timer (%s request)\n",
 		task->tk_pid, req ? "pending" : "backlogged");
@@ -1084,7 +1077,6 @@ xprt_timer(struct rpc_task *task)
 out:
 	task->tk_timeout = 0;
 	rpc_wake_up_task(task);
-out_unlock:
 	spin_unlock(&xprt->sock_lock);
 }
@@ -1104,10 +1096,11 @@ xprt_prepare_transmit(struct rpc_task *task)
 	if (xprt->shutdown)
 		return -EIO;
-	if (task->tk_rpcwait)
-		rpc_remove_wait_queue(task);
 	spin_lock_bh(&xprt->sock_lock);
+	if (req->rq_received && !req->rq_bytes_sent) {
+		err = req->rq_received;
+		goto out_unlock;
+	}
 	if (!__xprt_lock_write(xprt, task)) {
 		err = -EAGAIN;
 		goto out_unlock;
@@ -1117,11 +1110,6 @@ xprt_prepare_transmit(struct rpc_task *task)
 		err = -ENOTCONN;
 		goto out_unlock;
 	}
-	if (list_empty(&req->rq_list)) {
-		list_add_tail(&req->rq_list, &xprt->recv);
-		req->rq_received = 0;
-	}
 out_unlock:
 	spin_unlock_bh(&xprt->sock_lock);
 	return err;
@@ -1146,6 +1134,20 @@ xprt_transmit(struct rpc_task *task)
 		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
 	}
+	smp_rmb();
+	if (!req->rq_received) {
+		if (list_empty(&req->rq_list)) {
+			spin_lock_bh(&xprt->sock_lock);
+			/* Update the softirq receive buffer */
+			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+					sizeof(req->rq_private_buf));
+			/* Add request to the receive list */
+			list_add_tail(&req->rq_list, &xprt->recv);
+			spin_unlock_bh(&xprt->sock_lock);
+		}
+	} else if (!req->rq_bytes_sent)
+		return;
 	/* Continue transmitting the packet/record. We must be careful
 	 * to cope with writespace callbacks arriving _after_ we have
 	 * called xprt_sendmsg().
@@ -1160,8 +1162,12 @@ xprt_transmit(struct rpc_task *task)
 	if (xprt->stream) {
 		req->rq_bytes_sent += status;
-		if (req->rq_bytes_sent >= req->rq_slen)
+		/* If we've sent the entire packet, immediately
+		 * reset the count of bytes sent. */
+		if (req->rq_bytes_sent >= req->rq_slen) {
+			req->rq_bytes_sent = 0;
 			goto out_receive;
+		}
 	} else {
 		if (status >= req->rq_slen)
 			goto out_receive;
@@ -1182,9 +1188,6 @@ xprt_transmit(struct rpc_task *task)
 	 * hence there is no danger of the waking up task being put on
 	 * schedq, and being picked up by a parallel run of rpciod().
 	 */
-	if (req->rq_received)
-		goto out_release;
 	task->tk_status = status;
 	switch (status) {
@@ -1214,22 +1217,21 @@ xprt_transmit(struct rpc_task *task)
 		if (xprt->stream)
 			xprt_disconnect(xprt);
 	}
-out_release:
 	xprt_release_write(xprt, task);
-	req->rq_bytes_sent = 0;
 	return;
 out_receive:
 	dprintk("RPC: %4d xmit complete\n", task->tk_pid);
 	/* Set the task's receive timeout value */
+	spin_lock_bh(&xprt->sock_lock);
 	if (!xprt->nocong) {
 		task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt,
 				task->tk_msg.rpc_proc->p_timer);
-		req->rq_ntimeo = 0;
+		task->tk_timeout <<= clnt->cl_timeout.to_retries
+			- req->rq_timeout.to_retries;
 		if (task->tk_timeout > req->rq_timeout.to_maxval)
 			task->tk_timeout = req->rq_timeout.to_maxval;
 	} else
 		task->tk_timeout = req->rq_timeout.to_current;
-	spin_lock_bh(&xprt->sock_lock);
 	/* Don't race with disconnect */
 	if (!xprt_connected(xprt))
 		task->tk_status = -ENOTCONN;
@@ -1237,7 +1239,6 @@ xprt_transmit(struct rpc_task *task)
 		rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
 	__xprt_release_write(xprt, task);
 	spin_unlock_bh(&xprt->sock_lock);
-	req->rq_bytes_sent = 0;
 }
 /*
......
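The common thread in the xprt.c hunks is rq_private_buf: the softirq receive path (udp_data_ready, tcp_read_request) no longer touches rq_rcv_buf, which a task retrying the call may be rebuilding, but a snapshot taken under sock_lock in xprt_transmit() before the request is put on xprt->recv. A minimal user-space sketch of that ownership split, with hypothetical names standing in for the kernel structures:

	#include <string.h>
	#include <pthread.h>

	struct buf  { char *data; size_t len; };

	struct rqst {
		struct buf rcv_buf;	/* owned by the caller; may be rebuilt for a retry */
		struct buf private_buf;	/* stable snapshot; all the receive side sees */
		int        listed;	/* stands in for !list_empty(&req->rq_list) */
	};

	static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Transmit side: snapshot the reply buffer under the lock before the
	 * request becomes visible to the receive path (cf. the memcpy added
	 * in xprt_transmit() above). */
	static void make_receivable(struct rqst *req)
	{
		pthread_mutex_lock(&sock_lock);
		if (!req->listed) {
			memcpy(&req->private_buf, &req->rcv_buf,
			       sizeof(req->private_buf));
			req->listed = 1;
		}
		pthread_mutex_unlock(&sock_lock);
	}

	/* Receive side: consults only the snapshot, so a caller rewriting
	 * rcv_buf for a retransmission cannot race it. */
	static size_t data_ready(struct rqst *req, const char *pkt, size_t len)
	{
		size_t copied = len < req->private_buf.len ? len : req->private_buf.len;
		memcpy(req->private_buf.data, pkt, copied);
		return copied;
	}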