Commit ba44b066 authored by Linus Torvalds

Merge http://nfsclient.bkbits.net/linux-2.5

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents a063b7fc 04d0f23c
......@@ -32,6 +32,7 @@
* 18 Dec 2001 Initial implementation for 2.4 --cel
* 08 Jul 2002 Version for 2.4.19, with bug fixes --trondmy
* 08 Jun 2003 Port to 2.5 APIs --cel
* 31 Mar 2004 Handle direct I/O without VFS support --cel
*
*/
......@@ -252,9 +253,7 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
{
const unsigned int wsize = NFS_SERVER(inode)->wsize;
size_t request;
int need_commit;
int tot_bytes;
int curpage;
int curpage, need_commit, result, tot_bytes;
struct nfs_writeverf first_verf;
struct nfs_write_data wdata = {
.inode = inode,
......@@ -281,8 +280,6 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
wdata.args.pgbase = user_addr & ~PAGE_MASK;
wdata.args.offset = file_offset;
do {
int result;
wdata.args.count = request;
if (wdata.args.count > wsize)
wdata.args.count = wsize;
......@@ -299,7 +296,7 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
if (result <= 0) {
if (tot_bytes > 0)
break;
return result;
goto out;
}
if (tot_bytes == 0)
......@@ -324,8 +321,6 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
* Commit data written so far, even in the event of an error
*/
if (need_commit) {
int result;
wdata.args.count = tot_bytes;
wdata.args.offset = file_offset;
......@@ -338,9 +333,12 @@ nfs_direct_write_seg(struct inode *inode, struct file *file,
VERF_SIZE) != 0)
goto sync_retry;
}
result = tot_bytes;
out:
nfs_end_data_update_defer(inode);
return tot_bytes;
return result;
sync_retry:
wdata.args.stable = NFS_FILE_SYNC;
......@@ -409,12 +407,6 @@ nfs_direct_write(struct inode *inode, struct file *file,
* file_offset: offset in file to begin the operation
* nr_segs: size of iovec array
*
* Usually a file system implements direct I/O by calling out to
* blockdev_direct_IO. The NFS client doesn't have a backing block
* device, so we do everything by hand instead.
*
* The inode's i_sem is no longer held by the VFS layer before it calls
* this function to do a write.
*/
ssize_t
nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
......@@ -429,11 +421,7 @@ nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
* No support for async yet
*/
if (!is_sync_kiocb(iocb))
goto out;
result = nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (result < 0)
goto out;
return result;
switch (rw) {
case READ:
......@@ -453,8 +441,160 @@ nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
default:
break;
}
return result;
}
/**
* nfs_file_direct_read - file direct read operation for NFS files
* @iocb: target I/O control block
* @buf: user's buffer into which to read data
* @count: number of bytes to read
* @pos: byte offset in file where reading starts
*
* We use this function for direct reads instead of calling
* generic_file_aio_read() in order to avoid gfar's check to see if
* the request starts before the end of the file. For that check
* to work, we must generate a GETATTR before each direct read, and
* even then there is a window between the GETATTR and the subsequent
* READ where the file size could change. So our preference is simply
* to do all reads the application wants, and the server will take
* care of managing the end of file boundary.
*
* This function also eliminates unnecessarily updating the file's
* atime locally, as the NFS server sets the file's atime, and this
* client must read the updated atime from the server back into its
* cache.
*/
ssize_t
nfs_file_direct_read(struct kiocb *iocb, char *buf, size_t count, loff_t pos)
{
ssize_t retval = -EINVAL;
loff_t *ppos = &iocb->ki_pos;
struct file *file = iocb->ki_filp;
struct dentry *dentry = file->f_dentry;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct iovec iov = {
.iov_base = buf,
.iov_len = count,
};
dprintk("nfs: direct read(%s/%s, %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
(unsigned long) count, (unsigned long) pos);
if (!is_sync_kiocb(iocb))
goto out;
if (count < 0)
goto out;
retval = -EFAULT;
if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
goto out;
retval = 0;
if (!count)
goto out;
if (mapping->nrpages) {
retval = filemap_fdatawrite(mapping);
if (retval == 0)
retval = filemap_fdatawait(mapping);
if (retval)
goto out;
}
retval = nfs_direct_read(inode, file, &iov, pos, 1);
if (retval > 0)
*ppos = pos + retval;
out:
dprintk("NFS: direct_IO result=%zd\n", result);
return result;
return retval;
}
/**
* nfs_file_direct_write - file direct write operation for NFS files
* @iocb: target I/O control block
* @buf: user's buffer from which to write data
* @count: number of bytes to write
* @pos: byte offset in file where writing starts
*
* We use this function for direct writes instead of calling
* generic_file_aio_write() in order to avoid taking the inode
* semaphore and updating the i_size. The NFS server will set
* the new i_size and this client must read the updated size
* back into its cache. We let the server do generic write
* parameter checking and report problems.
*
* We also avoid an unnecessary invocation of generic_osync_inode(),
* as it is fairly meaningless to sync the metadata of an NFS file.
*
* We eliminate local atime updates, see direct read above.
*
* We avoid unnecessary page cache invalidations for normal cached
* readers of this file.
*
* Note that O_APPEND is not supported for NFS direct writes, as there
* is no atomic O_APPEND write facility in the NFS protocol.
*/
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char *buf, size_t count, loff_t pos)
{
ssize_t retval = -EINVAL;
loff_t *ppos = &iocb->ki_pos;
unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
struct file *file = iocb->ki_filp;
struct dentry *dentry = file->f_dentry;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
struct iovec iov = {
.iov_base = (void __user *)buf,
.iov_len = count,
};
dfprintk(VFS, "nfs: direct write(%s/%s(%ld), %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
inode->i_ino, (unsigned long) count, (unsigned long) pos);
if (!is_sync_kiocb(iocb))
goto out;
if (count < 0)
goto out;
if (pos < 0)
goto out;
retval = -EFAULT;
if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
goto out;
if (file->f_error) {
retval = file->f_error;
file->f_error = 0;
goto out;
}
retval = -EFBIG;
if (limit != RLIM_INFINITY) {
if (pos >= limit) {
send_sig(SIGXFSZ, current, 0);
goto out;
}
if (count > limit - (unsigned long) pos)
count = limit - (unsigned long) pos;
}
retval = 0;
if (!count)
goto out;
if (mapping->nrpages) {
retval = filemap_fdatawrite(mapping);
if (retval == 0)
retval = filemap_fdatawait(mapping);
if (retval)
goto out;
}
retval = nfs_direct_write(inode, file, &iov, pos, 1);
if (mapping->nrpages)
invalidate_inode_pages2(mapping);
if (retval > 0)
*ppos = pos + retval;
out:
return retval;
}
......@@ -154,6 +154,11 @@ nfs_file_read(struct kiocb *iocb, char * buf, size_t count, loff_t pos)
struct inode * inode = dentry->d_inode;
ssize_t result;
#ifdef CONFIG_NFS_DIRECTIO
if (iocb->ki_filp->f_flags & O_DIRECT)
return nfs_file_direct_read(iocb, buf, count, pos);
#endif
dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
(unsigned long) count, (unsigned long) pos);
......@@ -268,6 +273,11 @@ nfs_file_write(struct kiocb *iocb, const char *buf, size_t count, loff_t pos)
struct inode * inode = dentry->d_inode;
ssize_t result;
#ifdef CONFIG_NFS_DIRECTIO
if (iocb->ki_filp->f_flags & O_DIRECT)
return nfs_file_direct_write(iocb, buf, count, pos);
#endif
dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
inode->i_ino, (unsigned long) count, (unsigned long) pos);
......
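For illustration, a minimal user-space sketch of the path these hooks enable: opening an NFS file with O_DIRECT makes nfs_file_read()/nfs_file_write() hand the request to nfs_file_direct_read()/nfs_file_direct_write(). The mount point and file name are hypothetical, and the page-aligned buffer is just a conservative choice for an O_DIRECT caller.
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
int main(void)
{
	char *buf;
	ssize_t n;
	int fd;
	/* page-aligned buffer; O_DIRECT callers conventionally align their I/O */
	if (posix_memalign((void **)&buf, 4096, 4096))
		return 1;
	memset(buf, 'x', 4096);
	/* O_DIRECT routes reads and writes through the NFS direct I/O path */
	fd = open("/mnt/nfs/testfile", O_RDWR | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = write(fd, buf, 4096);		/* -> nfs_file_direct_write() */
	if (n == 4096)
		n = pread(fd, buf, 4096, 0);	/* -> nfs_file_direct_read() */
	close(fd);
	free(buf);
	return n == 4096 ? 0 : 1;
}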
......@@ -237,7 +237,7 @@ nfs_get_root(struct super_block *sb, struct nfs_fh *rootfh, struct nfs_fsinfo *f
error = server->rpc_ops->getroot(server, rootfh, fsinfo);
if (error < 0) {
printk(KERN_NOTICE "nfs_get_root: getattr error = %d\n", -error);
dprintk("nfs_get_root: getattr error = %d\n", -error);
return ERR_PTR(error);
}
......@@ -262,6 +262,7 @@ nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor)
struct nfs_pathconf pathinfo = {
.fattr = &fattr,
};
int no_root_error = 0;
/* We probably want something more informative here */
snprintf(sb->s_id, sizeof(sb->s_id), "%x:%x", MAJOR(sb->s_dev), MINOR(sb->s_dev));
......@@ -272,12 +273,15 @@ nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor)
root_inode = nfs_get_root(sb, &server->fh, &fsinfo);
/* Did getting the root inode fail? */
if (IS_ERR(root_inode))
if (IS_ERR(root_inode)) {
no_root_error = PTR_ERR(root_inode);
goto out_no_root;
}
sb->s_root = d_alloc_root(root_inode);
if (!sb->s_root)
if (!sb->s_root) {
no_root_error = -ENOMEM;
goto out_no_root;
}
sb->s_root->d_op = server->rpc_ops->dentry_ops;
/* Get some general file system info */
......@@ -337,10 +341,10 @@ nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor)
return 0;
/* Yargs. It didn't work out. */
out_no_root:
printk("nfs_read_super: get root inode failed\n");
dprintk("nfs_sb_init: get root inode failed: errno %d\n", -no_root_error);
if (!IS_ERR(root_inode))
iput(root_inode);
return -EINVAL;
return no_root_error;
}
/*
......
......@@ -731,6 +731,8 @@ nfs4_reclaim_open_state(struct nfs4_state_owner *sp)
int status = 0;
list_for_each_entry(state, &sp->so_states, open_states) {
if (state->state == 0)
continue;
status = nfs4_open_reclaim(sp, state);
if (status >= 0)
continue;
......
......@@ -43,14 +43,12 @@ static mempool_t *nfs_rdata_mempool;
#define MIN_POOL_READ (32)
static __inline__ struct nfs_read_data *nfs_readdata_alloc(void)
static struct nfs_read_data *nfs_readdata_alloc(void)
{
struct nfs_read_data *p;
p = (struct nfs_read_data *)mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
if (p) {
if (p)
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
}
return p;
}
......@@ -99,10 +97,17 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
unsigned int rsize = NFS_SERVER(inode)->rsize;
unsigned int count = PAGE_CACHE_SIZE;
int result;
struct nfs_read_data rdata = {
struct nfs_read_data *rdata;
rdata = nfs_readdata_alloc();
if (!rdata)
return -ENOMEM;
*rdata = (struct nfs_read_data) {
.flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0),
.cred = NULL,
.inode = inode,
.pages = LIST_HEAD_INIT(rdata->pages),
.args = {
.fh = NFS_FH(inode),
.lockowner = current->files,
......@@ -111,7 +116,7 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
.count = rsize,
},
.res = {
.fattr = &rdata.fattr,
.fattr = &rdata->fattr,
}
};
......@@ -123,19 +128,19 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
*/
do {
if (count < rsize)
rdata.args.count = count;
rdata.res.count = rdata.args.count;
rdata.args.offset = page_offset(page) + rdata.args.pgbase;
rdata->args.count = count;
rdata->res.count = rdata->args.count;
rdata->args.offset = page_offset(page) + rdata->args.pgbase;
dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
NFS_SERVER(inode)->hostname,
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
(unsigned long long)rdata.args.pgbase,
rdata.args.count);
(unsigned long long)rdata->args.pgbase,
rdata->args.count);
lock_kernel();
result = NFS_PROTO(inode)->read(&rdata, file);
result = NFS_PROTO(inode)->read(rdata, file);
unlock_kernel();
/*
......@@ -148,17 +153,17 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
goto io_error;
}
count -= result;
rdata.args.pgbase += result;
rdata->args.pgbase += result;
/* Note: result == 0 should only happen if we're caching
* a write that extends the file and punches a hole.
*/
if (rdata.res.eof != 0 || result == 0)
if (rdata->res.eof != 0 || result == 0)
break;
} while (count);
NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
if (count)
memclear_highpage_flush(page, rdata.args.pgbase, count);
memclear_highpage_flush(page, rdata->args.pgbase, count);
SetPageUptodate(page);
if (PageError(page))
ClearPageError(page);
......@@ -166,6 +171,7 @@ nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
io_error:
unlock_page(page);
nfs_readdata_free(rdata);
return result;
}
......@@ -305,6 +311,7 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
data = nfs_readdata_alloc();
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
list_add(&data->pages, &list);
requests++;
if (nbytes <= rsize)
......@@ -361,6 +368,7 @@ static int nfs_pagein_one(struct list_head *head, struct inode *inode)
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
pages = data->pagevec;
count = 0;
while (!list_empty(head)) {
......
......@@ -179,7 +179,13 @@ static int nfs_writepage_sync(struct file *file, struct inode *inode,
{
unsigned int wsize = NFS_SERVER(inode)->wsize;
int result, written = 0;
struct nfs_write_data wdata = {
struct nfs_write_data *wdata;
wdata = kmalloc(sizeof(*wdata), GFP_NOFS);
if (!wdata)
return -ENOMEM;
*wdata = (struct nfs_write_data) {
.flags = how,
.cred = NULL,
.inode = inode,
......@@ -192,8 +198,8 @@ static int nfs_writepage_sync(struct file *file, struct inode *inode,
.count = wsize,
},
.res = {
.fattr = &wdata.fattr,
.verf = &wdata.verf,
.fattr = &wdata->fattr,
.verf = &wdata->verf,
},
};
......@@ -205,22 +211,22 @@ static int nfs_writepage_sync(struct file *file, struct inode *inode,
nfs_begin_data_update(inode);
do {
if (count < wsize)
wdata.args.count = count;
wdata.args.offset = page_offset(page) + wdata.args.pgbase;
wdata->args.count = count;
wdata->args.offset = page_offset(page) + wdata->args.pgbase;
result = NFS_PROTO(inode)->write(&wdata, file);
result = NFS_PROTO(inode)->write(wdata, file);
if (result < 0) {
/* Must mark the page invalid after I/O error */
ClearPageUptodate(page);
goto io_error;
}
if (result < wdata.args.count)
if (result < wdata->args.count)
printk(KERN_WARNING "NFS: short write, count=%u, result=%d\n",
wdata.args.count, result);
wdata->args.count, result);
wdata.args.offset += result;
wdata.args.pgbase += result;
wdata->args.offset += result;
wdata->args.pgbase += result;
written += result;
count -= result;
} while (count);
......@@ -234,9 +240,10 @@ static int nfs_writepage_sync(struct file *file, struct inode *inode,
io_error:
nfs_end_data_update_defer(inode);
if (wdata.cred)
put_rpccred(wdata.cred);
if (wdata->cred)
put_rpccred(wdata->cred);
kfree(wdata);
return written ? written : result;
}
......
......@@ -306,6 +306,10 @@ nfs_file_cred(struct file *file)
*/
extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
unsigned long);
extern ssize_t nfs_file_direct_read(struct kiocb *iocb, char *buf,
size_t count, loff_t pos);
extern ssize_t nfs_file_direct_write(struct kiocb *iocb, const char *buf,
size_t count, loff_t pos);
/*
* linux/fs/nfs/dir.c
......
......@@ -50,46 +50,36 @@ u32 gss_verify_mic(
u32 gss_delete_sec_context(
struct gss_ctx **ctx_id);
/* We maintain a list of the pseudoflavors (equivalently, mechanism-qop-service
* triples) that we currently support: */
struct sup_sec_triple {
struct list_head triples;
u32 pseudoflavor;
struct gss_api_mech *mech;
u32 qop;
u32 service;
struct gss_api_mech * gss_mech_get_by_name(char *name);
struct gss_api_mech * gss_mech_get_by_pseudoflavor(u32 pseudoflavor);
u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
struct pf_desc {
u32 pseudoflavor;
u32 qop;
u32 service;
char *name;
char *auth_domain_name;
};
int gss_register_triple(u32 pseudoflavor, struct gss_api_mech *mech, u32 qop,
u32 service);
int gss_unregister_triple(u32 pseudoflavor);
int gss_pseudoflavor_supported(u32 pseudoflavor);
u32 gss_cmp_triples(u32 oid_len, char *oid_data, u32 qop, u32 service);
u32 gss_get_pseudoflavor(struct gss_ctx *ctx_id, u32 qop, u32 service);
u32 gss_pseudoflavor_to_service(u32 pseudoflavor);
/* Both return NULL on failure: */
struct gss_api_mech * gss_pseudoflavor_to_mech(u32 pseudoflavor);
int gss_pseudoflavor_to_mechOID(u32 pseudoflavor, struct xdr_netobj *mech);
/* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and
* mechanisms may be dynamically registered or unregistered by modules.
* Our only built-in mechanism is a trivial debugging mechanism that provides
* no actual security; the following function registers that mechanism: */
void gss_mech_register_debug(void);
* mechanisms may be dynamically registered or unregistered by modules. */
/* Each mechanism is described by the following struct: */
struct gss_api_mech {
struct xdr_netobj gm_oid;
struct list_head gm_list;
atomic_t gm_count;
struct module *gm_owner;
struct xdr_netobj gm_oid;
char *gm_name;
struct gss_api_ops *gm_ops;
/* pseudoflavors supported by this mechanism: */
int gm_pf_num;
struct pf_desc gm_pfs[];
};
/* and must provide the following operations: */
struct gss_api_ops {
char *name;
u32 (*gss_import_sec_context)(
struct xdr_netobj *input_token,
struct gss_ctx *ctx_id);
......@@ -107,29 +97,25 @@ struct gss_api_ops {
void *internal_ctx_id);
};
/* Returns nonzero on failure. */
int gss_mech_register(struct xdr_netobj *, struct gss_api_ops *);
int gss_mech_register(struct gss_api_mech *);
void gss_mech_unregister(struct gss_api_mech *);
/* Returns nonzero iff someone still has a reference to this mech. */
int gss_mech_unregister(struct gss_api_mech *);
/* Returns nonzero iff someone still has a reference to some mech. */
int gss_mech_unregister_all(void);
/* returns a mechanism descriptor given an OID, an increments the mechanism's
/* returns a mechanism descriptor given an OID, and increments the mechanism's
* reference count. */
struct gss_api_mech * gss_mech_get_by_OID(struct xdr_netobj *);
/* Similar, but get by name like "krb5", "spkm", etc., instead of OID. */
/* Returns a reference to a mechanism, given a name like "krb5" etc. */
struct gss_api_mech *gss_mech_get_by_name(char *);
/* Similar, but get by pseudoflavor. */
struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
/* Just increments the mechanism's reference count and returns its input: */
struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
/* Returns nonzero iff you've released the last reference to this mech.
* Note that for every successful gss_mech_get call there must be exactly
* one corresponding call to gss_mech_put.*/
int gss_mech_put(struct gss_api_mech *);
/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
* corresponding call to gss_mech_put. */
void gss_mech_put(struct gss_api_mech *);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_GSS_API_H */
......
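To make the reworked registration interface above concrete, here is a hedged sketch of how a mechanism module might use it. The "foo" mechanism, its pseudoflavor value, and its (elided) operations table are hypothetical, the include list is approximate, and the pattern simply mirrors the krb5 module converted later in this diff.
#include <linux/module.h>
#include <linux/sunrpc/gss_api.h>
#define RPC_AUTH_GSS_FOO 390099		/* placeholder pseudoflavor number */
/* the mechanism's import/get_mic/verify_mic/delete callbacks are elided */
static struct gss_api_ops gss_foo_ops;
static struct gss_api_mech gss_foo_mech = {
	.gm_name	= "foo",
	.gm_owner	= THIS_MODULE,
	.gm_ops		= &gss_foo_ops,
	.gm_pf_num	= 1,
	.gm_pfs		= {
		/* pseudoflavor, qop, service, name; auth_domain_name is
		 * filled in by gss_mech_svc_setup() at registration time */
		{ RPC_AUTH_GSS_FOO, 0, RPC_GSS_SVC_NONE, "foo" },
	},
};
static int __init init_foo_module(void)
{
	return gss_mech_register(&gss_foo_mech);
}
static void __exit cleanup_foo_module(void)
{
	gss_mech_unregister(&gss_foo_mech);
}
module_init(init_foo_module);
module_exit(cleanup_foo_module);
MODULE_LICENSE("GPL");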
......@@ -81,5 +81,4 @@ int g_token_size(
void g_make_token_header(
struct xdr_netobj *mech,
int body_size,
unsigned char **buf,
int tok_type);
unsigned char **buf);
......@@ -115,7 +115,7 @@ enum seal_alg {
#define ENCTYPE_UNKNOWN 0x01ff
s32
krb5_make_checksum(s32 cksumtype, char *header, struct xdr_buf *body,
make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
struct xdr_netobj *cksum);
u32
......
......@@ -69,8 +69,7 @@ extern unsigned int xprt_tcp_slot_table_entries;
* This describes a timeout strategy
*/
struct rpc_timeout {
unsigned long to_current, /* current timeout */
to_initval, /* initial timeout */
unsigned long to_initval, /* initial timeout */
to_maxval, /* max timeout */
to_increment; /* if !exponential */
unsigned int to_retries; /* max # of retries */
......@@ -85,7 +84,6 @@ struct rpc_rqst {
* This is the user-visible part
*/
struct rpc_xprt * rq_xprt; /* RPC client */
struct rpc_timeout rq_timeout; /* timeout parms */
struct xdr_buf rq_snd_buf; /* send buffer */
struct xdr_buf rq_rcv_buf; /* recv buffer */
......@@ -103,6 +101,9 @@ struct rpc_rqst {
struct xdr_buf rq_private_buf; /* The receive buffer
* used in the softirq.
*/
unsigned long rq_majortimeo; /* major timeout alarm */
unsigned long rq_timeout; /* Current timeout value */
unsigned int rq_retries; /* # of retries */
/*
* For authentication (e.g. auth_des)
*/
......@@ -115,7 +116,6 @@ struct rpc_rqst {
u32 rq_bytes_sent; /* Bytes we have sent */
unsigned long rq_xtime; /* when transmitted */
int rq_ntimeo;
int rq_ntrans;
};
#define rq_svec rq_snd_buf.head
......@@ -210,7 +210,7 @@ void xprt_reserve(struct rpc_task *);
int xprt_prepare_transmit(struct rpc_task *);
void xprt_transmit(struct rpc_task *);
void xprt_receive(struct rpc_task *);
int xprt_adjust_timeout(struct rpc_timeout *);
int xprt_adjust_timeout(struct rpc_rqst *req);
void xprt_release(struct rpc_task *);
void xprt_connect(struct rpc_task *);
int xprt_clear_backlog(struct rpc_xprt *);
......
......@@ -4,11 +4,11 @@
obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
auth_rpcgss-objs := auth_gss.o gss_pseudoflavors.o gss_generic_token.o \
sunrpcgss_syms.o gss_mech_switch.o svcauth_gss.o
auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
gss_mech_switch.o svcauth_gss.o gss_krb5_crypto.o
obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
gss_krb5_crypto.o gss_krb5_seqnum.o
gss_krb5_seqnum.o
......@@ -32,6 +32,7 @@
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sunrpc/sched.h>
......@@ -151,22 +152,23 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
return(1 + der_length_size(body_size) + body_size);
}
EXPORT_SYMBOL(g_token_size);
/* fills in a buffer with the token header. The buffer is assumed to
be the right size. buf is advanced past the token header */
void
g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf,
int tok_type)
g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
{
*(*buf)++ = 0x60;
der_write_length(buf, 4 + mech->len + body_size);
*(*buf)++ = 0x06;
*(*buf)++ = (unsigned char) mech->len;
TWRITE_STR(*buf, mech->data, ((int) mech->len));
*(*buf)++ = (unsigned char) ((tok_type>>8)&0xff);
*(*buf)++ = (unsigned char) (tok_type&0xff);
}
EXPORT_SYMBOL(g_make_token_header);
/*
* Given a buffer containing a token, reads and verifies the token,
* leaving buf advanced past the token header, and setting body_size
......@@ -221,9 +223,6 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
if (ret)
return(ret);
if ((*buf++ != ((tok_type>>8)&0xff)) || (*buf++ != (tok_type&0xff)))
return(G_WRONG_TOKID);
if (!ret) {
*buf_in = buf;
*body_size = toksize;
......@@ -232,6 +231,8 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
return(ret);
}
EXPORT_SYMBOL(g_verify_token_header);
/* Given a buffer containing a token, returns a copy of the mech oid in
* the parameter mech. */
u32
......
......@@ -59,14 +59,14 @@ krb5_encrypt(
struct scatterlist sg[1];
u8 local_iv[16] = {0};
dprintk("RPC: krb5_encrypt: input data:\n");
dprintk("RPC: krb5_encrypt: input data:\n");
print_hexl((u32 *)in, length, 0);
if (length % crypto_tfm_alg_blocksize(tfm) != 0)
goto out;
if (crypto_tfm_alg_ivsize(tfm) > 16) {
dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
crypto_tfm_alg_ivsize(tfm));
goto out;
}
......@@ -81,13 +81,15 @@ krb5_encrypt(
ret = crypto_cipher_encrypt_iv(tfm, sg, sg, length, local_iv);
dprintk("RPC: krb5_encrypt: output data:\n");
dprintk("RPC: krb5_encrypt: output data:\n");
print_hexl((u32 *)out, length, 0);
out:
dprintk("krb5_encrypt returns %d\n",ret);
dprintk("RPC: krb5_encrypt returns %d\n",ret);
return(ret);
}
EXPORT_SYMBOL(krb5_encrypt);
u32
krb5_decrypt(
struct crypto_tfm *tfm,
......@@ -100,14 +102,14 @@ krb5_decrypt(
struct scatterlist sg[1];
u8 local_iv[16] = {0};
dprintk("RPC: krb5_decrypt: input data:\n");
dprintk("RPC: krb5_decrypt: input data:\n");
print_hexl((u32 *)in, length, 0);
if (length % crypto_tfm_alg_blocksize(tfm) != 0)
goto out;
if (crypto_tfm_alg_ivsize(tfm) > 16) {
dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
crypto_tfm_alg_ivsize(tfm));
goto out;
}
......@@ -121,13 +123,15 @@ krb5_decrypt(
ret = crypto_cipher_decrypt_iv(tfm, sg, sg, length, local_iv);
dprintk("RPC: krb5_decrypt: output_data:\n");
dprintk("RPC: krb5_decrypt: output_data:\n");
print_hexl((u32 *)out, length, 0);
out:
dprintk("gss_k5decrypt returns %d\n",ret);
dprintk("RPC: gss_k5decrypt returns %d\n",ret);
return(ret);
}
EXPORT_SYMBOL(krb5_decrypt);
void
buf_to_sg(struct scatterlist *sg, char *ptr, int len) {
sg->page = virt_to_page(ptr);
......@@ -135,10 +139,9 @@ buf_to_sg(struct scatterlist *sg, char *ptr, int len) {
sg->length = len;
}
/* checksum the plaintext data and the first 8 bytes of the krb5 token header,
* as specified by the rfc: */
/* checksum the plaintext data and hdrlen bytes of the token header */
s32
krb5_make_checksum(s32 cksumtype, char *header, struct xdr_buf *body,
make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
struct xdr_netobj *cksum)
{
char *cksumname;
......@@ -153,7 +156,7 @@ krb5_make_checksum(s32 cksumtype, char *header, struct xdr_buf *body,
cksumname = "md5";
break;
default:
dprintk("RPC: krb5_make_checksum:"
dprintk("RPC: krb5_make_checksum:"
" unsupported checksum %d", cksumtype);
goto out;
}
......@@ -164,7 +167,7 @@ krb5_make_checksum(s32 cksumtype, char *header, struct xdr_buf *body,
goto out;
crypto_digest_init(tfm);
buf_to_sg(sg, header, 8);
buf_to_sg(sg, header, hdrlen);
crypto_digest_update(tfm, sg, 1);
if (body->head[0].iov_len) {
buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len);
......@@ -202,3 +205,5 @@ krb5_make_checksum(s32 cksumtype, char *header, struct xdr_buf *body,
crypto_free_tfm(tfm);
return code;
}
EXPORT_SYMBOL(make_checksum);
......@@ -40,7 +40,6 @@
#include <linux/slab.h>
#include <linux/sunrpc/auth.h>
#include <linux/in.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <linux/crypto.h>
......@@ -100,7 +99,7 @@ get_key(char **p, char *end, struct crypto_tfm **res)
alg_mode = CRYPTO_TFM_MODE_CBC;
break;
default:
dprintk("RPC: get_key: unsupported algorithm %d\n", alg);
dprintk("RPC: get_key: unsupported algorithm %d\n", alg);
goto out_err_free_key;
}
if (!(*res = crypto_alloc_tfm(alg_name, alg_mode)))
......@@ -155,7 +154,7 @@ gss_import_sec_context_kerberos(struct xdr_netobj *inbuf,
goto out_err_free_key2;
ctx_id->internal_ctx_id = ctx;
dprintk("Succesfully imported new context.\n");
dprintk("RPC: Succesfully imported new context.\n");
return 0;
out_err_free_key2:
......@@ -197,7 +196,7 @@ gss_verify_mic_kerberos(struct gss_ctx *ctx,
if (!maj_stat && qop_state)
*qstate = qop_state;
dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat);
dprintk("RPC: gss_verify_mic_kerberos returning %d\n", maj_stat);
return maj_stat;
}
......@@ -211,41 +210,42 @@ gss_get_mic_kerberos(struct gss_ctx *ctx,
err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG);
dprintk("RPC: gss_get_mic_kerberos returning %d\n",err);
dprintk("RPC: gss_get_mic_kerberos returning %d\n",err);
return err;
}
static struct gss_api_ops gss_kerberos_ops = {
.name = "krb5",
.gss_import_sec_context = gss_import_sec_context_kerberos,
.gss_get_mic = gss_get_mic_kerberos,
.gss_verify_mic = gss_verify_mic_kerberos,
.gss_delete_sec_context = gss_delete_sec_context_kerberos,
};
/* XXX error checking? reference counting? */
static struct gss_api_mech gss_kerberos_mech = {
.gm_name = "krb5",
.gm_owner = THIS_MODULE,
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = 2,
.gm_pfs = {
{RPC_AUTH_GSS_KRB5, 0, RPC_GSS_SVC_NONE, "krb5"},
{RPC_AUTH_GSS_KRB5I, 0, RPC_GSS_SVC_INTEGRITY, "krb5i"},
},
};
static int __init init_kerberos_module(void)
{
struct gss_api_mech *gm;
int status;
if (gss_mech_register(&gss_mech_krb5_oid, &gss_kerberos_ops))
status = gss_mech_register(&gss_kerberos_mech);
if (status)
printk("Failed to register kerberos gss mechanism!\n");
gm = gss_mech_get_by_OID(&gss_mech_krb5_oid);
gss_register_triple(RPC_AUTH_GSS_KRB5 , gm, 0, RPC_GSS_SVC_NONE);
gss_register_triple(RPC_AUTH_GSS_KRB5I, gm, 0, RPC_GSS_SVC_INTEGRITY);
if (svcauth_gss_register_pseudoflavor(RPC_AUTH_GSS_KRB5, "krb5"))
printk("Failed to register %s with server!\n", "krb5");
if (svcauth_gss_register_pseudoflavor(RPC_AUTH_GSS_KRB5I, "krb5i"))
printk("Failed to register %s with server!\n", "krb5i");
gss_mech_put(gm);
return 0;
return status;
}
static void __exit cleanup_kerberos_module(void)
{
gss_unregister_triple(RPC_AUTH_GSS_KRB5I);
gss_unregister_triple(RPC_AUTH_GSS_KRB5);
gss_mech_unregister(&gss_kerberos_mech);
}
MODULE_LICENSE("GPL");
......
......@@ -91,7 +91,7 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
dprintk("RPC: gss_krb5_seal\n");
now = jiffies;
now = get_seconds();
if (qop_req != 0)
goto out_err;
......@@ -101,12 +101,12 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
checksum_type = CKSUMTYPE_RSA_MD5;
break;
default:
dprintk("RPC: gss_krb5_seal: ctx->signalg %d not"
dprintk("RPC: gss_krb5_seal: ctx->signalg %d not"
" supported\n", ctx->signalg);
goto out_err;
}
if (ctx->sealalg != SEAL_ALG_NONE && ctx->sealalg != SEAL_ALG_DES) {
dprintk("RPC: gss_krb5_seal: ctx->sealalg %d not supported\n",
dprintk("RPC: gss_krb5_seal: ctx->sealalg %d not supported\n",
ctx->sealalg);
goto out_err;
}
......@@ -122,7 +122,10 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
token->len = g_token_size(&ctx->mech_used, 22 + tmsglen);
ptr = token->data;
g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr, toktype);
g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr);
*ptr++ = (unsigned char) ((toktype>>8)&0xff);
*ptr++ = (unsigned char) (toktype&0xff);
/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
krb5_hdr = ptr - 2;
......@@ -137,7 +140,7 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
/* XXX removing support for now */
goto out_err;
} else { /* Sign only. */
if (krb5_make_checksum(checksum_type, krb5_hdr, text,
if (make_checksum(checksum_type, krb5_hdr, 8, text,
&md5cksum))
goto out_err;
}
......@@ -151,7 +154,7 @@ krb5_make_token(struct krb5_ctx *ctx, int qop_req,
md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
KRB5_CKSUM_LENGTH);
dprintk("make_seal_token: cksum data: \n");
dprintk("RPC: make_seal_token: cksum data: \n");
print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
break;
default:
......
......@@ -70,7 +70,7 @@ krb5_get_seq_num(struct crypto_tfm *key,
s32 code;
unsigned char plain[8];
dprintk("krb5_get_seq_num: \n");
dprintk("RPC: krb5_get_seq_num:\n");
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
return code;
......
......@@ -99,6 +99,10 @@ krb5_read_token(struct krb5_ctx *ctx,
if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, toktype,
read_token->len))
goto out;
if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff)))
goto out;
/* XXX sanity-check bodysize?? */
if (toktype == KG_TOK_WRAP_MSG) {
......@@ -149,7 +153,7 @@ krb5_read_token(struct krb5_ctx *ctx,
switch (signalg) {
case SGN_ALG_DES_MAC_MD5:
ret = krb5_make_checksum(checksum_type, ptr - 2,
ret = make_checksum(checksum_type, ptr - 2, 8,
message_buffer, &md5cksum);
if (ret)
goto out;
......@@ -174,7 +178,7 @@ krb5_read_token(struct krb5_ctx *ctx,
if (qop_state)
*qop_state = GSS_C_QOP_DEFAULT;
now = jiffies;
now = get_seconds();
ret = GSS_S_CONTEXT_EXPIRED;
if (now > ctx->endtime)
......
......@@ -36,9 +36,11 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/gss_asn1.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/gss_api.h>
......@@ -51,146 +53,186 @@
static LIST_HEAD(registered_mechs);
static spinlock_t registered_mechs_lock = SPIN_LOCK_UNLOCKED;
/* Reference counting: The reference count includes the reference in the
* global registered_mechs list. That reference will never disappear
* (so the reference count will never go below 1) until after the mech
* is removed from the list. Nothing can be removed from the list without
* first getting the registered_mechs_lock, so a gss_api_mech won't disappear
* from underneath us while we hold the registered_mechs_lock. */
int
gss_mech_register(struct xdr_netobj * mech_type, struct gss_api_ops * ops)
static void
gss_mech_free(struct gss_api_mech *gm)
{
struct gss_api_mech *gm;
struct pf_desc *pf;
int i;
if (!(gm = kmalloc(sizeof(*gm), GFP_KERNEL))) {
printk("Failed to allocate memory in gss_mech_register");
return -1;
for (i = 0; i < gm->gm_pf_num; i++) {
pf = &gm->gm_pfs[i];
if (pf->auth_domain_name)
kfree(pf->auth_domain_name);
pf->auth_domain_name = NULL;
}
gm->gm_oid.len = mech_type->len;
if (!(gm->gm_oid.data = kmalloc(mech_type->len, GFP_KERNEL))) {
kfree(gm);
printk("Failed to allocate memory in gss_mech_register");
return -1;
}
memcpy(gm->gm_oid.data, mech_type->data, mech_type->len);
/* We're counting the reference in the registered_mechs list: */
atomic_set(&gm->gm_count, 1);
gm->gm_ops = ops;
spin_lock(&registered_mechs_lock);
list_add(&gm->gm_list, &registered_mechs);
spin_unlock(&registered_mechs_lock);
dprintk("RPC: gss_mech_register: registered mechanism with oid:\n");
print_hexl((u32 *)mech_type->data, mech_type->len, 0);
return 0;
}
/* The following must be called with spinlock held: */
int
do_gss_mech_unregister(struct gss_api_mech *gm)
static inline char *
make_auth_domain_name(char *name)
{
static char *prefix = "gss/";
char *new;
list_del(&gm->gm_list);
new = kmalloc(strlen(name) + strlen(prefix) + 1, GFP_KERNEL);
if (new) {
strcpy(new, prefix);
strcat(new, name);
}
return new;
}
dprintk("RPC: unregistered mechanism with oid:\n");
print_hexl((u32 *)gm->gm_oid.data, gm->gm_oid.len, 0);
if (!gss_mech_put(gm)) {
dprintk("RPC: We just unregistered a gss_mechanism which"
" someone is still using.\n");
return -1;
} else {
return 0;
static int
gss_mech_svc_setup(struct gss_api_mech *gm)
{
struct pf_desc *pf;
int i, status;
for (i = 0; i < gm->gm_pf_num; i++) {
pf = &gm->gm_pfs[i];
pf->auth_domain_name = make_auth_domain_name(pf->name);
status = -ENOMEM;
if (pf->auth_domain_name == NULL)
goto out;
status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor,
pf->auth_domain_name);
if (status)
goto out;
}
return 0;
out:
gss_mech_free(gm);
return status;
}
int
gss_mech_unregister(struct gss_api_mech *gm)
gss_mech_register(struct gss_api_mech *gm)
{
int status;
status = gss_mech_svc_setup(gm);
if (status)
return status;
spin_lock(&registered_mechs_lock);
status = do_gss_mech_unregister(gm);
list_add(&gm->gm_list, &registered_mechs);
spin_unlock(&registered_mechs_lock);
return status;
dprintk("RPC: registered gss mechanism %s\n", gm->gm_name);
return 0;
}
int
gss_mech_unregister_all(void)
{
struct list_head *pos;
struct gss_api_mech *gm;
int status = 0;
EXPORT_SYMBOL(gss_mech_register);
void
gss_mech_unregister(struct gss_api_mech *gm)
{
spin_lock(&registered_mechs_lock);
while (!list_empty(&registered_mechs)) {
pos = registered_mechs.next;
gm = list_entry(pos, struct gss_api_mech, gm_list);
if (do_gss_mech_unregister(gm))
status = -1;
}
list_del(&gm->gm_list);
spin_unlock(&registered_mechs_lock);
return status;
dprintk("RPC: unregistered gss mechanism %s\n", gm->gm_name);
gss_mech_free(gm);
}
EXPORT_SYMBOL(gss_mech_unregister);
struct gss_api_mech *
gss_mech_get(struct gss_api_mech *gm)
{
atomic_inc(&gm->gm_count);
__module_get(gm->gm_owner);
return gm;
}
EXPORT_SYMBOL(gss_mech_get);
struct gss_api_mech *
gss_mech_get_by_OID(struct xdr_netobj *mech_type)
gss_mech_get_by_name(char *name)
{
struct gss_api_mech *pos, *gm = NULL;
struct gss_api_mech *pos, *gm = NULL;
dprintk("RPC: gss_mech_get_by_OID searching for mechanism with OID:\n");
print_hexl((u32 *)mech_type->data, mech_type->len, 0);
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
if ((pos->gm_oid.len == mech_type->len)
&& !memcmp(pos->gm_oid.data, mech_type->data,
mech_type->len)) {
gm = gss_mech_get(pos);
if (0 == strcmp(name, pos->gm_name)) {
if (!try_module_get(pos->gm_owner))
continue;
gm = pos;
break;
}
}
spin_unlock(&registered_mechs_lock);
dprintk("RPC: gss_mech_get_by_OID %s it\n", gm ? "found" : "didn't find");
return gm;
}
EXPORT_SYMBOL(gss_mech_get_by_name);
static inline int
mech_supports_pseudoflavor(struct gss_api_mech *gm, u32 pseudoflavor)
{
int i;
for (i = 0; i < gm->gm_pf_num; i++) {
if (gm->gm_pfs[i].pseudoflavor == pseudoflavor)
return 1;
}
return 0;
}
struct gss_api_mech *
gss_mech_get_by_name(char *name)
gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
{
struct gss_api_mech *pos, *gm = NULL;
struct gss_api_mech *pos, *gm = NULL;
spin_lock(&registered_mechs_lock);
list_for_each_entry(pos, &registered_mechs, gm_list) {
if (0 == strcmp(name, pos->gm_ops->name)) {
gm = gss_mech_get(pos);
break;
if (!try_module_get(pos->gm_owner))
continue;
if (!mech_supports_pseudoflavor(pos, pseudoflavor)) {
module_put(pos->gm_owner);
continue;
}
gm = pos;
break;
}
spin_unlock(&registered_mechs_lock);
return gm;
}
EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
u32
gss_pseudoflavor_to_service(struct gss_api_mech *gm, u32 pseudoflavor)
{
int i;
for (i = 0; i < gm->gm_pf_num; i++) {
if (gm->gm_pfs[i].pseudoflavor == pseudoflavor)
return gm->gm_pfs[i].service;
}
return 0;
}
int
gss_mech_put(struct gss_api_mech * gm)
EXPORT_SYMBOL(gss_pseudoflavor_to_service);
char *
gss_service_to_auth_domain_name(struct gss_api_mech *gm, u32 service)
{
if (atomic_dec_and_test(&gm->gm_count)) {
if (gm->gm_oid.len >0)
kfree(gm->gm_oid.data);
kfree(gm);
return 1;
} else {
return 0;
int i;
for (i = 0; i < gm->gm_pf_num; i++) {
if (gm->gm_pfs[i].service == service)
return gm->gm_pfs[i].auth_domain_name;
}
return NULL;
}
EXPORT_SYMBOL(gss_service_to_auth_domain_name);
void
gss_mech_put(struct gss_api_mech * gm)
{
module_put(gm->gm_owner);
}
EXPORT_SYMBOL(gss_mech_put);
/* The mech could probably be determined from the token instead, but it's just
* as easy for now to pass it in. */
u32
......@@ -244,7 +286,8 @@ gss_verify_mic(struct gss_ctx *context_handle,
u32
gss_delete_sec_context(struct gss_ctx **context_handle)
{
dprintk("gss_delete_sec_context deleting %p\n",*context_handle);
dprintk("RPC: gss_delete_sec_context deleting %p\n",
*context_handle);
if (!*context_handle)
return(GSS_S_NO_CONTEXT);
......
......@@ -82,12 +82,13 @@ gss_register_triple(u32 pseudoflavor, struct gss_api_mech *mech,
spin_lock(&registered_triples_lock);
if (do_lookup_triple_by_pseudoflavor(pseudoflavor)) {
printk("Registered pseudoflavor %d again\n", pseudoflavor);
printk(KERN_WARNING "RPC: Registered pseudoflavor %d again\n",
pseudoflavor);
goto err_unlock;
}
list_add(&triple->triples, &registered_triples);
spin_unlock(&registered_triples_lock);
dprintk("RPC: registered pseudoflavor %d\n", pseudoflavor);
dprintk("RPC: registered pseudoflavor %d\n", pseudoflavor);
return 0;
......@@ -145,7 +146,7 @@ gss_cmp_triples(u32 oid_len, char *oid_data, u32 qop, u32 service)
oid.len = oid_len;
oid.data = oid_data;
dprintk("RPC: gss_cmp_triples \n");
dprintk("RPC: gss_cmp_triples\n");
print_sec_triple(&oid,qop,service);
spin_lock(&registered_triples_lock);
......@@ -158,7 +159,7 @@ gss_cmp_triples(u32 oid_len, char *oid_data, u32 qop, u32 service)
}
}
spin_unlock(&registered_triples_lock);
dprintk("RPC: gss_cmp_triples return %d\n", pseudoflavor);
dprintk("RPC: gss_cmp_triples return %d\n", pseudoflavor);
return pseudoflavor;
}
......@@ -193,8 +194,8 @@ gss_pseudoflavor_to_service(u32 pseudoflavor)
triple = do_lookup_triple_by_pseudoflavor(pseudoflavor);
spin_unlock(&registered_triples_lock);
if (!triple) {
dprintk("RPC: gss_pseudoflavor_to_service called with"
" unsupported pseudoflavor %d\n", pseudoflavor);
dprintk("RPC: gss_pseudoflavor_to_service called with unsupported pseudoflavor %d\n",
pseudoflavor);
return 0;
}
return triple->service;
......@@ -211,8 +212,8 @@ gss_pseudoflavor_to_mech(u32 pseudoflavor) {
if (triple)
mech = gss_mech_get(triple->mech);
else
dprintk("RPC: gss_pseudoflavor_to_mech called with"
" unsupported pseudoflavor %d\n", pseudoflavor);
dprintk("RPC: gss_pseudoflavor_to_mech called with unsupported pseudoflavor %d\n",
pseudoflavor);
return mech;
}
......@@ -223,8 +224,8 @@ gss_pseudoflavor_to_mechOID(u32 pseudoflavor, struct xdr_netobj * oid)
mech = gss_pseudoflavor_to_mech(pseudoflavor);
if (!mech) {
dprintk("RPC: gss_pseudoflavor_to_mechOID called with"
" unsupported pseudoflavor %d\n", pseudoflavor);
dprintk("RPC: gss_pseudoflavor_to_mechOID called with unsupported pseudoflavor %d\n",
pseudoflavor);
return -1;
}
oid->len = mech->gm_oid.len;
......
......@@ -10,26 +10,28 @@
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_asn1.h>
#include <linux/sunrpc/gss_krb5.h>
/* sec_triples: */
EXPORT_SYMBOL(gss_register_triple);
EXPORT_SYMBOL(gss_unregister_triple);
EXPORT_SYMBOL(gss_cmp_triples);
EXPORT_SYMBOL(gss_pseudoflavor_to_mechOID);
EXPORT_SYMBOL(gss_pseudoflavor_supported);
EXPORT_SYMBOL(gss_pseudoflavor_to_service);
/* svcauth_gss.c: */
EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
/* registering gss mechanisms to the mech switching code: */
EXPORT_SYMBOL(gss_mech_register);
EXPORT_SYMBOL(gss_mech_unregister);
EXPORT_SYMBOL(gss_mech_get);
EXPORT_SYMBOL(gss_mech_get_by_OID);
EXPORT_SYMBOL(gss_mech_get_by_pseudoflavor);
EXPORT_SYMBOL(gss_mech_get_by_name);
EXPORT_SYMBOL(gss_mech_put);
EXPORT_SYMBOL(gss_pseudoflavor_to_service);
EXPORT_SYMBOL(gss_service_to_auth_domain_name);
/* generic functionality in gss code: */
EXPORT_SYMBOL(g_make_token_header);
EXPORT_SYMBOL(g_verify_token_header);
EXPORT_SYMBOL(g_token_size);
EXPORT_SYMBOL(make_checksum);
EXPORT_SYMBOL(krb5_encrypt);
EXPORT_SYMBOL(krb5_decrypt);
/* debug */
EXPORT_SYMBOL(print_hexl);
......@@ -570,14 +570,14 @@ gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
}
if (gc->gc_seq > MAXSEQ) {
dprintk("svcauth_gss: discarding request with large"
" sequence number %d\n", gc->gc_seq);
dprintk("RPC: svcauth_gss: discarding request with large sequence number %d\n",
gc->gc_seq);
*authp = rpcsec_gsserr_ctxproblem;
return SVC_DENIED;
}
if (!gss_check_seq_num(rsci, gc->gc_seq)) {
dprintk("svcauth_gss: discarding request with old"
" sequence number %d\n", gc->gc_seq);
dprintk("RPC: svcauth_gss: discarding request with old sequence number %d\n",
gc->gc_seq);
return SVC_DROP;
}
return SVC_OK;
......@@ -617,19 +617,15 @@ struct gss_domain {
u32 pseudoflavor;
};
/* XXX this should be done in gss_pseudoflavors, and shouldn't be hardcoded: */
static struct auth_domain *
find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
{
switch(gss_get_pseudoflavor(ctx, 0, svc)) {
case RPC_AUTH_GSS_KRB5:
return auth_domain_find("gss/krb5");
case RPC_AUTH_GSS_KRB5I:
return auth_domain_find("gss/krb5i");
case RPC_AUTH_GSS_KRB5P:
return auth_domain_find("gss/krb5p");
}
return NULL;
char *name;
name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
if (!name)
return NULL;
return auth_domain_find(name);
}
int
......@@ -637,19 +633,17 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
{
struct gss_domain *new;
struct auth_domain *test;
static char *prefix = "gss/";
int stat = -1;
int stat = -ENOMEM;
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (!new)
goto out;
cache_init(&new->h.h);
atomic_inc(&new->h.h.refcnt);
new->h.name = kmalloc(strlen(name) + strlen(prefix) + 1, GFP_KERNEL);
new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
if (!new->h.name)
goto out_free_dom;
strcpy(new->h.name, prefix);
strcat(new->h.name, name);
strcpy(new->h.name, name);
new->h.flavour = RPC_AUTH_GSS;
new->pseudoflavor = pseudoflavor;
new->h.h.expiry_time = NEVER;
......@@ -670,6 +664,8 @@ svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
return stat;
}
EXPORT_SYMBOL(svcauth_gss_register_pseudoflavor);
static inline int
read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
{
......@@ -755,7 +751,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
u32 *reject_stat = resv->iov_base + resv->iov_len;
int ret;
dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len);
dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",argv->iov_len);
*authp = rpc_autherr_badcred;
if (!svcdata)
......
......@@ -788,13 +788,11 @@ static void
call_timeout(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_timeout *to = &task->tk_rqstp->rq_timeout;
if (xprt_adjust_timeout(to)) {
if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
goto retry;
}
to->to_retries = clnt->cl_timeout.to_retries;
dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
if (RPC_IS_SOFT(task)) {
......
......@@ -39,6 +39,7 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
for (i = 0; i < 5; i++) {
rt->srtt[i] = init;
rt->sdrtt[i] = RPC_RTO_INIT;
rt->ntimeouts[i] = 0;
}
}
......
......@@ -351,36 +351,58 @@ xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
xprt->cwnd = cwnd;
}
/*
* Reset the major timeout value
*/
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
struct rpc_timeout *to = &req->rq_xprt->timeout;
req->rq_majortimeo = req->rq_timeout;
if (to->to_exponential)
req->rq_majortimeo <<= to->to_retries;
else
req->rq_majortimeo += to->to_increment * to->to_retries;
if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
req->rq_majortimeo = to->to_maxval;
req->rq_majortimeo += jiffies;
}
/*
* Adjust timeout values etc for next retransmit
*/
int
xprt_adjust_timeout(struct rpc_timeout *to)
int xprt_adjust_timeout(struct rpc_rqst *req)
{
if (to->to_retries > 0) {
struct rpc_xprt *xprt = req->rq_xprt;
struct rpc_timeout *to = &xprt->timeout;
int status = 0;
if (time_before(jiffies, req->rq_majortimeo)) {
if (to->to_exponential)
to->to_current <<= 1;
req->rq_timeout <<= 1;
else
to->to_current += to->to_increment;
if (to->to_maxval && to->to_current >= to->to_maxval)
to->to_current = to->to_maxval;
req->rq_timeout += to->to_increment;
if (to->to_maxval && req->rq_timeout >= to->to_maxval)
req->rq_timeout = to->to_maxval;
req->rq_retries++;
pprintk("RPC: %lu retrans\n", jiffies);
} else {
if (to->to_exponential)
to->to_initval <<= 1;
else
to->to_initval += to->to_increment;
if (to->to_maxval && to->to_initval >= to->to_maxval)
to->to_initval = to->to_maxval;
to->to_current = to->to_initval;
req->rq_timeout = to->to_initval;
req->rq_retries = 0;
xprt_reset_majortimeo(req);
/* Reset the RTT counters == "slow start" */
spin_lock_bh(&xprt->sock_lock);
rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
spin_unlock_bh(&xprt->sock_lock);
pprintk("RPC: %lu timeout\n", jiffies);
status = -ETIMEDOUT;
}
if (!to->to_current) {
printk(KERN_WARNING "xprt_adjust_timeout: to_current = 0!\n");
to->to_current = 5 * HZ;
if (req->rq_timeout == 0) {
printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
req->rq_timeout = 5 * HZ;
}
pprintk("RPC: %lu %s\n", jiffies,
to->to_retries? "retrans" : "timeout");
return to->to_retries-- > 0;
return status;
}
/*
......@@ -537,8 +559,17 @@ void xprt_connect(struct rpc_task *task)
task->tk_timeout = RPC_CONNECT_TIMEOUT;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate))
schedule_work(&xprt->sock_connect);
if (!test_and_set_bit(XPRT_CONNECTING, &xprt->sockstate)) {
/* Note: if we are here due to a dropped connection
* we delay reconnecting by RPC_REESTABLISH_TIMEOUT/HZ
* seconds
*/
if (xprt->sock != NULL)
schedule_delayed_work(&xprt->sock_connect,
RPC_REESTABLISH_TIMEOUT);
else
schedule_work(&xprt->sock_connect);
}
return;
out_write:
xprt_release_write(xprt, task);
......@@ -566,7 +597,6 @@ xprt_connect_status(struct rpc_task *task)
case -ECONNREFUSED:
case -ECONNRESET:
case -ENOTCONN:
rpc_delay(task, RPC_REESTABLISH_TIMEOUT);
return;
case -ETIMEDOUT:
dprintk("RPC: %4d xprt_connect_status: timed out\n",
......@@ -1166,6 +1196,7 @@ xprt_transmit(struct rpc_task *task)
/* Add request to the receive list */
list_add_tail(&req->rq_list, &xprt->recv);
spin_unlock_bh(&xprt->sock_lock);
xprt_reset_majortimeo(req);
}
} else if (!req->rq_bytes_sent)
return;
......@@ -1221,7 +1252,7 @@ xprt_transmit(struct rpc_task *task)
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) {
task->tk_timeout = req->rq_timeout.to_current;
task->tk_timeout = req->rq_timeout;
rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
spin_unlock_bh(&xprt->sock_lock);
......@@ -1248,13 +1279,11 @@ xprt_transmit(struct rpc_task *task)
if (!xprt->nocong) {
int timer = task->tk_msg.rpc_proc->p_timer;
task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer);
task->tk_timeout <<= clnt->cl_timeout.to_retries
- req->rq_timeout.to_retries;
if (task->tk_timeout > req->rq_timeout.to_maxval)
task->tk_timeout = req->rq_timeout.to_maxval;
task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries;
if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0)
task->tk_timeout = xprt->timeout.to_maxval;
} else
task->tk_timeout = req->rq_timeout.to_current;
task->tk_timeout = req->rq_timeout;
/* Don't race with disconnect */
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
......@@ -1324,7 +1353,7 @@ xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
struct rpc_rqst *req = task->tk_rqstp;
req->rq_timeout = xprt->timeout;
req->rq_timeout = xprt->timeout.to_initval;
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_xid = xprt_alloc_xid(xprt);
......@@ -1381,7 +1410,6 @@ xprt_default_timeout(struct rpc_timeout *to, int proto)
void
xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
to->to_current =
to->to_initval =
to->to_increment = incr;
to->to_maxval = incr * retr;
......@@ -1446,7 +1474,6 @@ xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
/* Set timeout parameters */
if (to) {
xprt->timeout = *to;
xprt->timeout.to_current = to->to_initval;
} else
xprt_default_timeout(&xprt->timeout, xprt->prot);
......
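As a worked example of the new per-request backoff (to_current is gone; each rpc_rqst now carries its own rq_timeout plus a wall-clock rq_majortimeo), here is a small stand-alone simulation. The parameter values are hypothetical and times are in seconds rather than jiffies.
#include <stdio.h>
/* Hypothetical non-exponential timeout: 5 s initial, 5 s increment,
 * 3 retries, 20 s cap.  Mirrors xprt_reset_majortimeo()/xprt_adjust_timeout(). */
int main(void)
{
	unsigned long to_initval = 5, to_increment = 5, to_maxval = 20;
	unsigned int to_retries = 3;
	unsigned long now = 0;
	unsigned long rq_timeout = to_initval;			/* xprt_request_init() */
	unsigned long rq_majortimeo;
	rq_majortimeo = rq_timeout + to_increment * to_retries;	/* xprt_reset_majortimeo() */
	if (rq_majortimeo > to_maxval)
		rq_majortimeo = to_maxval;
	rq_majortimeo += now;
	for (;;) {
		now += rq_timeout;		/* retransmit timer expires rq_timeout later */
		if (now < rq_majortimeo) {	/* minor timeout: back off and retransmit */
			rq_timeout += to_increment;
			if (rq_timeout >= to_maxval)
				rq_timeout = to_maxval;
			printf("t=%lus: minor timeout, retransmit with %lus timeout\n",
			       now, rq_timeout);
		} else {			/* major timeout: xprt_adjust_timeout() */
			printf("t=%lus: major timeout, -ETIMEDOUT\n", now);
			break;			/* call_timeout() fails soft tasks here */
		}
	}
	return 0;
}
With these values the request retransmits at t=5s and t=15s with growing per-request timeouts, and the major timeout is reported at t=30s, after which call_timeout() either fails a soft task or restarts the cycle from to_initval.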