Commit 390bb9ba authored by Steve French

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/repos/c/cifs/linux-2.5cifs
parents 20e743bb 53cb4fea
Version 1.17
------------
Update number of blocks in file so the du command is happier (in Linux a fake
blocksize of 512 is required for calculating the number of blocks in an inode;
illustrated in the sketch after this changelog).
Fix prepare write of partial pages to read in data from the server if possible.
Version 1.16
------------
Fix incorrect file size in file handle based setattr on big endian hardware.
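As a rough sketch of the block-count change in 1.17 above (the helper name is hypothetical, not part of the driver): userspace interprets st_blocks in 512-byte units regardless of the blocksize a filesystem reports, so the count has to be derived from a fixed 512-byte unit rather than the negotiated buffer size.
/* Sketch only: i_blocks is counted in 512-byte units no matter what
   blocksize the filesystem reports to userspace. */
static inline unsigned long cifs_calc_blocks(unsigned long long alloc_size)
{
	return (unsigned long)((alloc_size + 512 - 1) >> 9);
}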
......
......@@ -93,5 +93,5 @@ extern int cifs_setxattr(struct dentry *, const char *, const void *,
size_t, int);
extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
#define CIFS_VERSION "1.16"
#define CIFS_VERSION "1.17"
#endif /* _CIFSFS_H */
......@@ -159,6 +159,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
struct cifsFileInfo * pCifsFile = NULL;
struct cifsInodeInfo * pCifsInode;
int disposition = FILE_OVERWRITE_IF;
int write_only = FALSE;
xid = GetXid();
......@@ -176,9 +177,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if(nd) {
if ((nd->intent.open.flags & O_ACCMODE) == O_RDONLY)
desiredAccess = GENERIC_READ;
else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY)
else if ((nd->intent.open.flags & O_ACCMODE) == O_WRONLY) {
desiredAccess = GENERIC_WRITE;
else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) {
write_only = TRUE;
} else if ((nd->intent.open.flags & O_ACCMODE) == O_RDWR) {
/* GENERIC_ALL is too much permission to request */
/* can cause unnecessary access denied on create */
/* desiredAccess = GENERIC_ALL; */
......@@ -262,16 +264,25 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
pCifsFile->invalidHandle = FALSE;
pCifsFile->closePend = FALSE;
init_MUTEX(&pCifsFile->fh_sem);
/* pCifsFile->pfile = file; */ /* put in at open time */
/* put the following in at open now */
/* pCifsFile->pfile = file; */
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist,&pTcon->openFileList);
pCifsInode = CIFS_I(newinode);
if(pCifsInode) {
list_add(&pCifsFile->flist,&pCifsInode->openFileList);
/* if readable file instance put first in list*/
if (write_only == TRUE) {
list_add_tail(&pCifsFile->flist,
&pCifsInode->openFileList);
} else {
list_add(&pCifsFile->flist,
&pCifsInode->openFileList);
}
if((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
pCifsInode->clientCanCacheAll = TRUE;
pCifsInode->clientCanCacheRead = TRUE;
cFYI(1,("Exclusive Oplock granted on inode %p",newinode));
cFYI(1,("Exclusive Oplock granted on inode %p",
newinode));
} else if((oplock & 0xF) == OPLOCK_READ)
pCifsInode->clientCanCacheRead = TRUE;
}
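The ordering above matters because a later reader of the list takes the first usable entry instead of walking the whole list. A minimal sketch of that consumer side (illustrative only, not code from this commit):
/* With write-only handles appended via list_add_tail() and readable
   handles prepended via list_add(), the head of openFileList is the
   preferred handle for a read. */
struct cifsFileInfo *open_file = NULL;
if (!list_empty(&pCifsInode->openFileList))
	open_file = list_entry(pCifsInode->openFileList.next,
			       struct cifsFileInfo, flist);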
......
......@@ -173,7 +173,14 @@ cifs_open(struct inode *inode, struct file *file)
list_add(&pCifsFile->tlist,&pTcon->openFileList);
pCifsInode = CIFS_I(file->f_dentry->d_inode);
if(pCifsInode) {
list_add(&pCifsFile->flist,&pCifsInode->openFileList);
/* want handles we can use to read with first */
/* in the list so we do not have to walk the */
/* list to search for one in prepare_write */
if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
list_add_tail(&pCifsFile->flist,&pCifsInode->openFileList);
} else {
list_add(&pCifsFile->flist,&pCifsInode->openFileList);
}
write_unlock(&GlobalSMBSeslock);
write_unlock(&file->f_owner.lock);
if(pCifsInode->clientCanCacheRead) {
......@@ -924,6 +931,11 @@ cifs_read(struct file * file, char *read_data, size_t read_size,
}
open_file = (struct cifsFileInfo *)file->private_data;
if((file->f_flags & O_ACCMODE) == O_WRONLY) {
cFYI(1,("attempting read on write only file instance"));
}
for (total_read = 0,current_offset=read_data; read_size > total_read;
total_read += bytes_read,current_offset+=bytes_read) {
current_read_size = min_t(const int,read_size - total_read,cifs_sb->rsize);
......@@ -1169,11 +1181,42 @@ cifs_readpages(struct file *file, struct address_space *mapping,
return rc;
}
static int cifs_readpage_worker(struct file *file, struct page *page, loff_t * poffset)
{
char * read_data;
int rc;
page_cache_get(page);
read_data = kmap(page);
/* for reads over a certain size could initiate async read ahead */
rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
if (rc < 0)
goto io_error;
else {
cFYI(1,("Bytes read %d ",rc));
}
file->f_dentry->d_inode->i_atime = CURRENT_TIME;
if(PAGE_CACHE_SIZE > rc) {
memset(read_data+rc, 0, PAGE_CACHE_SIZE - rc);
}
flush_dcache_page(page);
SetPageUptodate(page);
rc = 0;
io_error:
kunmap(page);
page_cache_release(page);
return rc;
}
static int
cifs_readpage(struct file *file, struct page *page)
{
loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
char * read_data;
int rc = -EACCES;
int xid;
......@@ -1184,34 +1227,12 @@ cifs_readpage(struct file *file, struct page *page)
return -EBADF;
}
cFYI(0,("readpage %p at offset %d 0x%x\n",page,(int)offset,(int)offset));
cFYI(1,("readpage %p at offset %d 0x%x\n",page,(int)offset,(int)offset));
page_cache_get(page);
read_data = kmap(page);
/* for reads over a certain size could initiate async read ahead */
rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, &offset);
if (rc < 0)
goto io_error;
else {
cFYI(1,("Bytes read %d ",rc));
}
rc = cifs_readpage_worker(file,page,&offset);
file->f_dentry->d_inode->i_atime = CURRENT_TIME;
if(PAGE_CACHE_SIZE > rc) {
memset(read_data+rc, 0, PAGE_CACHE_SIZE - rc);
}
flush_dcache_page(page);
SetPageUptodate(page);
rc = 0;
io_error:
kunmap(page);
unlock_page(page);
page_cache_release(page);
FreeXid(xid);
return rc;
}
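Both page-read entry points now funnel through the new helper; only the page-lock handling differs, as sketched here (illustrative, not part of the diff):
/* cifs_readpage() unlocks the page when it is done, while
   cifs_prepare_write() leaves it locked because the generic write path
   follows up with commit_write on the same locked page. */
loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
rc = cifs_readpage_worker(file, page, &offset);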
......@@ -1276,8 +1297,11 @@ fill_in_inode(struct inode *tmp_inode,
}
i_size_write(tmp_inode,pfindData->EndOfFile);
tmp_inode->i_blocks =
(tmp_inode->i_blksize - 1 + pfindData->AllocationSize) >> tmp_inode->i_blkbits;
/* 512 bytes (2**9) is the fake blocksize that must be used */
/* for this calculation, even though the reported blocksize is larger */
tmp_inode->i_blocks = (512 - 1 + pfindData->AllocationSize) >> 9;
if (pfindData->AllocationSize < pfindData->EndOfFile)
cFYI(1, ("Possible sparse file: allocation size less than end of file "));
cFYI(1,
......@@ -1350,8 +1374,10 @@ unix_fill_in_inode(struct inode *tmp_inode,
pfindData->NumOfBytes = le64_to_cpu(pfindData->NumOfBytes);
pfindData->EndOfFile = le64_to_cpu(pfindData->EndOfFile);
i_size_write(tmp_inode,pfindData->EndOfFile);
tmp_inode->i_blocks =
(tmp_inode->i_blksize - 1 + pfindData->NumOfBytes) >> tmp_inode->i_blkbits;
/* 512 bytes (2**9) is the fake blocksize that must be used */
/* for this calculation, not the real blocksize */
tmp_inode->i_blocks = (512 - 1 + pfindData->NumOfBytes) >> 9;
if (S_ISREG(tmp_inode->i_mode)) {
cFYI(1, ("File inode"));
......@@ -1393,12 +1419,15 @@ construct_dentry(struct qstr *qstring, struct file *file,
/* BB overwrite the old name? i.e. tmp_dentry->d_name and tmp_dentry->d_name.len ?? */
if(*ptmp_inode == NULL) {
*ptmp_inode = new_inode(file->f_dentry->d_sb);
if(*ptmp_inode == NULL)
return;
d_instantiate(tmp_dentry, *ptmp_inode);
}
} else {
tmp_dentry = d_alloc(file->f_dentry, qstring);
if(tmp_dentry == NULL) {
cERROR(1,("Failed allocating dentry"));
*ptmp_inode = NULL;
return;
}
......@@ -1406,6 +1435,8 @@ construct_dentry(struct qstr *qstring, struct file *file,
tmp_dentry->d_op = &cifs_dentry_ops;
cFYI(0, (" instantiate dentry 0x%p with inode 0x%p ",
tmp_dentry, *ptmp_inode));
if(*ptmp_inode == NULL)
return;
d_instantiate(tmp_dentry, *ptmp_inode);
d_rehash(tmp_dentry);
}
......@@ -1462,7 +1493,9 @@ cifs_filldir(struct qstr *pqstring, FILE_DIRECTORY_INFO * pfindData,
pqstring->len = pfindData->FileNameLength;
construct_dentry(pqstring, file, &tmp_inode, &tmp_dentry);
if((tmp_inode == NULL) || (tmp_dentry == NULL)) {
return -ENOMEM;
}
fill_in_inode(tmp_inode, pfindData, &object_type);
rc = filldir(direntry, pfindData->FileName, pqstring->len, file->f_pos,
tmp_inode->i_ino, object_type);
......@@ -1488,6 +1521,9 @@ cifs_filldir_unix(struct qstr *pqstring,
pqstring->len = strnlen(pUnixFindData->FileName, MAX_PATHCONF);
construct_dentry(pqstring, file, &tmp_inode, &tmp_dentry);
if((tmp_inode == NULL) || (tmp_dentry == NULL)) {
return -ENOMEM;
}
unix_fill_in_inode(tmp_inode, pUnixFindData, &object_type);
rc = filldir(direntry, pUnixFindData->FileName, pqstring->len,
......@@ -1950,17 +1986,30 @@ cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
int cifs_prepare_write(struct file *file, struct page *page,
unsigned from, unsigned to)
{
int rc = 0;
loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
cFYI(1,("prepare write for page %p from %d to %d",page,from,to));
if (!PageUptodate(page)) {
if (to - from != PAGE_CACHE_SIZE) {
/* if (to - from != PAGE_CACHE_SIZE) {
void *kaddr = kmap_atomic(page, KM_USER0);
memset(kaddr, 0, from);
memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
flush_dcache_page(page);
kunmap_atomic(kaddr, KM_USER0);
}
SetPageUptodate(page);
} */
/* If we are writing a full page it will be up to date,
no need to read from the server */
if((to==PAGE_CACHE_SIZE) && (from == 0))
SetPageUptodate(page);
/* might as well read a page, it is fast enough */
rc = cifs_readpage_worker(file,page,&offset);
/* if this returns an error should we try using another
file handle if there is one - how would we lock it
to prevent close of that handle racing with this read? */
}
/* BB should we pass any errors back? e.g. if we do not have read access to the file */
return 0;
}
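For context, a sketch of the caller's side (pos and bytes are hypothetical names, not from this commit): the generic write path prepares only the byte range being modified, so bytes outside [from, to) must already be valid in the page, which is why the partial-page case now reads from the server.
/* Illustrative fragment of a generic write-path caller: only [from, to)
   is about to be copied into the page, so prepare_write must make the
   rest of the page valid first. */
unsigned from = pos & (PAGE_CACHE_SIZE - 1);	/* start of the write within the page */
unsigned to = from + bytes;			/* end of the write within the page */
rc = mapping->a_ops->prepare_write(file, page, from, to);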
......@@ -1969,8 +2018,7 @@ struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
.readpages = cifs_readpages,
.writepage = cifs_writepage,
.prepare_write = simple_prepare_write, /* BB fixme BB */
/* .prepare_write = cifs_prepare_write, */ /* BB removeme BB */
.prepare_write = cifs_prepare_write,
.commit_write = cifs_commit_write,
/* .sync_page = cifs_sync_page, */
/*.direct_IO = */
......
......@@ -130,8 +130,18 @@ cifs_get_inode_info_unix(struct inode **pinode,
and blkbits set in superblock so 2**blkbits and blksize will match */
/* inode->i_blksize =
(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
inode->i_blocks =
(inode->i_blksize - 1 + findData.NumOfBytes) >> inode->i_blkbits;
/* This seems incredibly stupid but it turns out that
i_blocks is not related to (i_size / i_blksize), instead a
size of 512 is required to be used for calculating num blocks */
/* inode->i_blocks =
(inode->i_blksize - 1 + findData.NumOfBytes) >> inode->i_blkbits;*/
/* 512 bytes (2**9) is the fake blocksize that must be used */
/* for this calculation */
inode->i_blocks = (512 - 1 + findData.NumOfBytes) >> 9;
if (findData.NumOfBytes < findData.EndOfFile)
cFYI(1, ("Server inconsistency Error: it says allocation size less than end of file "));
......@@ -275,8 +285,10 @@ cifs_get_inode_info(struct inode **pinode, const unsigned char *search_path,
}
i_size_write(inode,le64_to_cpu(pfindData->EndOfFile));
pfindData->AllocationSize = le64_to_cpu(pfindData->AllocationSize);
inode->i_blocks =
(inode->i_blksize - 1 + pfindData->AllocationSize) >> inode->i_blkbits;
/* 512 bytes (2**9) is the fake blocksize that must be used */
/* for this calculation */
inode->i_blocks = (512 - 1 + pfindData->AllocationSize) >> 9;
inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
......