Commit eafe5916 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] i_size atomic access

From: Daniel McNeil <daniel@osdl.org>

This adds i_size_seqcount to the inode structure and then uses i_size_read() and
i_size_write() to provide atomic access to i_size.  This is a port of
Andrea Arcangeli's i_size atomic access patch from 2.4.  This only uses the
generic reader/writer consistent mechanism.
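
For reference, this is the pattern that description refers to (a minimal sketch, not taken from the patch; the names example_seq, example_size, example_read and example_write are made up for illustration).  The writer bumps a sequence count around its store; a reader retries whenever it observes the count change, so it never returns a half-updated 64-bit value.  The i_size_read()/i_size_write() helpers added to include/linux/fs.h below apply exactly this pattern to inode->i_size.

#include <linux/seqlock.h>

static seqcount_t example_seq;		/* illustrative counter; seqcount_init() it before use */
static loff_t example_size;		/* illustrative 64-bit value it protects */

/* Writer side: callers must already serialize writers (i_sem in this patch). */
static void example_write(loff_t new_size)
{
	write_seqcount_begin(&example_seq);
	example_size = new_size;
	write_seqcount_end(&example_seq);
}

/* Reader side: lockless, retries if it raced with example_write(). */
static loff_t example_read(void)
{
	unsigned int seq;
	loff_t size;

	do {
		seq = read_seqcount_begin(&example_seq);
		size = example_size;
	} while (read_seqcount_retry(&example_seq, seq));

	return size;
}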

Before:
mnm:/usr/src/25> size vmlinux
   text    data     bss     dec     hex filename
2229582 1027683  162436 3419701  342e35 vmlinux

After:
mnm:/usr/src/25> size vmlinux
   text    data     bss     dec     hex filename
2225642 1027655  162436 3415733  341eb5 vmlinux

3.9k more text, a lot of it fastpath :(

It's a very minor bug, and the fix has a fairly non-minor cost.  The most
compelling reason for fixing this is that writepage() checks i_size.  If it
sees a transient value it may decide that the page is outside i_size and will
refuse to write it.  Lost user data.
parent e9b94f6a
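
To make the writepage() hazard concrete, here is a hedged illustration (not part of the patch; example_extend is a made-up name): on a 32-bit CPU a 64-bit i_size store is issued as two 32-bit stores, so an unlocked reader can combine halves of the old and new values.

#include <linux/fs.h>

/* Illustration only: extending a file past the 4GB boundary. */
void example_extend(struct inode *inode)
{
	/* old i_size: 0x00000000ffffffff (4GB - 1) */
	inode->i_size = 0x100000000ULL;		/* two 32-bit stores, not atomic */

	/*
	 * A racing reader such as writepage() may transiently observe
	 * either mix of halves:
	 *   new low word + old high word = 0x0000000000000000
	 *   old low word + new high word = 0x00000001ffffffff
	 * Seeing 0 makes writepage() treat every dirty page as past EOF
	 * and skip it - the "lost user data" case above.
	 */
}
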
@@ -140,7 +140,8 @@ figure_loop_size(struct loop_device *lo)
 sector_t x;
 /* Compute loopsize in bytes */
-size = lo->lo_backing_file->f_dentry->d_inode->i_mapping->host->i_size;
+size = i_size_read(lo->lo_backing_file->f_dentry->
+d_inode->i_mapping->host);
 offset = lo->lo_offset;
 loopsize = size - offset;
 if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
......
@@ -65,7 +65,7 @@ int inode_setattr(struct inode * inode, struct iattr * attr)
 int error = 0;
 if (ia_valid & ATTR_SIZE) {
-if (attr->ia_size != inode->i_size) {
+if (attr->ia_size != i_size_read(inode)) {
 error = vmtruncate(inode, attr->ia_size);
 if (error || (ia_valid == ATTR_SIZE))
 goto out;
......
@@ -269,7 +269,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
 N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
 N_TRSIZE(ex) || N_DRSIZE(ex) ||
-bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+i_size_read(bprm->file->f_dentry->d_inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
 return -ENOEXEC;
 }
@@ -454,7 +454,7 @@ static int load_aout_library(struct file *file)
 /* We come in here for the regular a.out style of shared libraries */
 if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
 N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
-inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
+i_size_read(inode) < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
 goto out;
 }
......
@@ -29,7 +29,7 @@
 static sector_t max_block(struct block_device *bdev)
 {
 sector_t retval = ~((sector_t)0);
-loff_t sz = bdev->bd_inode->i_size;
+loff_t sz = i_size_read(bdev->bd_inode);
 if (sz) {
 unsigned int size = block_size(bdev);
@@ -161,7 +161,7 @@ static loff_t block_llseek(struct file *file, loff_t offset, int origin)
 bd_inode = file->f_dentry->d_inode->i_bdev->bd_inode;
 down(&bd_inode->i_sem);
-size = bd_inode->i_size;
+size = i_size_read(bd_inode);
 switch (origin) {
 case 2:
@@ -487,7 +487,7 @@ int check_disk_change(struct block_device *bdev)
 static void bd_set_size(struct block_device *bdev, loff_t size)
 {
 unsigned bsize = bdev_hardsect_size(bdev);
-bdev->bd_inode->i_size = size;
+i_size_write(bdev->bd_inode, size);
 while (bsize < PAGE_CACHE_SIZE) {
 if (size & bsize)
 break;
......
@@ -1721,7 +1721,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 BUG_ON(!PageLocked(page));
-last_block = (inode->i_size - 1) >> inode->i_blkbits;
+last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 if (!page_has_buffers(page)) {
 if (!PageUptodate(page))
@@ -2057,7 +2057,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 head = page_buffers(page);
 iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-lblock = (inode->i_size+blocksize-1) >> inode->i_blkbits;
+lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
 bh = head;
 nr = 0;
 i = 0;
@@ -2282,8 +2282,12 @@ int generic_commit_write(struct file *file, struct page *page,
 struct inode *inode = page->mapping->host;
 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
 __block_commit_write(inode,page,from,to);
+/*
+ * No need to use i_size_read() here, the i_size
+ * cannot change under us because we hold i_sem.
+ */
 if (pos > inode->i_size) {
-inode->i_size = pos;
+i_size_write(inode, pos);
 mark_inode_dirty(inode);
 }
 return 0;
@@ -2435,7 +2439,7 @@ int nobh_commit_write(struct file *file, struct page *page,
 set_page_dirty(page);
 if (pos > inode->i_size) {
-inode->i_size = pos;
+i_size_write(inode, pos);
 mark_inode_dirty(inode);
 }
 return 0;
@@ -2565,7 +2569,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 struct writeback_control *wbc)
 {
 struct inode * const inode = page->mapping->host;
-const unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+loff_t i_size = i_size_read(inode);
+const unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
 unsigned offset;
 void *kaddr;
@@ -2574,7 +2579,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 return __block_write_full_page(inode, page, get_block, wbc);
 /* Is the page fully outside i_size? (truncate in progress) */
-offset = inode->i_size & (PAGE_CACHE_SIZE-1);
+offset = i_size & (PAGE_CACHE_SIZE-1);
 if (page->index >= end_index+1 || !offset) {
 /*
 * The page may have dirty, unmapped buffers. For example,
......
@@ -757,7 +757,7 @@ static int do_direct_IO(struct dio *dio)
 char *kaddr;
 if (dio->block_in_file >=
-dio->inode->i_size>>blkbits) {
+i_size_read(dio->inode)>>blkbits) {
 /* We hit eof */
 page_cache_release(page);
 goto out;
@@ -943,13 +943,15 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 if (ret == 0)
 ret = dio->page_errors;
 if (ret == 0 && dio->result) {
+loff_t i_size = i_size_read(inode);
 ret = dio->result;
 /*
 * Adjust the return value if the read crossed a
 * non-block-aligned EOF.
 */
-if (rw == READ && (offset + ret > inode->i_size))
-ret = inode->i_size - offset;
+if (rw == READ && (offset + ret > i_size))
+ret = i_size - offset;
 }
 kfree(dio);
 }
......
@@ -1200,7 +1200,7 @@ static int ext3_journalled_commit_write(struct file *file,
 if (!partial)
 SetPageUptodate(page);
 if (pos > inode->i_size)
-inode->i_size = pos;
+i_size_write(inode, pos);
 EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
 if (inode->i_size > EXT3_I(inode)->i_disksize) {
 EXT3_I(inode)->i_disksize = inode->i_size;
@@ -1574,7 +1574,7 @@ static int ext3_direct_IO(int rw, struct kiocb *iocb,
 loff_t end = offset + ret;
 if (end > inode->i_size) {
 ei->i_disksize = end;
-inode->i_size = end;
+i_size_write(inode, end);
 err = ext3_mark_inode_dirty(handle, inode);
 if (!ret)
 ret = err;
......
@@ -189,6 +189,7 @@ void inode_init_once(struct inode *inode)
 INIT_LIST_HEAD(&inode->i_data.i_mmap);
 INIT_LIST_HEAD(&inode->i_data.i_mmap_shared);
 spin_lock_init(&inode->i_lock);
+i_size_ordered_init(inode);
 }
 static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
......
@@ -40,7 +40,7 @@ static int file_ioctl(struct file *filp,unsigned int cmd,unsigned long arg)
 return -EBADF;
 return put_user(inode->i_sb->s_blocksize, (int *) arg);
 case FIONREAD:
-return put_user(inode->i_size - filp->f_pos, (int *) arg);
+return put_user(i_size_read(inode) - filp->f_pos, (int *) arg);
 }
 if (filp->f_op && filp->f_op->ioctl)
 return filp->f_op->ioctl(inode, filp, cmd, arg);
......
@@ -328,8 +328,12 @@ int simple_commit_write(struct file *file, struct page *page,
 struct inode *inode = page->mapping->host;
 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+/*
+ * No need to use i_size_read() here, the i_size
+ * cannot change under us because we hold the i_sem.
+ */
 if (pos > inode->i_size)
-inode->i_size = pos;
+i_size_write(inode, pos);
 set_page_dirty(page);
 return 0;
 }
......
@@ -285,7 +285,7 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 start = filp->f_pos;
 break;
 case 2: /*SEEK_END*/
-start = filp->f_dentry->d_inode->i_size;
+start = i_size_read(filp->f_dentry->d_inode);
 break;
 default:
 return -EINVAL;
@@ -335,7 +335,7 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
 start = filp->f_pos;
 break;
 case 2: /*SEEK_END*/
-start = filp->f_dentry->d_inode->i_size;
+start = i_size_read(filp->f_dentry->d_inode);
 break;
 default:
 return -EINVAL;
......
@@ -227,7 +227,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 goto confused;
 block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
-last_block = (inode->i_size + blocksize - 1) >> blkbits;
+last_block = (i_size_read(inode) + blocksize - 1) >> blkbits;
 bh.b_page = page;
 for (page_block = 0; page_block < blocks_per_page;
@@ -459,7 +459,7 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 */
 BUG_ON(!PageUptodate(page));
 block_in_file = page->index << (PAGE_CACHE_SHIFT - blkbits);
-last_block = (inode->i_size - 1) >> blkbits;
+last_block = (i_size_read(inode) - 1) >> blkbits;
 map_bh.b_page = page;
 for (page_block = 0; page_block < blocks_per_page; ) {
@@ -489,9 +489,9 @@ mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block,
 first_unmapped = page_block;
-end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
 if (page->index >= end_index) {
-unsigned offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
+unsigned offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
 char *kaddr;
 if (page->index > end_index || !offset)
......
@@ -1001,6 +1001,7 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
 loff_t new_isize;
 int invalid = 0;
 int mtime_update = 0;
+loff_t cur_isize;
 dfprintk(VFS, "NFS: refresh_inode(%s/%ld ct=%d info=0x%x)\n",
 inode->i_sb->s_id, inode->i_ino,
@@ -1087,8 +1088,9 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
 * If we have pending writebacks, things can get
 * messy.
 */
-if (nfs_have_writebacks(inode) && new_isize < inode->i_size)
-new_isize = inode->i_size;
+cur_isize = i_size_read(inode);
+if (nfs_have_writebacks(inode) && new_isize < cur_isize)
+new_isize = cur_isize;
 nfsi->read_cache_ctime = fattr->ctime;
 inode->i_ctime = fattr->ctime;
@@ -1102,7 +1104,7 @@ __nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
 }
 nfsi->read_cache_isize = new_size;
-inode->i_size = new_isize;
+i_size_write(inode, new_isize);
 if (inode->i_mode != fattr->mode ||
 inode->i_uid != fattr->uid ||
......
@@ -180,8 +180,8 @@ nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
 * If we've extended the file, update the inode
 * now so we don't invalidate the cache.
 */
-if (base > inode->i_size)
-inode->i_size = base;
+if (base > i_size_read(inode))
+i_size_write(inode, base);
 } while (count);
 if (PageError(page))
@@ -211,8 +211,8 @@ nfs_writepage_async(struct file *file, struct inode *inode, struct page *page,
 nfs_unlock_request(req);
 nfs_strategy(inode);
 end = ((loff_t)page->index<<PAGE_CACHE_SHIFT) + (loff_t)(offset + count);
-if (inode->i_size < end)
-inode->i_size = end;
+if (i_size_read(inode) < end)
+i_size_write(inode, end);
 out:
 return status;
@@ -227,9 +227,10 @@ nfs_writepage(struct page *page, struct writeback_control *wbc)
 struct inode *inode = page->mapping->host;
 unsigned long end_index;
 unsigned offset = PAGE_CACHE_SIZE;
+loff_t i_size = i_size_read(inode);
 int err;
-end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+end_index = i_size >> PAGE_CACHE_SHIFT;
 /* Ensure we've flushed out any previous writes */
 nfs_wb_page(inode,page);
@@ -238,7 +239,7 @@ nfs_writepage(struct page *page, struct writeback_control *wbc)
 if (page->index < end_index)
 goto do_it;
 /* things got complicated... */
-offset = inode->i_size & (PAGE_CACHE_SIZE-1);
+offset = i_size & (PAGE_CACHE_SIZE-1);
 /* OK, are we completely out? */
 err = -EIO;
@@ -701,8 +702,8 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign
 status = 0;
 end = ((loff_t)page->index<<PAGE_CACHE_SHIFT) + (loff_t)(offset + count);
-if (inode->i_size < end)
-inode->i_size = end;
+if (i_size_read(inode) < end)
+i_size_write(inode, end);
 /* If we wrote past the end of the page.
 * Call the strategy routine so it can send out a bunch
@@ -716,7 +717,7 @@ nfs_updatepage(struct file *file, struct page *page, unsigned int offset, unsign
 nfs_unlock_request(req);
 done:
 dprintk("NFS: nfs_updatepage returns %d (isize %Ld)\n",
-status, (long long)inode->i_size);
+status, (long long)i_size_read(inode));
 if (status < 0)
 ClearPageUptodate(page);
 return status;
@@ -951,7 +952,7 @@ nfs_commit_rpcsetup(struct list_head *head, struct nfs_write_data *data, int how
 end = req_offset(last) + last->wb_bytes;
 len = end - start;
 /* If 'len' is not a 32-bit quantity, pass '0' in the COMMIT call */
-if (end >= inode->i_size || len < 0 || len > (~((u32)0) >> 1))
+if (end >= i_size_read(inode) || len < 0 || len > (~((u32)0) >> 1))
 len = 0;
 data->inode = inode;
......
@@ -1008,7 +1008,7 @@ asmlinkage long sys_vhangup(void)
 */
 int generic_file_open(struct inode * inode, struct file * filp)
 {
-if (!(filp->f_flags & O_LARGEFILE) && inode->i_size > MAX_NON_LFS)
+if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
 return -EFBIG;
 return 0;
 }
......
@@ -132,12 +132,14 @@ static int v1_check_quota_file(struct super_block *sb, int type)
 mm_segment_t fs;
 ssize_t size;
 loff_t offset = 0;
+loff_t isize;
 static const uint quota_magics[] = V2_INITQMAGICS;
-if (!inode->i_size)
+isize = i_size_read(inode);
+if (!isize)
 return 0;
-blocks = inode->i_size >> BLOCK_SIZE_BITS;
-off = inode->i_size & (BLOCK_SIZE - 1);
+blocks = isize >> BLOCK_SIZE_BITS;
+off = isize & (BLOCK_SIZE - 1);
 if ((blocks % sizeof(struct v1_disk_dqblk) * BLOCK_SIZE + off) % sizeof(struct v1_disk_dqblk))
 return 0;
 /* Doublecheck whether we didn't get file with new format - with old quotactl() this could happen */
......
@@ -55,7 +55,7 @@ loff_t remote_llseek(struct file *file, loff_t offset, int origin)
 lock_kernel();
 switch (origin) {
 case 2:
-offset += file->f_dentry->d_inode->i_size;
+offset += i_size_read(file->f_dentry->d_inode);
 break;
 case 1:
 offset += file->f_pos;
@@ -84,7 +84,7 @@ loff_t default_llseek(struct file *file, loff_t offset, int origin)
 lock_kernel();
 switch (origin) {
 case 2:
-offset += file->f_dentry->d_inode->i_size;
+offset += i_size_read(file->f_dentry->d_inode);
 break;
 case 1:
 offset += file->f_pos;
......
@@ -28,7 +28,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
 stat->atime = inode->i_atime;
 stat->mtime = inode->i_mtime;
 stat->ctime = inode->i_ctime;
-stat->size = inode->i_size;
+stat->size = i_size_read(inode);
 stat->blocks = inode->i_blocks;
 stat->blksize = inode->i_blksize;
 }
......
@@ -349,6 +349,17 @@ struct block_device {
 struct gendisk * bd_disk;
 };
+/*
+ * Use sequence counter to get consistent i_size on 32-bit processors.
+ */
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+#include <linux/seqlock.h>
+#define __NEED_I_SIZE_ORDERED
+#define i_size_ordered_init(inode) seqcount_init(&inode->i_size_seqcount)
+#else
+#define i_size_ordered_init(inode) do { } while (0)
+#endif
 struct inode {
 struct hlist_node i_hash;
 struct list_head i_list;
@@ -399,8 +410,60 @@ struct inode {
 union {
 void *generic_ip;
 } u;
+#ifdef __NEED_I_SIZE_ORDERED
+seqcount_t i_size_seqcount;
+#endif
 };
+/*
+ * NOTE: in a 32bit arch with a preemptable kernel and
+ * an UP compile the i_size_read/write must be atomic
+ * with respect to the local cpu (unlike with preempt disabled),
+ * but they don't need to be atomic with respect to other cpus like in
+ * true SMP (so they need either to either locally disable irq around
+ * the read or for example on x86 they can be still implemented as a
+ * cmpxchg8b without the need of the lock prefix). For SMP compiles
+ * and 64bit archs it makes no difference if preempt is enabled or not.
+ */
+static inline loff_t i_size_read(struct inode *inode)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+loff_t i_size;
+unsigned int seq;
+do {
+seq = read_seqcount_begin(&inode->i_size_seqcount);
+i_size = inode->i_size;
+} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
+return i_size;
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+loff_t i_size;
+preempt_disable();
+i_size = inode->i_size;
+preempt_enable();
+return i_size;
+#else
+return inode->i_size;
+#endif
+}
+static inline void i_size_write(struct inode *inode, loff_t i_size)
+{
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
+write_seqcount_begin(&inode->i_size_seqcount);
+inode->i_size = i_size;
+write_seqcount_end(&inode->i_size_seqcount);
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+preempt_disable();
+inode->i_size = i_size;
+preempt_enable();
+#else
+inode->i_size = i_size;
+#endif
+}
 struct fown_struct {
 rwlock_t lock; /* protects pid, uid, euid fields */
 int pid; /* pid or -pgrp where SIGIO should be sent */
......
@@ -703,7 +703,7 @@ long sys_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 }
 file = shp->shm_file;
-size = file->f_dentry->d_inode->i_size;
+size = i_size_read(file->f_dentry->d_inode);
 shp->shm_nattch++;
 shm_unlock(shp);
......
@@ -555,14 +555,15 @@ void do_generic_mapping_read(struct address_space *mapping,
 for (;;) {
 struct page *page;
 unsigned long end_index, nr, ret;
+loff_t isize = i_size_read(inode);
-end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+end_index = isize >> PAGE_CACHE_SHIFT;
 if (index > end_index)
 break;
 nr = PAGE_CACHE_SIZE;
 if (index == end_index) {
-nr = inode->i_size & ~PAGE_CACHE_MASK;
+nr = isize & ~PAGE_CACHE_MASK;
 if (nr <= offset)
 break;
 }
@@ -763,7 +764,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 retval = 0;
 if (!count)
 goto out; /* skip atime */
-size = inode->i_size;
+size = i_size_read(inode);
 if (pos < size) {
 retval = generic_file_direct_IO(READ, iocb,
 iov, pos, nr_segs);
@@ -951,7 +952,7 @@ struct page * filemap_nopage(struct vm_area_struct * area, unsigned long address
 endoff = ((area->vm_end - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
 retry_all:
-size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 if (pgoff >= size)
 goto outside_data_content;
@@ -1233,7 +1234,7 @@ static int filemap_populate(struct vm_area_struct *vma,
 pgoff, len >> PAGE_CACHE_SHIFT);
 repeat:
-size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
 return -EINVAL;
@@ -1544,7 +1545,7 @@ inline int generic_write_checks(struct inode *inode,
 if (!isblk) {
 /* FIXME: this is for backwards compatibility with 2.4 */
 if (file->f_flags & O_APPEND)
-*pos = inode->i_size;
+*pos = i_size_read(inode);
 if (limit != RLIM_INFINITY) {
 if (*pos >= limit) {
@@ -1590,15 +1591,17 @@ inline int generic_write_checks(struct inode *inode,
 if (unlikely(*pos + *count > inode->i_sb->s_maxbytes))
 *count = inode->i_sb->s_maxbytes - *pos;
 } else {
+loff_t isize;
 if (bdev_read_only(inode->i_bdev))
 return -EPERM;
-if (*pos >= inode->i_size) {
-if (*count || *pos > inode->i_size)
+isize = i_size_read(inode);
+if (*pos >= isize) {
+if (*count || *pos > isize)
 return -ENOSPC;
 }
-if (*pos + *count > inode->i_size)
-*count = inode->i_size - *pos;
+if (*pos + *count > isize)
+*count = isize - *pos;
 }
 return 0;
 }
@@ -1685,8 +1688,8 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 iov, pos, nr_segs);
 if (written > 0) {
 loff_t end = pos + written;
-if (end > inode->i_size && !isblk) {
-inode->i_size = end;
+if (end > i_size_read(inode) && !isblk) {
+i_size_write(inode, end);
 mark_inode_dirty(inode);
 }
 *ppos = end;
@@ -1730,14 +1733,15 @@ generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 status = a_ops->prepare_write(file, page, offset, offset+bytes);
 if (unlikely(status)) {
+loff_t isize = i_size_read(inode);
 /*
 * prepare_write() may have instantiated a few blocks
 * outside i_size. Trim these off again.
 */
 unlock_page(page);
 page_cache_release(page);
-if (pos + bytes > inode->i_size)
-vmtruncate(inode, inode->i_size);
+if (pos + bytes > isize)
+vmtruncate(inode, isize);
 break;
 }
 if (likely(nr_segs == 1))
......
@@ -1109,7 +1109,7 @@ int vmtruncate(struct inode * inode, loff_t offset)
 if (inode->i_size < offset)
 goto do_expand;
-inode->i_size = offset;
+i_size_write(inode, offset);
 pgoff = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
 down(&mapping->i_shared_sem);
 if (unlikely(!list_empty(&mapping->i_mmap)))
@@ -1126,7 +1126,7 @@ int vmtruncate(struct inode * inode, loff_t offset)
 goto out_sig;
 if (offset > inode->i_sb->s_maxbytes)
 goto out;
-inode->i_size = offset;
+i_size_write(inode, offset);
 out_truncate:
 if (inode->i_op && inode->i_op->truncate)
......
@@ -48,7 +48,7 @@ int vmtruncate(struct inode *inode, loff_t offset)
 if (inode->i_size < offset)
 goto do_expand;
-inode->i_size = offset;
+i_size_write(inode, offset);
 truncate_inode_pages(mapping, offset);
 goto out_truncate;
@@ -59,7 +59,7 @@ int vmtruncate(struct inode *inode, loff_t offset)
 goto out_sig;
 if (offset > inode->i_sb->s_maxbytes)
 goto out;
-inode->i_size = offset;
+i_size_write(inode, offset);
 out_truncate:
 if (inode->i_op && inode->i_op->truncate)
......
@@ -208,11 +208,12 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 LIST_HEAD(page_pool);
 int page_idx;
 int ret = 0;
+loff_t isize = i_size_read(inode);
-if (inode->i_size == 0)
+if (isize == 0)
 goto out;
-end_index = ((inode->i_size - 1) >> PAGE_CACHE_SHIFT);
+end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
 /*
 * Preallocate as many pages as we will need.
......
@@ -299,7 +299,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 static const swp_entry_t unswapped = {0};
 if (sgp != SGP_WRITE &&
-((loff_t) index << PAGE_CACHE_SHIFT) >= inode->i_size)
+((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
 return ERR_PTR(-EINVAL);
 while (!(entry = shmem_swp_entry(info, index, &page))) {
@@ -332,7 +332,7 @@ static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long
 return ERR_PTR(-ENOMEM);
 }
 if (sgp != SGP_WRITE &&
-((loff_t) index << PAGE_CACHE_SHIFT) >= inode->i_size) {
+((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
 entry = ERR_PTR(-EINVAL);
 break;
 }
@@ -641,7 +641,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
 /* Racing against delete or truncate? Must leave out of page cache */
 limit = (inode->i_state & I_FREEING)? 0:
-(inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+(i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 if (idx >= limit ||
 move_from_swap_cache(page, idx, inode->i_mapping) == 0)
@@ -964,7 +964,7 @@ static int shmem_populate(struct vm_area_struct *vma,
 enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
 unsigned long size;
-size = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
 return -EINVAL;
@@ -1239,12 +1239,13 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 for (;;) {
 struct page *page = NULL;
 unsigned long end_index, nr, ret;
+loff_t i_size = i_size_read(inode);
-end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+end_index = i_size >> PAGE_CACHE_SHIFT;
 if (index > end_index)
 break;
 if (index == end_index) {
-nr = inode->i_size & ~PAGE_CACHE_MASK;
+nr = i_size & ~PAGE_CACHE_MASK;
 if (nr <= offset)
 break;
 }
@@ -1261,9 +1262,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
 * are called without i_sem protection against truncate
 */
 nr = PAGE_CACHE_SIZE;
-end_index = inode->i_size >> PAGE_CACHE_SHIFT;
+i_size = i_size_read(inode);
+end_index = i_size >> PAGE_CACHE_SHIFT;
 if (index == end_index) {
-nr = inode->i_size & ~PAGE_CACHE_MASK;
+nr = i_size & ~PAGE_CACHE_MASK;
 if (nr <= offset) {
 page_cache_release(page);
 break;
......
@@ -926,7 +926,7 @@ static int setup_swap_extents(struct swap_info_struct *sis)
 */
 probe_block = 0;
 page_no = 0;
-last_block = inode->i_size >> blkbits;
+last_block = i_size_read(inode) >> blkbits;
 while ((probe_block + blocks_per_page) <= last_block &&
 page_no < sis->max) {
 unsigned block_in_page;
@@ -1312,7 +1312,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 goto bad_swap;
 }
-swapfilesize = mapping->host->i_size >> PAGE_SHIFT;
+swapfilesize = i_size_read(mapping->host) >> PAGE_SHIFT;
 /*
 * Read the swap header.
......