Commit 9bfccec2 authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Lots of bugs fixes, including Zheng and Jan's extent status shrinker
  fixes, which should improve CPU utilization and potential soft lockups
  under heavy memory pressure, and Eric Whitney's bigalloc fixes"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (26 commits)
  ext4: ext4_da_convert_inline_data_to_extent drop locked page after error
  ext4: fix suboptimal seek_{data,hole} extents traversal
  ext4: ext4_inline_data_fiemap should respect caller's argument
  ext4: prevent fsreentrance deadlock for inline_data
  ext4: forbid journal_async_commit in data=ordered mode
  jbd2: remove unnecessary NULL check before iput()
  ext4: Remove an unnecessary check for NULL before iput()
  ext4: remove unneeded code in ext4_unlink
  ext4: don't count external journal blocks as overhead
  ext4: remove never taken branch from ext4_ext_shift_path_extents()
  ext4: create nojournal_checksum mount option
  ext4: update comments regarding ext4_delete_inode()
  ext4: cleanup GFP flags inside resize path
  ext4: introduce aging to extent status tree
  ext4: cleanup flag definitions for extent status tree
  ext4: limit number of scanned extents in status tree shrinker
  ext4: move handling of list of shrinkable inodes into extent status code
  ext4: change LRU to round-robin in extent status tree shrinker
  ext4: cache extent hole in extent status tree for ext4_da_map_blocks()
  ext4: fix block reservation for bigalloc filesystems
  ...
parents 2756d373 50db71ab
@@ -158,17 +158,8 @@ struct ext4_allocation_request {
#define EXT4_MAP_MAPPED (1 << BH_Mapped)
#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
/* Sometimes (in the bigalloc case, from ext4_da_get_block_prep) the caller of
* ext4_map_blocks wants to know whether or not the underlying cluster has
* already been accounted for. EXT4_MAP_FROM_CLUSTER conveys to the caller that
* the requested mapping was from previously mapped (or delayed allocated)
* cluster. We use BH_AllocFromCluster only for this flag. BH_AllocFromCluster
* should never appear on buffer_head's state flags.
*/
#define EXT4_MAP_FROM_CLUSTER (1 << BH_AllocFromCluster)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
EXT4_MAP_FROM_CLUSTER)
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
struct ext4_map_blocks {
ext4_fsblk_t m_pblk;
@@ -565,10 +556,8 @@ enum {
#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
/* Do not take i_data_sem locking in ext4_map_blocks */
#define EXT4_GET_BLOCKS_NO_LOCK 0x0100
/* Do not put hole in extent cache */
#define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200
/* Convert written extents to unwritten */
#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0400
#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0200
/*
* The bit position of these flags must not overlap with any of the
@@ -889,10 +878,12 @@ struct ext4_inode_info {
/* extents status tree */
struct ext4_es_tree i_es_tree;
rwlock_t i_es_lock;
struct list_head i_es_lru;
struct list_head i_es_list;
unsigned int i_es_all_nr; /* protected by i_es_lock */
unsigned int i_es_lru_nr; /* protected by i_es_lock */
unsigned long i_touch_when; /* jiffies of last accessing */
unsigned int i_es_shk_nr; /* protected by i_es_lock */
ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for
extents to shrink. Protected by
i_es_lock */
/* ialloc */
ext4_group_t i_last_alloc_group;
@@ -1337,10 +1328,11 @@ struct ext4_sb_info {
/* Reclaim extents from extent status tree */
struct shrinker s_es_shrinker;
struct list_head s_es_lru;
struct list_head s_es_list; /* List of inodes with reclaimable extents */
long s_es_nr_inode;
struct ext4_es_stats s_es_stats;
struct mb_cache *s_mb_cache;
spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
spinlock_t s_es_lock ____cacheline_aligned_in_smp;
/* Ratelimit ext4 messages. */
struct ratelimit_state s_err_ratelimit_state;
@@ -2196,7 +2188,6 @@ extern int ext4_calculate_overhead(struct super_block *sb);
extern void ext4_superblock_csum_set(struct super_block *sb);
extern void *ext4_kvmalloc(size_t size, gfp_t flags);
extern void *ext4_kvzalloc(size_t size, gfp_t flags);
extern void ext4_kvfree(void *ptr);
extern int ext4_alloc_flex_bg_array(struct super_block *sb,
ext4_group_t ngroup);
extern const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -2647,7 +2638,7 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
int *retval);
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline);
int *has_inline, __u64 start, __u64 len);
extern int ext4_try_to_evict_inline_data(handle_t *handle,
struct inode *inode,
int needed);
@@ -2794,16 +2785,6 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
/* mmp.c */
extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
/*
* Note that these flags will never ever appear in a buffer_head's state flag.
* See EXT4_MAP_... to see where this is used.
*/
enum ext4_state_bits {
BH_AllocFromCluster /* allocated blocks were part of already
* allocated cluster. */
= BH_JBDPrivateStart
};
/*
* Add new method to test whether block and inode bitmaps are properly
* initialized. With uninit_bg reading the block from disk is not enough
@@ -29,25 +29,28 @@
/*
* These flags live in the high bits of extent_status.es_pblk
*/
#define ES_SHIFT 60
#define EXTENT_STATUS_WRITTEN (1 << 3)
#define EXTENT_STATUS_UNWRITTEN (1 << 2)
#define EXTENT_STATUS_DELAYED (1 << 1)
#define EXTENT_STATUS_HOLE (1 << 0)
enum {
ES_WRITTEN_B,
ES_UNWRITTEN_B,
ES_DELAYED_B,
ES_HOLE_B,
ES_REFERENCED_B,
ES_FLAGS
};
#define EXTENT_STATUS_FLAGS (EXTENT_STATUS_WRITTEN | \
EXTENT_STATUS_UNWRITTEN | \
EXTENT_STATUS_DELAYED | \
EXTENT_STATUS_HOLE)
#define ES_SHIFT (sizeof(ext4_fsblk_t)*8 - ES_FLAGS)
#define ES_MASK (~((ext4_fsblk_t)0) << ES_SHIFT)
#define ES_WRITTEN (1ULL << 63)
#define ES_UNWRITTEN (1ULL << 62)
#define ES_DELAYED (1ULL << 61)
#define ES_HOLE (1ULL << 60)
#define EXTENT_STATUS_WRITTEN (1 << ES_WRITTEN_B)
#define EXTENT_STATUS_UNWRITTEN (1 << ES_UNWRITTEN_B)
#define EXTENT_STATUS_DELAYED (1 << ES_DELAYED_B)
#define EXTENT_STATUS_HOLE (1 << ES_HOLE_B)
#define EXTENT_STATUS_REFERENCED (1 << ES_REFERENCED_B)
#define ES_MASK (ES_WRITTEN | ES_UNWRITTEN | \
ES_DELAYED | ES_HOLE)
#define ES_TYPE_MASK ((ext4_fsblk_t)(EXTENT_STATUS_WRITTEN | \
EXTENT_STATUS_UNWRITTEN | \
EXTENT_STATUS_DELAYED | \
EXTENT_STATUS_HOLE) << ES_SHIFT)
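The flag bits above are packed into the top ES_FLAGS bits of extent_status.es_pblk, leaving the low bits for the physical block number. A minimal userspace sketch of the scheme, assuming a 64-bit block type (so the shift works out to 59); every name below is a local stand-in, not an ext4 symbol:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t fsblk_t;                       /* stands in for ext4_fsblk_t */

enum { WRITTEN_B, UNWRITTEN_B, DELAYED_B, HOLE_B, REFERENCED_B, NFLAGS };

#define SHIFT   (sizeof(fsblk_t) * 8 - NFLAGS)  /* 59, as in ES_SHIFT */
#define MASK    (~(fsblk_t)0 << SHIFT)          /* as in ES_MASK */

int main(void)
{
        fsblk_t pblk = 12345;
        unsigned int status = (1 << WRITTEN_B) | (1 << REFERENCED_B);

        /* Pack: flags in the top five bits, block number below them. */
        fsblk_t es_pblk = (((fsblk_t)status << SHIFT) & MASK) | (pblk & ~MASK);

        /* Unpack, mirroring ext4_es_status() and ext4_es_pblock(). */
        printf("status=%#x pblk=%llu\n",
               (unsigned int)(es_pblk >> SHIFT),
               (unsigned long long)(es_pblk & ~MASK));
        return 0;
}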
struct ext4_sb_info;
struct ext4_extent;
@@ -65,14 +68,13 @@ struct ext4_es_tree {
};
struct ext4_es_stats {
unsigned long es_stats_last_sorted;
unsigned long es_stats_shrunk;
unsigned long es_stats_cache_hits;
unsigned long es_stats_cache_misses;
u64 es_stats_scan_time;
u64 es_stats_max_scan_time;
struct percpu_counter es_stats_all_cnt;
struct percpu_counter es_stats_lru_cnt;
struct percpu_counter es_stats_shk_cnt;
};
extern int __init ext4_init_es(void);
@@ -93,29 +95,49 @@ extern void ext4_es_find_delayed_extent_range(struct inode *inode,
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
static inline unsigned int ext4_es_status(struct extent_status *es)
{
return es->es_pblk >> ES_SHIFT;
}
static inline unsigned int ext4_es_type(struct extent_status *es)
{
return (es->es_pblk & ES_TYPE_MASK) >> ES_SHIFT;
}
static inline int ext4_es_is_written(struct extent_status *es)
{
return (es->es_pblk & ES_WRITTEN) != 0;
return (ext4_es_type(es) & EXTENT_STATUS_WRITTEN) != 0;
}
static inline int ext4_es_is_unwritten(struct extent_status *es)
{
return (es->es_pblk & ES_UNWRITTEN) != 0;
return (ext4_es_type(es) & EXTENT_STATUS_UNWRITTEN) != 0;
}
static inline int ext4_es_is_delayed(struct extent_status *es)
{
return (es->es_pblk & ES_DELAYED) != 0;
return (ext4_es_type(es) & EXTENT_STATUS_DELAYED) != 0;
}
static inline int ext4_es_is_hole(struct extent_status *es)
{
return (es->es_pblk & ES_HOLE) != 0;
return (ext4_es_type(es) & EXTENT_STATUS_HOLE) != 0;
}
static inline unsigned int ext4_es_status(struct extent_status *es)
static inline void ext4_es_set_referenced(struct extent_status *es)
{
return es->es_pblk >> ES_SHIFT;
es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
}
static inline void ext4_es_clear_referenced(struct extent_status *es)
{
es->es_pblk &= ~(((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT);
}
static inline int ext4_es_is_referenced(struct extent_status *es)
{
return (ext4_es_status(es) & EXTENT_STATUS_REFERENCED) != 0;
}
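The referenced bit is what the new shrinker aging uses: an extent touched since the last scan is skipped once and its bit cleared, so only extents that stay cold across two passes get reclaimed. A hedged userspace sketch of that second-chance idea (the real per-extent logic lives in extents_status.c and differs in detail; should_reclaim() and struct es are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct es { unsigned int status; };
#define REFERENCED 0x10                 /* mirrors EXTENT_STATUS_REFERENCED */

/* Hypothetical second-chance check: clear the bit on first sight,
 * reclaim only entries that stayed cold for a whole scan cycle. */
static bool should_reclaim(struct es *es)
{
        if (es->status & REFERENCED) {
                es->status &= ~REFERENCED;
                return false;
        }
        return true;
}

int main(void)
{
        struct es hot = { REFERENCED }, cold = { 0 };

        int p1 = should_reclaim(&hot);
        int p2 = should_reclaim(&hot);
        printf("hot: pass1=%d pass2=%d\n", p1, p2);
        printf("cold: pass1=%d\n", should_reclaim(&cold));
        return 0;
}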
static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
@@ -135,23 +157,19 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
static inline void ext4_es_store_status(struct extent_status *es,
unsigned int status)
{
es->es_pblk = (((ext4_fsblk_t)
(status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
(es->es_pblk & ~ES_MASK));
es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
(es->es_pblk & ~ES_MASK);
}
static inline void ext4_es_store_pblock_status(struct extent_status *es,
ext4_fsblk_t pb,
unsigned int status)
{
es->es_pblk = (((ext4_fsblk_t)
(status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
(pb & ~ES_MASK));
es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
(pb & ~ES_MASK);
}
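A quick roundtrip note on the two store helpers (illustrative, not from the patch):

/* After ext4_es_store_pblock_status(&es, 12345, EXTENT_STATUS_WRITTEN),
 * ext4_es_pblock(&es) == 12345 and ext4_es_is_written(&es) != 0, while
 * ext4_es_is_unwritten(&es) == 0: the status bits and the block number
 * occupy disjoint parts of es_pblk and never corrupt each other.
 */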
extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
extern void ext4_es_lru_add(struct inode *inode);
extern void ext4_es_lru_del(struct inode *inode);
#endif /* _EXT4_EXTENTS_STATUS_H */
@@ -273,24 +273,19 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* we determine this extent to be data or a hole according to whether the
* page cache has data or not.
*/
static int ext4_find_unwritten_pgoff(struct inode *inode,
int whence,
struct ext4_map_blocks *map,
loff_t *offset)
static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
loff_t endoff, loff_t *offset)
{
struct pagevec pvec;
unsigned int blkbits;
pgoff_t index;
pgoff_t end;
loff_t endoff;
loff_t startoff;
loff_t lastoff;
int found = 0;
blkbits = inode->i_sb->s_blocksize_bits;
startoff = *offset;
lastoff = startoff;
endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
index = startoff >> PAGE_CACHE_SHIFT;
end = endoff >> PAGE_CACHE_SHIFT;
@@ -408,147 +403,144 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
struct ext4_map_blocks map;
struct extent_status es;
ext4_lblk_t start, last, end;
loff_t dataoff, isize;
int blkbits;
int ret = 0;
struct fiemap_extent_info fie;
struct fiemap_extent ext[2];
loff_t next;
int i, ret = 0;
mutex_lock(&inode->i_mutex);
isize = i_size_read(inode);
if (offset >= isize) {
if (offset >= inode->i_size) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
blkbits = inode->i_sb->s_blocksize_bits;
start = offset >> blkbits;
last = start;
end = isize >> blkbits;
dataoff = offset;
do {
map.m_lblk = last;
map.m_len = end - last + 1;
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
if (last != start)
dataoff = (loff_t)last << blkbits;
fie.fi_flags = 0;
fie.fi_extents_max = 2;
fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
while (1) {
mm_segment_t old_fs = get_fs();
fie.fi_extents_mapped = 0;
memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
set_fs(get_ds());
ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
set_fs(old_fs);
if (ret)
break;
}
/*
* If there is a delayed extent at this offset,
* it will be treated as data.
*/
ext4_es_find_delayed_extent_range(inode, last, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
if (last != start)
dataoff = (loff_t)last << blkbits;
/* No extents found, EOF */
if (!fie.fi_extents_mapped) {
ret = -ENXIO;
break;
}
for (i = 0; i < fie.fi_extents_mapped; i++) {
next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
/*
* If there is an unwritten extent at this offset,
* it will be treated as data or a hole according to
* whether the page cache has data or not.
*/
if (map.m_flags & EXT4_MAP_UNWRITTEN) {
int unwritten;
unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
&map, &dataoff);
if (unwritten)
break;
}
if (offset < (loff_t)ext[i].fe_logical)
offset = (loff_t)ext[i].fe_logical;
/*
* If extent is not unwritten, then it contains valid
* data, mapped or delayed.
*/
if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
goto out;
last++;
dataoff = (loff_t)last << blkbits;
} while (last <= end);
/*
* If there is an unwritten extent at this offset,
* it will be treated as data or a hole according to
* whether the page cache has data or not.
*/
if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
next, &offset))
goto out;
if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
ret = -ENXIO;
goto out;
}
offset = next;
}
}
if (offset > inode->i_size)
offset = inode->i_size;
out:
mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
if (dataoff > isize)
return -ENXIO;
return vfs_setpos(file, dataoff, maxsize);
return vfs_setpos(file, offset, maxsize);
}
/*
* ext4_seek_hole() retrieves the offset for SEEK_HOLE.
* ext4_seek_hole() retrieves the offset for SEEK_HOLE
*/
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
struct ext4_map_blocks map;
struct extent_status es;
ext4_lblk_t start, last, end;
loff_t holeoff, isize;
int blkbits;
int ret = 0;
struct fiemap_extent_info fie;
struct fiemap_extent ext[2];
loff_t next;
int i, ret = 0;
mutex_lock(&inode->i_mutex);
isize = i_size_read(inode);
if (offset >= isize) {
if (offset >= inode->i_size) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
blkbits = inode->i_sb->s_blocksize_bits;
start = offset >> blkbits;
last = start;
end = isize >> blkbits;
holeoff = offset;
fie.fi_flags = 0;
fie.fi_extents_max = 2;
fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
while (1) {
mm_segment_t old_fs = get_fs();
do {
map.m_lblk = last;
map.m_len = end - last + 1;
ret = ext4_map_blocks(NULL, inode, &map, 0);
if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
last += ret;
holeoff = (loff_t)last << blkbits;
continue;
}
fie.fi_extents_mapped = 0;
memset(ext, 0, sizeof(*ext));
/*
* If there is a delayed extent at this offset,
* we will skip this extent.
*/
ext4_es_find_delayed_extent_range(inode, last, last, &es);
if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
last = es.es_lblk + es.es_len;
holeoff = (loff_t)last << blkbits;
continue;
}
set_fs(get_ds());
ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
set_fs(old_fs);
if (ret)
break;
/*
* If there is an unwritten extent at this offset,
* it will be treated as data or a hole according to
* whether the page cache has data or not.
*/
if (map.m_flags & EXT4_MAP_UNWRITTEN) {
int unwritten;
unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
&map, &holeoff);
if (!unwritten) {
last += ret;
holeoff = (loff_t)last << blkbits;
/* No extents found */
if (!fie.fi_extents_mapped)
break;
for (i = 0; i < fie.fi_extents_mapped; i++) {
next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
/*
* If extent is not unwritten, then it contains valid
* data, mapped or delayed.
*/
if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
if (offset < (loff_t)ext[i].fe_logical)
goto out;
offset = next;
continue;
}
}
/* find a hole */
break;
} while (last <= end);
/*
* If there is an unwritten extent at this offset,
* it will be treated as data or a hole according to
* whether the page cache has data or not.
*/
if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
next, &offset))
goto out;
offset = next;
if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
goto out;
}
}
if (offset > inode->i_size)
offset = inode->i_size;
out:
mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
if (holeoff > isize)
holeoff = isize;
return vfs_setpos(file, holeoff, maxsize);
return vfs_setpos(file, offset, maxsize);
}
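For reference, the semantics both functions implement are what userspace sees through lseek(2). A standalone sketch, not from the patch (SEEK_DATA/SEEK_HOLE need _GNU_SOURCE on Linux):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        if (argc < 2)
                return 1;
        int fd = open(argv[1], O_RDONLY);
        if (fd < 0)
                return 1;
        /* First data byte at or after offset 0 (fails with ENXIO if none)... */
        off_t data = lseek(fd, 0, SEEK_DATA);
        /* ...then the first hole at or after that data offset. */
        off_t hole = data < 0 ? -1 : lseek(fd, data, SEEK_HOLE);
        printf("data at %lld, next hole at %lld\n",
               (long long)data, (long long)hole);
        close(fd);
        return 0;
}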
/*
@@ -811,8 +811,11 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
ret = __block_write_begin(page, 0, inline_size,
ext4_da_get_block_prep);
if (ret) {
up_read(&EXT4_I(inode)->xattr_sem);
unlock_page(page);
page_cache_release(page);
ext4_truncate_failed_write(inode);
goto out;
return ret;
}
SetPageDirty(page);
@@ -870,6 +873,12 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
goto out_journal;
}
/*
* We cannot recurse into the filesystem as the transaction
* is already started.
*/
flags |= AOP_FLAG_NOFS;
if (ret == -ENOSPC) {
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
@@ -882,11 +891,6 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
goto out;
}
/*
* We cannot recurse into the filesystem as the transaction
* is already started.
*/
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page) {
@@ -1807,11 +1811,12 @@ int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline)
int *has_inline, __u64 start, __u64 len)
{
__u64 physical = 0;
__u64 length;
__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST;
__u64 inline_len;
__u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
FIEMAP_EXTENT_LAST;
int error = 0;
struct ext4_iloc iloc;
@@ -1820,6 +1825,13 @@ int ext4_inline_data_fiemap(struct inode *inode,
*has_inline = 0;
goto out;
}
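/*
 * Illustrative example (not part of the patch): with 60 bytes of
 * inline data, start=16 and len=32 clamp inline_len to 48, and the
 * final subtraction reports 32 bytes; with len=100 nothing is
 * clamped, so bytes 16..59 (44 bytes) are reported.
 */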
inline_len = min_t(size_t, ext4_get_inline_size(inode),
i_size_read(inode));
if (start >= inline_len)
goto out;
if (start + len < inline_len)
inline_len = start + len;
inline_len -= start;
error = ext4_get_inode_loc(inode, &iloc);
if (error)
@@ -1828,11 +1840,10 @@ int ext4_inline_data_fiemap(struct inode *inode,
physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
length = i_size_read(inode);
if (physical)
error = fiemap_fill_next_extent(fieinfo, 0, physical,
length, flags);
error = fiemap_fill_next_extent(fieinfo, start, physical,
inline_len, flags);
brelse(iloc.bh);
out:
up_read(&EXT4_I(inode)->xattr_sem);
@@ -416,11 +416,6 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
}
if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
up_read((&EXT4_I(inode)->i_data_sem));
/*
* Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
* because it shouldn't be marked in es_map->m_flags.
*/
map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
/*
* We don't check m_len because extent will be collapsed in status
@@ -491,7 +486,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
ext4_es_lru_add(inode);
if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
map->m_pblk = ext4_es_pblock(&es) +
map->m_lblk - es.es_lblk;
@@ -1393,7 +1387,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
/* Lookup extent status tree firstly */
if (ext4_es_lookup_extent(inode, iblock, &es)) {
ext4_es_lru_add(inode);
if (ext4_es_is_hole(&es)) {
retval = 0;
down_read(&EXT4_I(inode)->i_data_sem);
@@ -1434,24 +1427,12 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
* file system block.
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_has_inline_data(inode)) {
/*
* We will soon create blocks for this page, and let
* us pretend as if the blocks aren't allocated yet.
* In case of clusters, we have to handle the work
* of mapping from cluster so that the reserved space
* is calculated properly.
*/
if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
ext4_find_delalloc_cluster(inode, map->m_lblk))
map->m_flags |= EXT4_MAP_FROM_CLUSTER;
if (ext4_has_inline_data(inode))
retval = 0;
} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(NULL, inode, map,
EXT4_GET_BLOCKS_NO_PUT_HOLE);
else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
retval = ext4_ext_map_blocks(NULL, inode, map, 0);
else
retval = ext4_ind_map_blocks(NULL, inode, map,
EXT4_GET_BLOCKS_NO_PUT_HOLE);
retval = ext4_ind_map_blocks(NULL, inode, map, 0);
add_delayed:
if (retval == 0) {
@@ -1465,7 +1446,8 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
* then we don't need to reserve it again. However we still need
* to reserve metadata for every block we're going to write.
*/
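/*
 * Illustrative example (not part of the patch): with bigalloc and
 * s_cluster_ratio == 16, a delayed block anywhere in the same
 * 16-block cluster as m_lblk means the cluster's space is already
 * reserved, so the data reservation below is skipped.
 */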
if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
if (EXT4_SB(inode->i_sb)->s_cluster_ratio <= 1 ||
!ext4_find_delalloc_cluster(inode, map->m_lblk)) {
ret = ext4_da_reserve_space(inode, iblock);
if (ret) {
/* not enough space to reserve */
@@ -1481,11 +1463,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
goto out_unlock;
}
/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
* and it should not appear on the bh->b_state.
*/
map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
map_bh(bh, inode->i_sb, invalid_block);
set_buffer_new(bh);
set_buffer_delay(bh);
@@ -3643,7 +3620,7 @@ void ext4_truncate(struct inode *inode)
* If this was a simple ftruncate() and the file will remain alive,
* then we need to clear up the orphan record which we created above.
* However, if this was a real unlink then we were called by
* ext4_delete_inode(), and we allow that function to clean up the
* ext4_evict_inode(), and we allow that function to clean up the
* orphan info for us.
*/
if (inode->i_nlink)
@@ -78,8 +78,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
ext4_es_lru_del(inode1);
ext4_es_lru_del(inode2);
isize = i_size_read(inode1);
i_size_write(inode1, i_size_read(inode2));
@@ -2358,7 +2358,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
if (sbi->s_group_info) {
memcpy(new_groupinfo, sbi->s_group_info,
sbi->s_group_info_size * sizeof(*sbi->s_group_info));
ext4_kvfree(sbi->s_group_info);
kvfree(sbi->s_group_info);
}
sbi->s_group_info = new_groupinfo;
sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
@@ -2385,7 +2385,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
metalen = sizeof(*meta_group_info) <<
EXT4_DESC_PER_BLOCK_BITS(sb);
meta_group_info = kmalloc(metalen, GFP_KERNEL);
meta_group_info = kmalloc(metalen, GFP_NOFS);
if (meta_group_info == NULL) {
ext4_msg(sb, KERN_ERR, "can't allocate mem "
"for a buddy group");
@@ -2399,7 +2399,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL);
meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
if (meta_group_info[i] == NULL) {
ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
goto exit_group_info;
@@ -2428,7 +2428,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
{
struct buffer_head *bh;
meta_group_info[i]->bb_bitmap =
kmalloc(sb->s_blocksize, GFP_KERNEL);
kmalloc(sb->s_blocksize, GFP_NOFS);
BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
bh = ext4_read_block_bitmap(sb, group);
BUG_ON(bh == NULL);
@@ -2495,7 +2495,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
kfree(sbi->s_group_info[i]);
iput(sbi->s_buddy_cache);
err_freesgi:
ext4_kvfree(sbi->s_group_info);
kvfree(sbi->s_group_info);
return -ENOMEM;
}
@@ -2708,12 +2708,11 @@ int ext4_mb_release(struct super_block *sb)
EXT4_DESC_PER_BLOCK_BITS(sb);
for (i = 0; i < num_meta_group_infos; i++)
kfree(sbi->s_group_info[i]);
ext4_kvfree(sbi->s_group_info);
kvfree(sbi->s_group_info);
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
if (sbi->s_buddy_cache)
iput(sbi->s_buddy_cache);
iput(sbi->s_buddy_cache);
if (sbi->s_mb_stats) {
ext4_msg(sb, KERN_INFO,
"mballoc: %u blocks %u reqs (%u success)",
@@ -592,7 +592,7 @@ int ext4_ext_migrate(struct inode *inode)
/*
* set the i_blocks count to zero
* so that the ext4_delete_inode does the
* so that the ext4_evict_inode() does the
* right job
*
* We don't need to take the i_lock because
@@ -273,6 +273,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
int replaced_count = 0;
int from = data_offset_in_page << orig_inode->i_blkbits;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
struct super_block *sb = orig_inode->i_sb;
/*
* It needs twice the amount of ordinary journal buffers because
@@ -405,10 +406,13 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
page_cache_release(pagep[1]);
stop_journal:
ext4_journal_stop(handle);
if (*err == -ENOSPC &&
ext4_should_retry_alloc(sb, &retries))
goto again;
/* The buffer was busy, probably because it is pinned to a journal
 * transaction; forcing a transaction commit may help to free it. */
if (*err == -EBUSY && ext4_should_retry_alloc(orig_inode->i_sb,
&retries))
if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
goto again;
return replaced_count;
@@ -2814,7 +2814,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
ext4_orphan_add(handle, inode);
inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
retval = 0;
end_unlink:
brelse(bh);
@@ -856,7 +856,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
ext4_kvfree(o_group_desc);
kvfree(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_super(handle, sb);
@@ -866,7 +866,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
return err;
exit_inode:
ext4_kvfree(n_group_desc);
kvfree(n_group_desc);
brelse(iloc.bh);
exit_dind:
brelse(dind);
@@ -909,7 +909,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
n_group_desc[gdb_num] = gdb_bh;
EXT4_SB(sb)->s_group_desc = n_group_desc;
EXT4_SB(sb)->s_gdb_count++;
ext4_kvfree(o_group_desc);
kvfree(o_group_desc);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
@@ -176,15 +176,6 @@ void *ext4_kvzalloc(size_t size, gfp_t flags)
return ret;
}
void ext4_kvfree(void *ptr)
{
if (is_vmalloc_addr(ptr))
vfree(ptr);
else
kfree(ptr);
}
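The generic kvfree() performs the same is_vmalloc_addr() dispatch between vfree() and kfree(), so this private helper is redundant; the hunks below simply switch every caller over.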
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
{
@@ -811,8 +802,8 @@ static void ext4_put_super(struct super_block *sb)
for (i = 0; i < sbi->s_gdb_count; i++)
brelse(sbi->s_group_desc[i]);
ext4_kvfree(sbi->s_group_desc);
ext4_kvfree(sbi->s_flex_groups);
kvfree(sbi->s_group_desc);
kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -880,10 +871,10 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
spin_lock_init(&ei->i_prealloc_lock);
ext4_es_init_tree(&ei->i_es_tree);
rwlock_init(&ei->i_es_lock);
INIT_LIST_HEAD(&ei->i_es_lru);
INIT_LIST_HEAD(&ei->i_es_list);
ei->i_es_all_nr = 0;
ei->i_es_lru_nr = 0;
ei->i_touch_when = 0;
ei->i_es_shk_nr = 0;
ei->i_es_shrink_lblk = 0;
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
@@ -973,7 +964,6 @@ void ext4_clear_inode(struct inode *inode)
dquot_drop(inode);
ext4_discard_preallocations(inode);
ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
ext4_es_lru_del(inode);
if (EXT4_I(inode)->jinode) {
jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
EXT4_I(inode)->jinode);
@@ -1153,7 +1143,7 @@ enum {
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
Opt_max_dir_size_kb,
Opt_max_dir_size_kb, Opt_nojournal_checksum,
};
static const match_table_t tokens = {
@@ -1187,6 +1177,7 @@ static const match_table_t tokens = {
{Opt_journal_dev, "journal_dev=%u"},
{Opt_journal_path, "journal_path=%s"},
{Opt_journal_checksum, "journal_checksum"},
{Opt_nojournal_checksum, "nojournal_checksum"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_abort, "abort"},
{Opt_data_journal, "data=journal"},
@@ -1368,6 +1359,8 @@ static const struct mount_opts {
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_SET},
{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -1709,6 +1702,12 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
}
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
"in data=ordered mode");
return 0;
}
return 1;
}
@@ -1946,7 +1945,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
memcpy(new_groups, sbi->s_flex_groups,
(sbi->s_flex_groups_allocated *
sizeof(struct flex_groups)));
ext4_kvfree(sbi->s_flex_groups);
kvfree(sbi->s_flex_groups);
}
sbi->s_flex_groups = new_groups;
sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
@@ -3317,7 +3316,7 @@ int ext4_calculate_overhead(struct super_block *sb)
struct ext4_super_block *es = sbi->s_es;
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
ext4_fsblk_t overhead = 0;
char *buf = (char *) get_zeroed_page(GFP_KERNEL);
char *buf = (char *) get_zeroed_page(GFP_NOFS);
if (!buf)
return -ENOMEM;
@@ -3345,8 +3344,8 @@ int ext4_calculate_overhead(struct super_block *sb)
memset(buf, 0, PAGE_SIZE);
cond_resched();
}
/* Add the journal blocks as well */
if (sbi->s_journal)
/* Add the internal journal blocks as well */
if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
sbi->s_overhead = overhead;
@@ -4232,7 +4231,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
failed_mount6:
ext4_mb_release(sb);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -4261,7 +4260,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
ext4_kvfree(sbi->s_group_desc);
kvfree(sbi->s_group_desc);
failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
@@ -4862,6 +4861,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
test_opt(sb, JOURNAL_CHECKSUM)) {
ext4_msg(sb, KERN_ERR, "changing journal_checksum "
"during remount not supported");
err = -EINVAL;
goto restore_opts;
}
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
@@ -1714,8 +1714,7 @@ int jbd2_journal_destroy(journal_t *journal)
if (journal->j_proc_entry)
jbd2_stats_proc_exit(journal);
if (journal->j_inode)
iput(journal->j_inode);
iput(journal->j_inode);
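/* iput() returns quietly for a NULL inode, which is why the check
 * above can go; the ext4 hunks in this series make the same cleanup. */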
if (journal->j_revoke)
jbd2_journal_destroy_revoke(journal);
if (journal->j_chksum_driver)
@@ -43,15 +43,13 @@ struct extent_status;
{ EXT4_GET_BLOCKS_METADATA_NOFAIL, "METADATA_NOFAIL" }, \
{ EXT4_GET_BLOCKS_NO_NORMALIZE, "NO_NORMALIZE" }, \
{ EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
{ EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" }, \
{ EXT4_GET_BLOCKS_NO_PUT_HOLE, "NO_PUT_HOLE" })
{ EXT4_GET_BLOCKS_NO_LOCK, "NO_LOCK" })
#define show_mflags(flags) __print_flags(flags, "", \
{ EXT4_MAP_NEW, "N" }, \
{ EXT4_MAP_MAPPED, "M" }, \
{ EXT4_MAP_UNWRITTEN, "U" }, \
{ EXT4_MAP_BOUNDARY, "B" }, \
{ EXT4_MAP_FROM_CLUSTER, "C" })
{ EXT4_MAP_BOUNDARY, "B" })
#define show_free_flags(flags) __print_flags(flags, "|", \
{ EXT4_FREE_BLOCKS_METADATA, "METADATA" }, \
@@ -2452,15 +2450,14 @@ TRACE_EVENT(ext4_es_shrink,
TRACE_EVENT(ext4_es_shrink,
TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
int skip_precached, int nr_skipped, int retried),
int nr_skipped, int retried),
TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, nr_shrunk )
__field( unsigned long long, scan_time )
__field( int, skip_precached )
__field( int, nr_skipped )
__field( int, retried )
),
@@ -2469,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink,
__entry->dev = sb->s_dev;
__entry->nr_shrunk = nr_shrunk;
__entry->scan_time = div_u64(scan_time, 1000);
__entry->skip_precached = skip_precached;
__entry->nr_skipped = nr_skipped;
__entry->retried = retried;
),
TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu "
"nr_skipped %d retried %d",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
__entry->scan_time, __entry->skip_precached,
__entry->nr_skipped, __entry->retried)
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
#endif /* _TRACE_EXT4_H */