Commit bc2c6421 authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "The first major feature for ext4 this merge window is the largedir
  feature, which allows ext4 directories to support over 2 billion
  directory entries (assuming ~64-byte file names; in practice, users
  will run into performance limits first; a rough capacity sketch
  follows below). This feature was
  originally written by the Lustre team, and credit goes to Artem
  Blagodarenko from Seagate for getting this feature upstream.

  The second major feature allows ext4 to support extended
  attribute values up to 64k. This feature was also originally from
  Lustre, and has been enhanced by Tahsin Erdogan from Google with a
  deduplication feature so that if multiple files have the same xattr
  value (for example, Windows ACLs stored by Samba), only one copy will
  be stored on disk for encoding and caching efficiency.

  We also have the usual set of bug fixes, cleanups, and optimizations"
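A rough capacity sketch (editor's assumptions, not figures from the commit
message: 4 KiB blocks; 8-byte dx_entry index slots, so roughly 508 slots in
the root index block and 511 per interior index block; ~72 bytes per directory
entry for a ~64-byte name, so about 56 entries per leaf block):

    2-level htree: 508 * 511       = ~260 thousand leaf blocks = ~14.5 million entries
    3-level htree: 508 * 511 * 511 = ~133 million leaf blocks  = ~7.4 billion entries

which is how the third index level added by largedir clears the 2 billion
entry mark.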

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (47 commits)
  ext4: fix spelling mistake: "prellocated" -> "preallocated"
  ext4: fix __ext4_new_inode() journal credits calculation
  ext4: skip ext4_init_security() and encryption on ea_inodes
  fs: generic_block_bmap(): initialize all of the fields in the temp bh
  ext4: change fast symlink test to not rely on i_blocks
  ext4: require key for truncate(2) of encrypted file
  ext4: don't bother checking for encryption key in ->mmap()
  ext4: check return value of kstrtoull correctly in reserved_clusters_store
  ext4: fix off-by-one fsmap error on 1k block filesystems
  ext4: return EFSBADCRC if a bad checksum error is found in ext4_find_entry()
  ext4: return EIO on read error in ext4_find_entry
  ext4: forbid encrypting root directory
  ext4: send parallel discards on commit completions
  ext4: avoid unnecessary stalls in ext4_evict_inode()
  ext4: add nombcache mount option
  ext4: strong binding of xattr inode references
  ext4: eliminate xattr entry e_hash recalculation for removes
  ext4: reserve space for xattr entries/names
  quota: add get_inode_usage callback to transfer multi-inode charges
  ext4: xattr inode deduplication
  ...
parents 58f587cb ff950156
......@@ -3031,11 +3031,11 @@ EXPORT_SYMBOL(block_write_full_page);
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
{
struct buffer_head tmp;
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
tmp.b_size = i_blocksize(inode);
struct buffer_head tmp = {
.b_size = i_blocksize(inode),
};
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
......
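The designated initializer in the hunk above is the whole fix: in C, members
not named in an initializer list are implicitly zero-initialized, so b_state,
b_blocknr and every other field of tmp now start out as zero rather than stack
garbage. A minimal userspace sketch of the rule (the struct and its fields are
illustrative, not the kernel's buffer_head):

    #include <stdio.h>

    struct bh {
        unsigned long state;
        unsigned long long blocknr;
        unsigned long size;
        void *private_data;
    };

    int main(void)
    {
        /* Only .size is named; per C11 6.7.9p21 all remaining
         * members are zero-initialized, as if static. */
        struct bh tmp = { .size = 4096 };

        printf("state=%lu blocknr=%llu private=%p\n",
               tmp.state, tmp.blocknr, tmp.private_data);
        return 0;
    }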
......@@ -256,6 +256,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child,
memcpy(ctx.master_key_descriptor, ci->ci_master_key,
FS_KEY_DESCRIPTOR_SIZE);
get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE);
BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE);
res = parent->i_sb->s_cop->set_context(child, &ctx,
sizeof(ctx), fs_data);
if (res)
......
......@@ -113,7 +113,7 @@ struct ext2_sb_info {
* of the mount options.
*/
spinlock_t s_lock;
struct mb_cache *s_mb_cache;
struct mb_cache *s_ea_block_cache;
};
static inline spinlock_t *
......
......@@ -147,9 +147,9 @@ static void ext2_put_super (struct super_block * sb)
ext2_quota_off_umount(sb);
if (sbi->s_mb_cache) {
ext2_xattr_destroy_cache(sbi->s_mb_cache);
sbi->s_mb_cache = NULL;
if (sbi->s_ea_block_cache) {
ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
sbi->s_ea_block_cache = NULL;
}
if (!(sb->s_flags & MS_RDONLY)) {
struct ext2_super_block *es = sbi->s_es;
......@@ -1131,9 +1131,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
}
#ifdef CONFIG_EXT2_FS_XATTR
sbi->s_mb_cache = ext2_xattr_create_cache();
if (!sbi->s_mb_cache) {
ext2_msg(sb, KERN_ERR, "Failed to create an mb_cache");
sbi->s_ea_block_cache = ext2_xattr_create_cache();
if (!sbi->s_ea_block_cache) {
ext2_msg(sb, KERN_ERR, "Failed to create ea_block_cache");
goto failed_mount3;
}
#endif
......@@ -1182,8 +1182,8 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_id);
goto failed_mount;
failed_mount3:
if (sbi->s_mb_cache)
ext2_xattr_destroy_cache(sbi->s_mb_cache);
if (sbi->s_ea_block_cache)
ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
......
......@@ -121,6 +121,8 @@ const struct xattr_handler *ext2_xattr_handlers[] = {
NULL
};
#define EA_BLOCK_CACHE(inode) (EXT2_SB(inode->i_sb)->s_ea_block_cache)
static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
......@@ -150,7 +152,7 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name,
size_t name_len, size;
char *end;
int error;
struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
name_index, name, buffer, (long)buffer_size);
......@@ -195,7 +197,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
goto found;
entry = next;
}
if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
if (ext2_xattr_cache_insert(ea_block_cache, bh))
ea_idebug(inode, "cache insert failed");
error = -ENODATA;
goto cleanup;
......@@ -208,7 +210,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
goto bad_block;
if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
if (ext2_xattr_cache_insert(ea_block_cache, bh))
ea_idebug(inode, "cache insert failed");
if (buffer) {
error = -ERANGE;
......@@ -246,7 +248,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
char *end;
size_t rest = buffer_size;
int error;
struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
buffer, (long)buffer_size);
......@@ -281,7 +283,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
goto bad_block;
entry = next;
}
if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
if (ext2_xattr_cache_insert(ea_block_cache, bh))
ea_idebug(inode, "cache insert failed");
/* list the attribute names */
......@@ -493,8 +495,8 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
* This must happen under buffer lock for
* ext2_xattr_set2() to reliably detect modified block
*/
mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache,
hash, bh->b_blocknr);
mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
bh->b_blocknr);
/* keep the buffer locked while modifying it. */
} else {
......@@ -627,7 +629,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
struct super_block *sb = inode->i_sb;
struct buffer_head *new_bh = NULL;
int error;
struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache;
struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
if (header) {
new_bh = ext2_xattr_cache_find(inode, header);
......@@ -655,7 +657,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
don't need to change the reference count. */
new_bh = old_bh;
get_bh(new_bh);
ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
ext2_xattr_cache_insert(ea_block_cache, new_bh);
} else {
/* We need to allocate a new block */
ext2_fsblk_t goal = ext2_group_first_block_no(sb,
......@@ -676,7 +678,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
memcpy(new_bh->b_data, header, new_bh->b_size);
set_buffer_uptodate(new_bh);
unlock_buffer(new_bh);
ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
ext2_xattr_cache_insert(ea_block_cache, new_bh);
ext2_xattr_update_super_block(sb);
}
......@@ -721,8 +723,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
* This must happen under buffer lock for
* ext2_xattr_set2() to reliably detect freed block
*/
mb_cache_entry_delete_block(ext2_mb_cache,
hash, old_bh->b_blocknr);
mb_cache_entry_delete(ea_block_cache, hash,
old_bh->b_blocknr);
/* Free the old block. */
ea_bdebug(old_bh, "freeing");
ext2_free_blocks(inode, old_bh->b_blocknr, 1);
......@@ -795,8 +797,8 @@ ext2_xattr_delete_inode(struct inode *inode)
* This must happen under buffer lock for ext2_xattr_set2() to
* reliably detect freed block
*/
mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache,
hash, bh->b_blocknr);
mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
bh->b_blocknr);
ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
get_bh(bh);
bforget(bh);
......@@ -897,21 +899,21 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
__u32 hash = le32_to_cpu(header->h_hash);
struct mb_cache_entry *ce;
struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
if (!header->h_hash)
return NULL; /* never share */
ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
ce = mb_cache_entry_find_first(ext2_mb_cache, hash);
ce = mb_cache_entry_find_first(ea_block_cache, hash);
while (ce) {
struct buffer_head *bh;
bh = sb_bread(inode->i_sb, ce->e_block);
bh = sb_bread(inode->i_sb, ce->e_value);
if (!bh) {
ext2_error(inode->i_sb, "ext2_xattr_cache_find",
"inode %ld: block %ld read error",
inode->i_ino, (unsigned long) ce->e_block);
inode->i_ino, (unsigned long) ce->e_value);
} else {
lock_buffer(bh);
/*
......@@ -924,27 +926,27 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
* entry is still hashed is reliable.
*/
if (hlist_bl_unhashed(&ce->e_hash_list)) {
mb_cache_entry_put(ext2_mb_cache, ce);
mb_cache_entry_put(ea_block_cache, ce);
unlock_buffer(bh);
brelse(bh);
goto again;
} else if (le32_to_cpu(HDR(bh)->h_refcount) >
EXT2_XATTR_REFCOUNT_MAX) {
ea_idebug(inode, "block %ld refcount %d>%d",
(unsigned long) ce->e_block,
(unsigned long) ce->e_value,
le32_to_cpu(HDR(bh)->h_refcount),
EXT2_XATTR_REFCOUNT_MAX);
} else if (!ext2_xattr_cmp(header, HDR(bh))) {
ea_bdebug(bh, "b_count=%d",
atomic_read(&(bh->b_count)));
mb_cache_entry_touch(ext2_mb_cache, ce);
mb_cache_entry_put(ext2_mb_cache, ce);
mb_cache_entry_touch(ea_block_cache, ce);
mb_cache_entry_put(ea_block_cache, ce);
return bh;
}
unlock_buffer(bh);
brelse(bh);
}
ce = mb_cache_entry_find_next(ext2_mb_cache, ce);
ce = mb_cache_entry_find_next(ea_block_cache, ce);
}
return NULL;
}
......
......@@ -183,7 +183,7 @@ ext4_get_acl(struct inode *inode, int type)
*/
static int
__ext4_set_acl(handle_t *handle, struct inode *inode, int type,
struct posix_acl *acl)
struct posix_acl *acl, int xattr_flags)
{
int name_index;
void *value = NULL;
......@@ -218,7 +218,7 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
}
error = ext4_xattr_set_handle(handle, inode, name_index, "",
value, size, 0);
value, size, xattr_flags);
kfree(value);
if (!error)
......@@ -231,18 +231,23 @@ int
ext4_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
handle_t *handle;
int error, retries = 0;
int error, credits, retries = 0;
size_t acl_size = acl ? ext4_acl_size(acl->a_count) : 0;
error = dquot_initialize(inode);
if (error)
return error;
retry:
handle = ext4_journal_start(inode, EXT4_HT_XATTR,
ext4_jbd2_credits_xattr(inode));
error = ext4_xattr_set_credits(inode, acl_size, false /* is_create */,
&credits);
if (error)
return error;
handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
error = __ext4_set_acl(handle, inode, type, acl);
error = __ext4_set_acl(handle, inode, type, acl, 0 /* xattr_flags */);
ext4_journal_stop(handle);
if (error == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
......@@ -267,13 +272,13 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
if (default_acl) {
error = __ext4_set_acl(handle, inode, ACL_TYPE_DEFAULT,
default_acl);
default_acl, XATTR_CREATE);
posix_acl_release(default_acl);
}
if (acl) {
if (!error)
error = __ext4_set_acl(handle, inode, ACL_TYPE_ACCESS,
acl);
acl, XATTR_CREATE);
posix_acl_release(acl);
}
return error;
......
......@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
/*
* Mount flags set via mount options or defaults
*/
#define EXT4_MOUNT_NO_MBCACHE 0x00001 /* Do not use mbcache */
#define EXT4_MOUNT_GRPID 0x00004 /* Create files with directory's group */
#define EXT4_MOUNT_DEBUG 0x00008 /* Some debugging messages */
#define EXT4_MOUNT_ERRORS_CONT 0x00010 /* Continue on errors */
......@@ -1444,6 +1445,8 @@ struct ext4_sb_info {
unsigned int *s_mb_maxs;
unsigned int s_group_info_size;
unsigned int s_mb_free_pending;
struct list_head s_freed_data_list; /* List of blocks to be freed
after commit completed */
/* tunables */
unsigned long s_stripe;
......@@ -1516,7 +1519,8 @@ struct ext4_sb_info {
struct list_head s_es_list; /* List of inodes with reclaimable extents */
long s_es_nr_inode;
struct ext4_es_stats s_es_stats;
struct mb_cache *s_mb_cache;
struct mb_cache *s_ea_block_cache;
struct mb_cache *s_ea_inode_cache;
spinlock_t s_es_lock ____cacheline_aligned_in_smp;
/* Ratelimit ext4 messages. */
......@@ -1797,10 +1801,12 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt, ENCRYPT)
EXT4_FEATURE_INCOMPAT_EXTENTS| \
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
EXT4_FEATURE_INCOMPAT_EA_INODE| \
EXT4_FEATURE_INCOMPAT_MMP | \
EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
EXT4_FEATURE_INCOMPAT_ENCRYPT | \
EXT4_FEATURE_INCOMPAT_CSUM_SEED)
EXT4_FEATURE_INCOMPAT_CSUM_SEED | \
EXT4_FEATURE_INCOMPAT_LARGEDIR)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
......@@ -2098,6 +2104,12 @@ static inline struct ext4_inode *ext4_raw_inode(struct ext4_iloc *iloc)
return (struct ext4_inode *) (iloc->bh->b_data + iloc->offset);
}
static inline bool ext4_is_quota_file(struct inode *inode)
{
return IS_NOQUOTA(inode) &&
!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL);
}
/*
* This structure is stuffed into the struct file's private_data field
* for directories. It is where we put information so that we can do
......@@ -2126,6 +2138,16 @@ ext4_group_first_block_no(struct super_block *sb, ext4_group_t group_no)
*/
#define ERR_BAD_DX_DIR (-(MAX_ERRNO - 1))
/* htree levels for ext4 */
#define EXT4_HTREE_LEVEL_COMPAT 2
#define EXT4_HTREE_LEVEL 3
static inline int ext4_dir_htree_level(struct super_block *sb)
{
return ext4_has_feature_largedir(sb) ?
EXT4_HTREE_LEVEL : EXT4_HTREE_LEVEL_COMPAT;
}
/*
* Timeout and state flag for lazy initialization inode thread.
*/
......@@ -2389,16 +2411,17 @@ extern int ext4fs_dirhash(const char *name, int len, struct
/* ialloc.c */
extern struct inode *__ext4_new_inode(handle_t *, struct inode *, umode_t,
const struct qstr *qstr, __u32 goal,
uid_t *owner, int handle_type,
unsigned int line_no, int nblocks);
uid_t *owner, __u32 i_flags,
int handle_type, unsigned int line_no,
int nblocks);
#define ext4_new_inode(handle, dir, mode, qstr, goal, owner) \
#define ext4_new_inode(handle, dir, mode, qstr, goal, owner, i_flags) \
__ext4_new_inode((handle), (dir), (mode), (qstr), (goal), (owner), \
0, 0, 0)
i_flags, 0, 0, 0)
#define ext4_new_inode_start_handle(dir, mode, qstr, goal, owner, \
type, nblocks) \
__ext4_new_inode(NULL, (dir), (mode), (qstr), (goal), (owner), \
(type), __LINE__, (nblocks))
0, (type), __LINE__, (nblocks))
extern void ext4_free_inode(handle_t *, struct inode *);
......@@ -2433,6 +2456,7 @@ extern int ext4_mb_add_groupinfo(struct super_block *sb,
extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_fsblk_t block, unsigned long count);
extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
extern void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid);
/* inode.c */
int ext4_inode_is_fast_symlink(struct inode *inode);
......@@ -2704,19 +2728,20 @@ extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
extern int ext4_register_li_request(struct super_block *sb,
ext4_group_t first_not_zeroed);
static inline int ext4_has_group_desc_csum(struct super_block *sb)
{
return ext4_has_feature_gdt_csum(sb) ||
EXT4_SB(sb)->s_chksum_driver != NULL;
}
static inline int ext4_has_metadata_csum(struct super_block *sb)
{
WARN_ON_ONCE(ext4_has_feature_metadata_csum(sb) &&
!EXT4_SB(sb)->s_chksum_driver);
return (EXT4_SB(sb)->s_chksum_driver != NULL);
return ext4_has_feature_metadata_csum(sb) &&
(EXT4_SB(sb)->s_chksum_driver != NULL);
}
static inline int ext4_has_group_desc_csum(struct super_block *sb)
{
return ext4_has_feature_gdt_csum(sb) || ext4_has_metadata_csum(sb);
}
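The reordering here is load-bearing: a later hunk makes ext4_fill_super() load
the crc32c driver for the ea_inode feature as well, so a non-NULL
s_chksum_driver no longer implies metadata_csum. ext4_has_metadata_csum() is
therefore tightened to require the feature flag itself, and
ext4_has_group_desc_csum() is rebuilt on top of it.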
static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
{
return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
......@@ -2756,13 +2781,15 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
es->s_r_blocks_count_hi = cpu_to_le32(blk >> 32);
}
static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
static inline loff_t ext4_isize(struct super_block *sb,
struct ext4_inode *raw_inode)
{
if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
if (ext4_has_feature_largedir(sb) ||
S_ISREG(le16_to_cpu(raw_inode->i_mode)))
return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
le32_to_cpu(raw_inode->i_size_lo);
else
return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
}
static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
......
......@@ -77,7 +77,14 @@
#define EXT4_RESERVE_TRANS_BLOCKS 12U
#define EXT4_INDEX_EXTRA_TRANS_BLOCKS 8
/*
* Number of credits needed if we need to insert an entry into a
* directory. For each new index block, we need 4 blocks (old index
* block, new index block, bitmap block, bg summary). For normal
* htree directories there are 2 levels; if the largedir feature is
* enabled, there are 3 levels.
*/
#define EXT4_INDEX_EXTRA_TRANS_BLOCKS 12U
#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
......@@ -104,20 +111,6 @@
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
static inline int ext4_jbd2_credits_xattr(struct inode *inode)
{
int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);
/*
* In case of inline data, we may push out the data to a block,
* so we need to reserve credits for this eventuality
*/
if (ext4_has_inline_data(inode))
credits += ext4_writepage_trans_blocks(inode) + 1;
return credits;
}
/*
* Ext4 handle operation types -- for logging purposes
*/
......
......@@ -2488,7 +2488,8 @@ int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
static inline int get_default_free_blocks_flags(struct inode *inode)
{
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
else if (ext4_should_journal_data(inode))
return EXT4_FREE_BLOCKS_FORGET;
......
......@@ -364,13 +364,6 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
if (ext4_encrypted_inode(inode)) {
int err = fscrypt_get_encryption_info(inode);
if (err)
return 0;
if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
file_accessed(file);
if (IS_DAX(file_inode(file))) {
vma->vm_ops = &ext4_dax_vm_ops;
......
......@@ -480,6 +480,7 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t start_fsb;
ext4_fsblk_t end_fsb;
ext4_fsblk_t bofs;
ext4_fsblk_t eofs;
ext4_group_t start_ag;
ext4_group_t end_ag;
......@@ -487,9 +488,12 @@ static int ext4_getfsmap_datadev(struct super_block *sb,
ext4_grpblk_t last_cluster;
int error = 0;
bofs = le32_to_cpu(sbi->s_es->s_first_data_block);
eofs = ext4_blocks_count(sbi->s_es);
if (keys[0].fmr_physical >= eofs)
return 0;
else if (keys[0].fmr_physical < bofs)
keys[0].fmr_physical = bofs;
if (keys[1].fmr_physical >= eofs)
keys[1].fmr_physical = eofs - 1;
start_fsb = keys[0].fmr_physical;
......
......@@ -294,7 +294,6 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
* as writing the quota to disk may need the lock as well.
*/
dquot_initialize(inode);
ext4_xattr_delete_inode(handle, inode);
dquot_free_inode(inode);
dquot_drop(inode);
......@@ -743,8 +742,9 @@ static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
*/
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
umode_t mode, const struct qstr *qstr,
__u32 goal, uid_t *owner, int handle_type,
unsigned int line_no, int nblocks)
__u32 goal, uid_t *owner, __u32 i_flags,
int handle_type, unsigned int line_no,
int nblocks)
{
struct super_block *sb;
struct buffer_head *inode_bitmap_bh = NULL;
......@@ -766,30 +766,69 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if (!dir || !dir->i_nlink)
return ERR_PTR(-EPERM);
if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
sb = dir->i_sb;
sbi = EXT4_SB(sb);
if (unlikely(ext4_forced_shutdown(sbi)))
return ERR_PTR(-EIO);
if ((ext4_encrypted_inode(dir) ||
DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
if ((ext4_encrypted_inode(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) &&
!(i_flags & EXT4_EA_INODE_FL)) {
err = fscrypt_get_encryption_info(dir);
if (err)
return ERR_PTR(err);
if (!fscrypt_has_encryption_key(dir))
return ERR_PTR(-ENOKEY);
if (!handle)
nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
encrypt = 1;
}
sb = dir->i_sb;
if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
#ifdef CONFIG_EXT4_FS_POSIX_ACL
struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);
if (p) {
int acl_size = p->a_count * sizeof(ext4_acl_entry);
nblocks += (S_ISDIR(mode) ? 2 : 1) *
__ext4_xattr_set_credits(sb, NULL /* inode */,
NULL /* block_bh */, acl_size,
true /* is_create */);
posix_acl_release(p);
}
#endif
#ifdef CONFIG_SECURITY
{
int num_security_xattrs = 1;
#ifdef CONFIG_INTEGRITY
num_security_xattrs++;
#endif
/*
* We assume that security xattrs are never
* more than 1k. In practice they are under
* 128 bytes.
*/
nblocks += num_security_xattrs *
__ext4_xattr_set_credits(sb, NULL /* inode */,
NULL /* block_bh */, 1024,
true /* is_create */);
}
#endif
if (encrypt)
nblocks += __ext4_xattr_set_credits(sb,
NULL /* inode */, NULL /* block_bh */,
FSCRYPT_SET_CONTEXT_MAX_SIZE,
true /* is_create */);
}
ngroups = ext4_get_groups_count(sb);
trace_ext4_request_inode(dir, mode);
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
ei = EXT4_I(inode);
sbi = EXT4_SB(sb);
/*
* Initialize owners and quota early so that we don't have to account
......@@ -1053,6 +1092,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
/* Don't inherit extent flag from directory, amongst others. */
ei->i_flags =
ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
ei->i_flags |= i_flags;
ei->i_file_acl = 0;
ei->i_dtime = 0;
ei->i_block_group = group;
......@@ -1109,13 +1149,15 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
goto fail_free_drop;
}
err = ext4_init_acl(handle, inode, dir);
if (err)
goto fail_free_drop;
if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
err = ext4_init_acl(handle, inode, dir);
if (err)
goto fail_free_drop;
err = ext4_init_security(handle, inode, dir, qstr);
if (err)
goto fail_free_drop;
err = ext4_init_security(handle, inode, dir, qstr);
if (err)
goto fail_free_drop;
}
if (ext4_has_feature_extents(sb)) {
/* set extent flag only for directory, file and normal symlink*/
......
......@@ -829,7 +829,8 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
int flags = EXT4_FREE_BLOCKS_VALIDATED;
int err;
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode) ||
ext4_test_inode_flag(inode, EXT4_INODE_EA_INODE))
flags |= EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_METADATA;
else if (ext4_should_journal_data(inode))
flags |= EXT4_FREE_BLOCKS_FORGET;
......
......@@ -61,7 +61,7 @@ static int get_max_inline_xattr_value_size(struct inode *inode,
/* Compute min_offs. */
for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
if (!entry->e_value_block && entry->e_value_size) {
if (!entry->e_value_inum && entry->e_value_size) {
size_t offs = le16_to_cpu(entry->e_value_offs);
if (offs < min_offs)
min_offs = offs;
......
......@@ -144,16 +144,12 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
/*
* Test whether an inode is a fast symlink.
* A fast symlink has its symlink data stored in ext4_inode_info->i_data.
*/
int ext4_inode_is_fast_symlink(struct inode *inode)
{
int ea_blocks = EXT4_I(inode)->i_file_acl ?
EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
if (ext4_has_inline_data(inode))
return 0;
return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
return S_ISLNK(inode->i_mode) && inode->i_size &&
(inode->i_size < EXT4_N_BLOCKS * 4);
}
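Per the "change fast symlink test to not rely on i_blocks" commit in the
shortlog, the test now uses i_size alone: EXT4_N_BLOCKS is 15 and each i_data
slot is 4 bytes, so a symlink target shorter than 60 bytes fits inline in the
inode. Presumably the old ea_blocks arithmetic on i_blocks could no longer be
trusted once xattr storage, now including EA inodes, can contribute to it.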
/*
......@@ -189,6 +185,8 @@ void ext4_evict_inode(struct inode *inode)
{
handle_t *handle;
int err;
int extra_credits = 3;
struct ext4_xattr_inode_array *ea_inode_array = NULL;
trace_ext4_evict_inode(inode);
......@@ -213,7 +211,8 @@ void ext4_evict_inode(struct inode *inode)
*/
if (inode->i_ino != EXT4_JOURNAL_INO &&
ext4_should_journal_data(inode) &&
(S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
(S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
inode->i_data.nrpages) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
......@@ -238,8 +237,12 @@ void ext4_evict_inode(struct inode *inode)
* protection against it
*/
sb_start_intwrite(inode->i_sb);
if (!IS_NOQUOTA(inode))
extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
ext4_blocks_for_truncate(inode)+3);
ext4_blocks_for_truncate(inode)+extra_credits);
if (IS_ERR(handle)) {
ext4_std_error(inode->i_sb, PTR_ERR(handle));
/*
......@@ -254,6 +257,16 @@ void ext4_evict_inode(struct inode *inode)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
/*
* Set inode->i_size to 0 before calling ext4_truncate(). We need
* special handling of symlinks here because i_size is used to
* determine whether ext4_inode_info->i_data contains symlink data or
* block mappings. Setting i_size to 0 will remove its fast symlink
* status. Erase i_data so that it becomes a valid empty block map.
*/
if (ext4_inode_is_fast_symlink(inode))
memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
inode->i_size = 0;
err = ext4_mark_inode_dirty(handle, inode);
if (err) {
......@@ -271,25 +284,17 @@ void ext4_evict_inode(struct inode *inode)
}
}
/*
* ext4_ext_truncate() doesn't reserve any slop when it
* restarts journal transactions; therefore there may not be
* enough credits left in the handle to remove the inode from
* the orphan list and set the dtime field.
*/
if (!ext4_handle_has_enough_credits(handle, 3)) {
err = ext4_journal_extend(handle, 3);
if (err > 0)
err = ext4_journal_restart(handle, 3);
if (err != 0) {
ext4_warning(inode->i_sb,
"couldn't extend journal (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
sb_end_intwrite(inode->i_sb);
goto no_delete;
}
/* Remove xattr references. */
err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
extra_credits);
if (err) {
ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
ext4_journal_stop(handle);
ext4_orphan_del(NULL, inode);
sb_end_intwrite(inode->i_sb);
ext4_xattr_inode_array_free(ea_inode_array);
goto no_delete;
}
/*
......@@ -317,6 +322,7 @@ void ext4_evict_inode(struct inode *inode)
ext4_free_inode(handle, inode);
ext4_journal_stop(handle);
sb_end_intwrite(inode->i_sb);
ext4_xattr_inode_array_free(ea_inode_array);
return;
no_delete:
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
......@@ -710,7 +716,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
if (map->m_flags & EXT4_MAP_NEW &&
!(map->m_flags & EXT4_MAP_UNWRITTEN) &&
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!IS_NOQUOTA(inode) &&
!ext4_is_quota_file(inode) &&
ext4_should_order_data(inode)) {
if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
ret = ext4_jbd2_inode_add_wait(handle, inode);
......@@ -4712,7 +4718,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
if (ext4_has_feature_64bit(sb))
ei->i_file_acl |=
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(raw_inode);
inode->i_size = ext4_isize(sb, raw_inode);
if ((size = i_size_read(inode)) < 0) {
EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
ret = -EFSCORRUPTED;
......@@ -4846,6 +4852,15 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
}
brelse(iloc.bh);
ext4_set_inode_flags(inode);
if (ei->i_flags & EXT4_EA_INODE_FL) {
ext4_xattr_inode_set_class(inode);
inode_lock(inode);
inode->i_flags |= S_NOQUOTA;
inode_unlock(inode);
}
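EA inodes get S_NOQUOTA here because their blocks are not charged to the EA
inode itself: the new get_inode_usage callback in ext4's dquot_operations
(further down) transfers those charges to the inodes referencing the xattr
value, which is also why ext4_is_quota_file() explicitly excludes
EXT4_EA_INODE_FL.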
unlock_new_inode(inode);
return inode;
......@@ -5037,7 +5052,7 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
if (ei->i_disksize != ext4_isize(raw_inode)) {
if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
ext4_isize_set(raw_inode, ei->i_disksize);
need_datasync = 1;
}
......@@ -5287,7 +5302,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
error = PTR_ERR(handle);
goto err_out;
}
/* dquot_transfer() calls back ext4_get_inode_usage() which
* counts xattr inode references.
*/
down_read(&EXT4_I(inode)->xattr_sem);
error = dquot_transfer(inode, attr);
up_read(&EXT4_I(inode)->xattr_sem);
if (error) {
ext4_journal_stop(handle);
return error;
......@@ -5307,6 +5329,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
loff_t oldsize = inode->i_size;
int shrink = (attr->ia_size <= inode->i_size);
if (ext4_encrypted_inode(inode)) {
error = fscrypt_get_encryption_info(inode);
if (error)
return error;
if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
......
......@@ -218,7 +218,7 @@ static int ext4_ioctl_setflags(struct inode *inode,
unsigned int jflag;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
if (ext4_is_quota_file(inode))
goto flags_out;
oldflags = ei->i_flags;
......@@ -342,7 +342,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
err = -EPERM;
inode_lock(inode);
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
if (ext4_is_quota_file(inode))
goto out_unlock;
err = ext4_get_inode_loc(inode, &iloc);
......@@ -373,7 +373,13 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
if (!IS_ERR(transfer_to[PRJQUOTA])) {
/* __dquot_transfer() calls back ext4_get_inode_usage() which
* counts xattr inode references.
*/
down_read(&EXT4_I(inode)->xattr_sem);
err = __dquot_transfer(inode, transfer_to);
up_read(&EXT4_I(inode)->xattr_sem);
dqput(transfer_to[PRJQUOTA]);
if (err)
goto out_dirty;
......
......@@ -367,8 +367,6 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_free_data_callback(struct super_block *sb,
struct ext4_journal_cb_entry *jce, int rc);
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
......@@ -2639,6 +2637,7 @@ int ext4_mb_init(struct super_block *sb)
spin_lock_init(&sbi->s_md_lock);
spin_lock_init(&sbi->s_bal_lock);
sbi->s_mb_free_pending = 0;
INIT_LIST_HEAD(&sbi->s_freed_data_list);
sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
......@@ -2782,7 +2781,8 @@ int ext4_mb_release(struct super_block *sb)
}
static inline int ext4_issue_discard(struct super_block *sb,
ext4_group_t block_group, ext4_grpblk_t cluster, int count)
ext4_group_t block_group, ext4_grpblk_t cluster, int count,
struct bio **biop)
{
ext4_fsblk_t discard_block;
......@@ -2791,18 +2791,18 @@ static inline int ext4_issue_discard(struct super_block *sb,
count = EXT4_C2B(EXT4_SB(sb), count);
trace_ext4_discard_blocks(sb,
(unsigned long long) discard_block, count);
return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
if (biop) {
return __blkdev_issue_discard(sb->s_bdev,
(sector_t)discard_block << (sb->s_blocksize_bits - 9),
(sector_t)count << (sb->s_blocksize_bits - 9),
GFP_NOFS, 0, biop);
} else
return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
}
/*
* This function is called by the jbd2 layer once the commit has finished,
* so we know we can free the blocks that were released with that commit.
*/
static void ext4_free_data_callback(struct super_block *sb,
struct ext4_journal_cb_entry *jce,
int rc)
static void ext4_free_data_in_buddy(struct super_block *sb,
struct ext4_free_data *entry)
{
struct ext4_free_data *entry = (struct ext4_free_data *)jce;
struct ext4_buddy e4b;
struct ext4_group_info *db;
int err, count = 0, count2 = 0;
......@@ -2810,18 +2810,6 @@ static void ext4_free_data_callback(struct super_block *sb,
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->efd_count, entry->efd_group, entry);
if (test_opt(sb, DISCARD)) {
err = ext4_issue_discard(sb, entry->efd_group,
entry->efd_start_cluster,
entry->efd_count);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%d failed"
" with %d", entry->efd_group,
entry->efd_start_cluster,
entry->efd_count, err);
}
err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
......@@ -2862,6 +2850,56 @@ static void ext4_free_data_callback(struct super_block *sb,
mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
}
/*
* This function is called by the jbd2 layer once the commit has finished,
* so we know we can free the blocks that were released with that commit.
*/
void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_free_data *entry, *tmp;
struct bio *discard_bio = NULL;
struct list_head freed_data_list;
struct list_head *cut_pos = NULL;
int err;
INIT_LIST_HEAD(&freed_data_list);
spin_lock(&sbi->s_md_lock);
list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) {
if (entry->efd_tid != commit_tid)
break;
cut_pos = &entry->efd_list;
}
if (cut_pos)
list_cut_position(&freed_data_list, &sbi->s_freed_data_list,
cut_pos);
spin_unlock(&sbi->s_md_lock);
if (test_opt(sb, DISCARD)) {
list_for_each_entry(entry, &freed_data_list, efd_list) {
err = ext4_issue_discard(sb, entry->efd_group,
entry->efd_start_cluster,
entry->efd_count,
&discard_bio);
if (err && err != -EOPNOTSUPP) {
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%d failed"
" with %d", entry->efd_group,
entry->efd_start_cluster,
entry->efd_count, err);
} else if (err == -EOPNOTSUPP)
break;
}
if (discard_bio)
submit_bio_wait(discard_bio);
}
list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list)
ext4_free_data_in_buddy(sb, entry);
}
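Design note: this replaces the old per-extent ext4_free_data_callback().
Freed extents are queued on s_freed_data_list tagged with the freeing
transaction's tid; at commit time the list is cut at the matching tid, every
discard for that transaction is chained onto a single bio via
__blkdev_issue_discard(), and one submit_bio_wait() covers the whole batch.
This is the "send parallel discards on commit completions" item from the
shortlog.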
int __init ext4_init_mballoc(void)
{
ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
......@@ -3529,7 +3567,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_set_bits(bitmap, start, len);
preallocated += len;
}
mb_debug(1, "prellocated %u for group %u\n", preallocated, group);
mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
}
static void ext4_mb_pa_callback(struct rcu_head *head)
......@@ -4464,7 +4502,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
trace_ext4_request_blocks(ar);
/* Allow to use superuser reservation for quota file */
if (IS_NOQUOTA(ar->inode))
if (ext4_is_quota_file(ar->inode))
ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
......@@ -4583,14 +4621,28 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
* are contiguous, AND the extents were freed by the same transaction,
* AND the blocks are associated with the same group.
*/
static int can_merge(struct ext4_free_data *entry1,
struct ext4_free_data *entry2)
static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi,
struct ext4_free_data *entry,
struct ext4_free_data *new_entry,
struct rb_root *entry_rb_root)
{
if ((entry1->efd_tid == entry2->efd_tid) &&
(entry1->efd_group == entry2->efd_group) &&
((entry1->efd_start_cluster + entry1->efd_count) == entry2->efd_start_cluster))
return 1;
return 0;
if ((entry->efd_tid != new_entry->efd_tid) ||
(entry->efd_group != new_entry->efd_group))
return;
if (entry->efd_start_cluster + entry->efd_count ==
new_entry->efd_start_cluster) {
new_entry->efd_start_cluster = entry->efd_start_cluster;
new_entry->efd_count += entry->efd_count;
} else if (new_entry->efd_start_cluster + new_entry->efd_count ==
entry->efd_start_cluster) {
new_entry->efd_count += entry->efd_count;
} else
return;
spin_lock(&sbi->s_md_lock);
list_del(&entry->efd_list);
spin_unlock(&sbi->s_md_lock);
rb_erase(&entry->efd_node, entry_rb_root);
kmem_cache_free(ext4_free_data_cachep, entry);
}
static noinline_for_stack int
......@@ -4646,29 +4698,19 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
node = rb_prev(new_node);
if (node) {
entry = rb_entry(node, struct ext4_free_data, efd_node);
if (can_merge(entry, new_entry) &&
ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
new_entry->efd_start_cluster = entry->efd_start_cluster;
new_entry->efd_count += entry->efd_count;
rb_erase(node, &(db->bb_free_root));
kmem_cache_free(ext4_free_data_cachep, entry);
}
ext4_try_merge_freed_extent(sbi, entry, new_entry,
&(db->bb_free_root));
}
node = rb_next(new_node);
if (node) {
entry = rb_entry(node, struct ext4_free_data, efd_node);
if (can_merge(new_entry, entry) &&
ext4_journal_callback_try_del(handle, &entry->efd_jce)) {
new_entry->efd_count += entry->efd_count;
rb_erase(node, &(db->bb_free_root));
kmem_cache_free(ext4_free_data_cachep, entry);
}
ext4_try_merge_freed_extent(sbi, entry, new_entry,
&(db->bb_free_root));
}
/* Add the extent to transaction's private list */
new_entry->efd_jce.jce_func = ext4_free_data_callback;
spin_lock(&sbi->s_md_lock);
_ext4_journal_callback_add(handle, &new_entry->efd_jce);
list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list);
sbi->s_mb_free_pending += clusters;
spin_unlock(&sbi->s_md_lock);
return 0;
......@@ -4871,7 +4913,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
* them with group lock_held
*/
if (test_opt(sb, DISCARD)) {
err = ext4_issue_discard(sb, block_group, bit, count);
err = ext4_issue_discard(sb, block_group, bit, count,
NULL);
if (err && err != -EOPNOTSUPP)
ext4_msg(sb, KERN_WARNING, "discard request in"
" group:%d block:%d count:%lu failed"
......@@ -5094,7 +5137,7 @@ __acquires(bitlock)
*/
mb_mark_used(e4b, &ex);
ext4_unlock_group(sb, group);
ret = ext4_issue_discard(sb, group, start, count);
ret = ext4_issue_discard(sb, group, start, count, NULL);
ext4_lock_group(sb, group);
mb_free_blocks(NULL, e4b, start, ex.fe_len);
return ret;
......
......@@ -78,10 +78,8 @@ do { \
struct ext4_free_data {
/* MUST be the first member */
struct ext4_journal_cb_entry efd_jce;
/* ext4_free_data private data starts from here */
/* this links the free block information from sb_info */
struct list_head efd_list;
/* this links the free block information from group_info */
struct rb_node efd_node;
......
......@@ -475,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
owner[0] = i_uid_read(inode);
owner[1] = i_gid_read(inode);
tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
S_IFREG, NULL, goal, owner);
S_IFREG, NULL, goal, owner, 0);
if (IS_ERR(tmp_inode)) {
retval = PTR_ERR(tmp_inode);
ext4_journal_stop(handle);
......
......@@ -484,7 +484,7 @@ mext_check_arguments(struct inode *orig_inode,
return -EBUSY;
}
if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) {
if (ext4_is_quota_file(orig_inode) && ext4_is_quota_file(donor_inode)) {
ext4_debug("ext4 move extent: The argument files should "
"not be quota files [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
......
......@@ -513,7 +513,7 @@ ext4_next_entry(struct ext4_dir_entry_2 *p, unsigned long blocksize)
static inline ext4_lblk_t dx_get_block(struct dx_entry *entry)
{
return le32_to_cpu(entry->block) & 0x00ffffff;
return le32_to_cpu(entry->block) & 0x0fffffff;
}
static inline void dx_set_block(struct dx_entry *entry, ext4_lblk_t value)
......@@ -739,6 +739,7 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
u32 hash;
memset(frame_in, 0, EXT4_HTREE_LEVEL * sizeof(frame_in[0]));
frame->bh = ext4_read_dirblock(dir, 0, INDEX);
if (IS_ERR(frame->bh))
return (struct dx_frame *) frame->bh;
......@@ -768,9 +769,15 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
}
indirect = root->info.indirect_levels;
if (indirect > 1) {
ext4_warning_inode(dir, "Unimplemented hash depth: %#06x",
root->info.indirect_levels);
if (indirect >= ext4_dir_htree_level(dir->i_sb)) {
ext4_warning(dir->i_sb,
"Directory (ino: %lu) htree depth %#06x exceed"
"supported value", dir->i_ino,
ext4_dir_htree_level(dir->i_sb));
if (ext4_dir_htree_level(dir->i_sb) < EXT4_HTREE_LEVEL) {
ext4_warning(dir->i_sb, "Enable large directory "
"feature to access it");
}
goto fail;
}
......@@ -859,12 +866,19 @@ dx_probe(struct ext4_filename *fname, struct inode *dir,
static void dx_release(struct dx_frame *frames)
{
struct dx_root_info *info;
int i;
if (frames[0].bh == NULL)
return;
if (((struct dx_root *)frames[0].bh->b_data)->info.indirect_levels)
brelse(frames[1].bh);
brelse(frames[0].bh);
info = &((struct dx_root *)frames[0].bh->b_data)->info;
for (i = 0; i <= info->indirect_levels; i++) {
if (frames[i].bh == NULL)
break;
brelse(frames[i].bh);
frames[i].bh = NULL;
}
}
/*
......@@ -1050,7 +1064,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
{
struct dx_hash_info hinfo;
struct ext4_dir_entry_2 *de;
struct dx_frame frames[2], *frame;
struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
struct inode *dir;
ext4_lblk_t block;
int count = 0;
......@@ -1428,11 +1442,11 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto next;
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
(unsigned long) block);
brelse(bh);
goto next;
ret = ERR_PTR(-EIO);
goto cleanup_and_exit;
}
if (!buffer_verified(bh) &&
!is_dx_internal_node(dir, block,
......@@ -1442,7 +1456,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
EXT4_ERROR_INODE(dir, "checksumming directory "
"block %lu", (unsigned long)block);
brelse(bh);
goto next;
ret = ERR_PTR(-EFSBADCRC);
goto cleanup_and_exit;
}
set_buffer_verified(bh);
i = search_dirblock(bh, dir, &fname,
......@@ -1485,7 +1500,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
struct ext4_dir_entry_2 **res_dir)
{
struct super_block * sb = dir->i_sb;
struct dx_frame frames[2], *frame;
struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
struct buffer_head *bh;
ext4_lblk_t block;
int retval;
......@@ -1889,7 +1904,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
*/
dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
dir->i_version++;
inode_inc_iversion(dir);
ext4_mark_inode_dirty(handle, dir);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_dirent_node(handle, dir, bh);
......@@ -1908,7 +1923,7 @@ static int make_indexed_dir(handle_t *handle, struct ext4_filename *fname,
{
struct buffer_head *bh2;
struct dx_root *root;
struct dx_frame frames[2], *frame;
struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
struct dx_entry *entries;
struct ext4_dir_entry_2 *de, *de2;
struct ext4_dir_entry_tail *t;
......@@ -2127,13 +2142,16 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
struct inode *dir, struct inode *inode)
{
struct dx_frame frames[2], *frame;
struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
struct dx_entry *entries, *at;
struct buffer_head *bh;
struct super_block *sb = dir->i_sb;
struct ext4_dir_entry_2 *de;
int restart;
int err;
again:
restart = 0;
frame = dx_probe(fname, dir, NULL, frames);
if (IS_ERR(frame))
return PTR_ERR(frame);
......@@ -2155,24 +2173,44 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
if (err != -ENOSPC)
goto cleanup;
err = 0;
/* Block full, should compress but for now just split */
dxtrace(printk(KERN_DEBUG "using %u of %u node entries\n",
dx_get_count(entries), dx_get_limit(entries)));
/* Need to split index? */
if (dx_get_count(entries) == dx_get_limit(entries)) {
ext4_lblk_t newblock;
unsigned icount = dx_get_count(entries);
int levels = frame - frames;
int levels = frame - frames + 1;
unsigned int icount;
int add_level = 1;
struct dx_entry *entries2;
struct dx_node *node2;
struct buffer_head *bh2;
if (levels && (dx_get_count(frames->entries) ==
dx_get_limit(frames->entries))) {
ext4_warning_inode(dir, "Directory index full!");
while (frame > frames) {
if (dx_get_count((frame - 1)->entries) <
dx_get_limit((frame - 1)->entries)) {
add_level = 0;
break;
}
frame--; /* split higher index block */
at = frame->at;
entries = frame->entries;
restart = 1;
}
if (add_level && levels == ext4_dir_htree_level(sb)) {
ext4_warning(sb, "Directory (ino: %lu) index full, "
"reach max htree level :%d",
dir->i_ino, levels);
if (ext4_dir_htree_level(sb) < EXT4_HTREE_LEVEL) {
ext4_warning(sb, "Large directory feature is "
"not enabled on this "
"filesystem");
}
err = -ENOSPC;
goto cleanup;
}
icount = dx_get_count(entries);
bh2 = ext4_append(handle, dir, &newblock);
if (IS_ERR(bh2)) {
err = PTR_ERR(bh2);
......@@ -2187,7 +2225,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
err = ext4_journal_get_write_access(handle, frame->bh);
if (err)
goto journal_error;
if (levels) {
if (!add_level) {
unsigned icount1 = icount/2, icount2 = icount - icount1;
unsigned hash2 = dx_get_hash(entries + icount1);
dxtrace(printk(KERN_DEBUG "Split index %i/%i\n",
......@@ -2195,7 +2233,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
err = ext4_journal_get_write_access(handle,
frames[0].bh);
(frame - 1)->bh);
if (err)
goto journal_error;
......@@ -2211,17 +2249,25 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
frame->entries = entries = entries2;
swap(frame->bh, bh2);
}
dx_insert_block(frames + 0, hash2, newblock);
dxtrace(dx_show_index("node", frames[1].entries));
dx_insert_block((frame - 1), hash2, newblock);
dxtrace(dx_show_index("node", frame->entries));
dxtrace(dx_show_index("node",
((struct dx_node *) bh2->b_data)->entries));
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
if (err)
goto journal_error;
brelse (bh2);
err = ext4_handle_dirty_dx_node(handle, dir,
(frame - 1)->bh);
if (err)
goto journal_error;
if (restart) {
err = ext4_handle_dirty_dx_node(handle, dir,
frame->bh);
goto journal_error;
}
} else {
dxtrace(printk(KERN_DEBUG
"Creating second level index...\n"));
struct dx_root *dxroot;
memcpy((char *) entries2, (char *) entries,
icount * sizeof(struct dx_entry));
dx_set_limit(entries2, dx_node_limit(dir));
......@@ -2229,22 +2275,18 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
/* Set up root */
dx_set_count(entries, 1);
dx_set_block(entries + 0, newblock);
((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
/* Add new access path frame */
frame = frames + 1;
frame->at = at = at - entries + entries2;
frame->entries = entries = entries2;
frame->bh = bh2;
err = ext4_journal_get_write_access(handle,
frame->bh);
dxroot = (struct dx_root *)frames[0].bh->b_data;
dxroot->info.indirect_levels += 1;
dxtrace(printk(KERN_DEBUG
"Creating %d level index...\n",
dxroot->info.indirect_levels));
err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
if (err)
goto journal_error;
}
err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
if (err) {
ext4_std_error(inode->i_sb, err);
goto cleanup;
err = ext4_handle_dirty_dx_node(handle, dir, bh2);
brelse(bh2);
restart = 1;
goto journal_error;
}
}
de = do_split(handle, dir, &bh, frame, &fname->hinfo);
......@@ -2256,10 +2298,15 @@ static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
goto cleanup;
journal_error:
ext4_std_error(dir->i_sb, err);
ext4_std_error(dir->i_sb, err); /* this is a no-op if err == 0 */
cleanup:
brelse(bh);
dx_release(frames);
/* @restart being true means the htree path has changed, so we need to
* repeat dx_probe() to find a valid htree path
*/
if (restart && err == 0)
goto again;
return err;
}
......@@ -2296,7 +2343,7 @@ int ext4_generic_delete_entry(handle_t *handle,
blocksize);
else
de->inode = 0;
dir->i_version++;
inode_inc_iversion(dir);
return 0;
}
i += ext4_rec_len_from_disk(de->rec_len, blocksize);
......
......@@ -373,6 +373,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
struct ext4_journal_cb_entry *jce;
BUG_ON(txn->t_state == T_FINISHED);
ext4_process_freed_data(sb, txn->t_tid);
spin_lock(&sbi->s_md_lock);
while (!list_empty(&txn->t_private_list)) {
jce = list_entry(txn->t_private_list.next,
......@@ -927,9 +930,13 @@ static void ext4_put_super(struct super_block *sb)
invalidate_bdev(sbi->journal_bdev);
ext4_blkdev_remove(sbi);
}
if (sbi->s_mb_cache) {
ext4_xattr_destroy_cache(sbi->s_mb_cache);
sbi->s_mb_cache = NULL;
if (sbi->s_ea_inode_cache) {
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
sbi->s_ea_inode_cache = NULL;
}
if (sbi->s_ea_block_cache) {
ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
sbi->s_ea_block_cache = NULL;
}
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
......@@ -1143,7 +1150,16 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
void *fs_data)
{
handle_t *handle = fs_data;
int res, res2, retries = 0;
int res, res2, credits, retries = 0;
/*
* Encrypting the root directory is not allowed because e2fsck expects
* lost+found to exist and be unencrypted, and encrypting the root
* directory would imply encrypting the lost+found directory as well as
* the filename "lost+found" itself.
*/
if (inode->i_ino == EXT4_ROOT_INO)
return -EPERM;
res = ext4_convert_inline_data(inode);
if (res)
......@@ -1178,8 +1194,12 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
if (res)
return res;
retry:
handle = ext4_journal_start(inode, EXT4_HT_MISC,
ext4_jbd2_credits_xattr(inode));
res = ext4_xattr_set_credits(inode, len, false /* is_create */,
&credits);
if (res)
return res;
handle = ext4_journal_start(inode, EXT4_HT_MISC, credits);
if (IS_ERR(handle))
return PTR_ERR(handle);
......@@ -1256,16 +1276,17 @@ static struct dquot **ext4_get_dquots(struct inode *inode)
}
static const struct dquot_operations ext4_quota_operations = {
.get_reserved_space = ext4_get_reserved_space,
.write_dquot = ext4_write_dquot,
.acquire_dquot = ext4_acquire_dquot,
.release_dquot = ext4_release_dquot,
.mark_dirty = ext4_mark_dquot_dirty,
.write_info = ext4_write_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = ext4_get_projid,
.get_next_id = ext4_get_next_id,
.get_reserved_space = ext4_get_reserved_space,
.write_dquot = ext4_write_dquot,
.acquire_dquot = ext4_acquire_dquot,
.release_dquot = ext4_release_dquot,
.mark_dirty = ext4_mark_dquot_dirty,
.write_info = ext4_write_info,
.alloc_dquot = dquot_alloc,
.destroy_dquot = dquot_destroy,
.get_projid = ext4_get_projid,
.get_inode_usage = ext4_get_inode_usage,
.get_next_id = ext4_get_next_id,
};
static const struct quotactl_ops ext4_qctl_operations = {
......@@ -1328,7 +1349,7 @@ enum {
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
Opt_max_dir_size_kb, Opt_nojournal_checksum,
Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
};
static const match_table_t tokens = {
......@@ -1411,6 +1432,8 @@ static const match_table_t tokens = {
{Opt_noinit_itable, "noinit_itable"},
{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
{Opt_test_dummy_encryption, "test_dummy_encryption"},
{Opt_nombcache, "nombcache"},
{Opt_nombcache, "no_mbcache"}, /* for backward compatibility */
{Opt_removed, "check=none"}, /* mount option from ext2/3 */
{Opt_removed, "nocheck"}, /* mount option from ext2/3 */
{Opt_removed, "reservation"}, /* mount option from ext2/3 */
......@@ -1618,6 +1641,7 @@ static const struct mount_opts {
{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
{Opt_max_dir_size_kb, 0, MOPT_GTE0},
{Opt_test_dummy_encryption, 0, MOPT_GTE0},
{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
{Opt_err, 0, 0}
};
......@@ -3445,7 +3469,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
/* Load the checksum driver */
if (ext4_has_feature_metadata_csum(sb)) {
if (ext4_has_feature_metadata_csum(sb) ||
ext4_has_feature_ea_inode(sb)) {
sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(sbi->s_chksum_driver)) {
ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
......@@ -3467,7 +3492,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Precompute checksum seed for all metadata */
if (ext4_has_feature_csum_seed(sb))
sbi->s_csum_seed = le32_to_cpu(es->s_checksum_seed);
else if (ext4_has_metadata_csum(sb))
else if (ext4_has_metadata_csum(sb) || ext4_has_feature_ea_inode(sb))
sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
sizeof(es->s_uuid));
......@@ -3597,6 +3622,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"The Hurd can't support 64-bit file systems");
goto failed_mount;
}
/*
* ea_inode feature uses l_i_version field which is not
* available in HURD_COMPAT mode.
*/
if (ext4_has_feature_ea_inode(sb)) {
ext4_msg(sb, KERN_ERR,
"ea_inode feature is not supported for Hurd");
goto failed_mount;
}
}
if (IS_EXT2_SB(sb)) {
......@@ -4061,10 +4096,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
no_journal:
sbi->s_mb_cache = ext4_xattr_create_cache();
if (!sbi->s_mb_cache) {
ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
goto failed_mount_wq;
if (!test_opt(sb, NO_MBCACHE)) {
sbi->s_ea_block_cache = ext4_xattr_create_cache();
if (!sbi->s_ea_block_cache) {
ext4_msg(sb, KERN_ERR,
"Failed to create ea_block_cache");
goto failed_mount_wq;
}
if (ext4_has_feature_ea_inode(sb)) {
sbi->s_ea_inode_cache = ext4_xattr_create_cache();
if (!sbi->s_ea_inode_cache) {
ext4_msg(sb, KERN_ERR,
"Failed to create ea_inode_cache");
goto failed_mount_wq;
}
}
}
if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
......@@ -4296,9 +4343,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (EXT4_SB(sb)->rsv_conversion_wq)
destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
failed_mount_wq:
if (sbi->s_mb_cache) {
ext4_xattr_destroy_cache(sbi->s_mb_cache);
sbi->s_mb_cache = NULL;
if (sbi->s_ea_inode_cache) {
ext4_xattr_destroy_cache(sbi->s_ea_inode_cache);
sbi->s_ea_inode_cache = NULL;
}
if (sbi->s_ea_block_cache) {
ext4_xattr_destroy_cache(sbi->s_ea_block_cache);
sbi->s_ea_block_cache = NULL;
}
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
......@@ -4957,6 +5008,12 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
}
if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
err = -EINVAL;
goto restore_opts;
}
if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
"dax flag with busy inodes while remounting");
......
......@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
if (!ret || val >= clusters)
if (ret || val >= clusters)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
......
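kstrtoull() returns 0 on success and a negative errno on failure, so the old
!ret test was inverted: every well-formed value was rejected with -EINVAL,
while parse failures fell through to the range check on an uninitialized val.
The one-character fix makes the handler reject exactly the error and
out-of-range cases.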
......@@ -44,7 +44,7 @@ struct ext4_xattr_entry {
__u8 e_name_len; /* length of name */
__u8 e_name_index; /* attribute name index */
__le16 e_value_offs; /* offset in disk block of value */
__le32 e_value_block; /* disk block attribute is stored on (n/i) */
__le32 e_value_inum; /* inode in which the value is stored */
__le32 e_value_size; /* size of attribute value */
__le32 e_hash; /* hash value of name and value */
char e_name[0]; /* attribute name */
......@@ -69,6 +69,13 @@ struct ext4_xattr_entry {
EXT4_I(inode)->i_extra_isize))
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
/*
* The minimum size of an EA value for it to be stored in an external inode:
* size of block - size of header - size of 1 entry - 4 null bytes.
*/
#define EXT4_XATTR_MIN_LARGE_EA_SIZE(b) \
((b) - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4)
#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
#define BFIRST(bh) ENTRY(BHDR(bh)+1)
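A worked instance of the EXT4_XATTR_MIN_LARGE_EA_SIZE threshold, using the usual on-disk sizes (a 16-byte ext4_xattr_entry, so EXT4_XATTR_LEN(3) rounds up to 20, and a 32-byte ext4_xattr_header; these constants come from the upstream definitions and should be treated as assumptions):

	/* EXT4_XATTR_MIN_LARGE_EA_SIZE(4096)
	 *   = 4096 - EXT4_XATTR_LEN(3) - sizeof(struct ext4_xattr_header) - 4
	 *   = 4096 - 20 - 32 - 4
	 *   = 4040
	 * so on a 4k-block filesystem, a value of roughly 4040 bytes or
	 * more is pushed out to its own EA inode. */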
......@@ -77,10 +84,11 @@ struct ext4_xattr_entry {
#define EXT4_ZERO_XATTR_VALUE ((void *)-1)
struct ext4_xattr_info {
int name_index;
const char *name;
const void *value;
size_t value_len;
int name_index;
int in_inode;
};
struct ext4_xattr_search {
......@@ -96,6 +104,11 @@ struct ext4_xattr_ibody_find {
struct ext4_iloc iloc;
};
struct ext4_xattr_inode_array {
unsigned int count; /* # of used items in the array */
struct inode *inodes[0];
};
extern const struct xattr_handler ext4_xattr_user_handler;
extern const struct xattr_handler ext4_xattr_trusted_handler;
extern const struct xattr_handler ext4_xattr_security_handler;
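ext4_xattr_inode_array above is a flexible-array container: ext4_xattr_delete_inode() collects the EA inodes referenced by a dying inode into it, and ext4_xattr_inode_array_free() releases them afterwards. A minimal allocation sketch under assumed names (EA_INODE_COUNT and ea_inode are illustrative, not ext4 identifiers):

	struct ext4_xattr_inode_array *array;

	array = kmalloc(sizeof(*array) +
			EA_INODE_COUNT * sizeof(struct inode *), GFP_NOFS);
	if (!array)
		return -ENOMEM;
	array->count = 0;
	array->inodes[array->count++] = ea_inode;	/* hand over a reference */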
......@@ -139,8 +152,16 @@ extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
extern int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
bool is_create, int *credits);
extern int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
struct buffer_head *block_bh, size_t value_len,
bool is_create);
extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
extern int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
struct ext4_xattr_inode_array **array,
int extra_credits);
extern void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *array);
extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
struct ext4_inode *raw_inode, handle_t *handle);
......@@ -169,3 +190,11 @@ static inline int ext4_init_security(handle_t *handle, struct inode *inode,
return 0;
}
#endif
#ifdef CONFIG_LOCKDEP
extern void ext4_xattr_inode_set_class(struct inode *ea_inode);
#else
static inline void ext4_xattr_inode_set_class(struct inode *ea_inode) { }
#endif
extern int ext4_get_inode_usage(struct inode *inode, qsize_t *usage);
......@@ -10,13 +10,14 @@
/*
* Mbcache is a simple key-value store. Keys need not be unique, however
* key-value pairs are expected to be unique (we use this fact in
* mb_cache_entry_delete_block()).
* mb_cache_entry_delete()).
*
* Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
* They use hash of a block contents as a key and block number as a value.
* That's why keys need not be unique (different xattr blocks may end up having
* the same hash). However block number always uniquely identifies a cache
* entry.
* Ext4 also uses it for deduplication of xattr values stored in inodes.
* They use a hash of the data as the key and supply a value that may be a
* block number or an inode number. That's why keys need not be unique (hashes
* of different data may collide), but the user-provided value always uniquely
* identifies a cache entry.
*
* We provide functions for creation and removal of entries, search by key,
* and a special "delete entry with given key-value pair" operation. Fixed
......@@ -62,15 +63,15 @@ static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
* @cache - cache where the entry should be created
* @mask - gfp mask with which the entry should be allocated
* @key - key of the entry
* @block - block that contains data
* @reusable - is the block reusable by other inodes?
* @value - value of the entry
* @reusable - is the entry reusable by others?
*
* Creates entry in @cache with key @key and records that data is stored in
* block @block. The function returns -EBUSY if entry with the same key
* and for the same block already exists in cache. Otherwise 0 is returned.
* Creates entry in @cache with key @key and value @value. The function returns
* -EBUSY if entry with the same key and value already exists in cache.
* Otherwise 0 is returned.
*/
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
sector_t block, bool reusable)
u64 value, bool reusable)
{
struct mb_cache_entry *entry, *dup;
struct hlist_bl_node *dup_node;
......@@ -91,12 +92,12 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
/* One ref for hash, one ref returned */
atomic_set(&entry->e_refcnt, 1);
entry->e_key = key;
entry->e_block = block;
entry->e_value = value;
entry->e_reusable = reusable;
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
if (dup->e_key == key && dup->e_block == block) {
if (dup->e_key == key && dup->e_value == value) {
hlist_bl_unlock(head);
kmem_cache_free(mb_entry_cache, entry);
return -EBUSY;
......@@ -187,13 +188,13 @@ struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
EXPORT_SYMBOL(mb_cache_entry_find_next);
/*
* mb_cache_entry_get - get a cache entry by block number (and key)
* mb_cache_entry_get - get a cache entry by value (and key)
* @cache - cache we work with
* @key - key of block number @block
* @block - block number
* @key - key
* @value - value
*/
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
sector_t block)
u64 value)
{
struct hlist_bl_node *node;
struct hlist_bl_head *head;
......@@ -202,7 +203,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
if (entry->e_key == key && entry->e_block == block) {
if (entry->e_key == key && entry->e_value == value) {
atomic_inc(&entry->e_refcnt);
goto out;
}
......@@ -214,15 +215,14 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
}
EXPORT_SYMBOL(mb_cache_entry_get);
/* mb_cache_entry_delete_block - remove information about block from cache
/* mb_cache_entry_delete - remove a cache entry
* @cache - cache we work with
* @key - key of block @block
* @block - block number
* @key - key
* @value - value
*
* Remove entry from cache @cache with key @key with data stored in @block.
* Remove entry from cache @cache with key @key and value @value.
*/
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
sector_t block)
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
struct hlist_bl_node *node;
struct hlist_bl_head *head;
......@@ -231,7 +231,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
head = mb_cache_entry_head(cache, key);
hlist_bl_lock(head);
hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
if (entry->e_key == key && entry->e_block == block) {
if (entry->e_key == key && entry->e_value == value) {
/* We keep hash list reference to keep entry alive */
hlist_bl_del_init(&entry->e_hash_list);
hlist_bl_unlock(head);
......@@ -248,7 +248,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
}
hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete_block);
EXPORT_SYMBOL(mb_cache_entry_delete);
/* mb_cache_entry_touch - cache entry got used
* @cache - cache the entry belongs to
......
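To tie the renamed calls together, here is a hedged usage sketch of the generalized API; the key/value pair (an xattr hash mapped to an EA inode number) is illustrative rather than lifted from ext4:

	struct mb_cache *cache = mb_cache_create(10);	/* 2^10 hash buckets */
	struct mb_cache_entry *entry;
	u32 key = 0x1234abcd;	/* e.g. hash of an xattr value */
	u64 value = 8193;	/* e.g. the EA inode number holding it */
	int err;

	err = mb_cache_entry_create(cache, GFP_NOFS, key, value, true);
	if (err == -EBUSY)
		pr_debug("same key/value pair already cached\n");

	entry = mb_cache_entry_get(cache, key, value);
	if (entry)
		mb_cache_entry_put(cache, entry);	/* drop the lookup ref */

	mb_cache_entry_delete(cache, key, value);
	mb_cache_destroy(cache);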
......@@ -1910,6 +1910,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
{
qsize_t space, cur_space;
qsize_t rsv_space = 0;
qsize_t inode_usage = 1;
struct dquot *transfer_from[MAXQUOTAS] = {};
int cnt, ret = 0;
char is_valid[MAXQUOTAS] = {};
......@@ -1919,6 +1920,13 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
if (IS_NOQUOTA(inode))
return 0;
if (inode->i_sb->dq_op->get_inode_usage) {
ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
if (ret)
return ret;
}
/* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
warn_to[cnt].w_type = QUOTA_NL_NOWARN;
......@@ -1946,7 +1954,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
continue;
is_valid[cnt] = 1;
transfer_from[cnt] = i_dquot(inode)[cnt];
ret = check_idq(transfer_to[cnt], 1, &warn_to[cnt]);
ret = check_idq(transfer_to[cnt], inode_usage, &warn_to[cnt]);
if (ret)
goto over_quota;
ret = check_bdq(transfer_to[cnt], space, 0, &warn_to[cnt]);
......@@ -1963,7 +1971,7 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
/* Due to IO error we might not have transfer_from[] structure */
if (transfer_from[cnt]) {
int wtype;
wtype = info_idq_free(transfer_from[cnt], 1);
wtype = info_idq_free(transfer_from[cnt], inode_usage);
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn_from_inodes[cnt],
transfer_from[cnt], wtype);
......@@ -1971,13 +1979,13 @@ int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
if (wtype != QUOTA_NL_NOWARN)
prepare_warning(&warn_from_space[cnt],
transfer_from[cnt], wtype);
dquot_decr_inodes(transfer_from[cnt], 1);
dquot_decr_inodes(transfer_from[cnt], inode_usage);
dquot_decr_space(transfer_from[cnt], cur_space);
dquot_free_reserved_space(transfer_from[cnt],
rsv_space);
}
dquot_incr_inodes(transfer_to[cnt], 1);
dquot_incr_inodes(transfer_to[cnt], inode_usage);
dquot_incr_space(transfer_to[cnt], cur_space);
dquot_resv_space(transfer_to[cnt], rsv_space);
......
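The hook gives the filesystem a say in how many quota inodes a single VFS inode is worth: with ext4's ea_inode feature the parent is charged for each xattr inode it references, and a chown must transfer the whole charge, hence the inode_usage plumbing above. A minimal sketch of such a callback; count_ea_inodes() is a hypothetical helper standing in for ext4's xattr walk:

static int example_get_inode_usage(struct inode *inode, qsize_t *usage)
{
	qsize_t count = 1;		/* the inode itself */

	/* ...plus one per xattr value stored in its own inode
	 * (hypothetical helper, illustrative only). */
	count += count_ea_inodes(inode);

	*usage = count;
	return 0;
}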
......@@ -83,6 +83,9 @@ struct fscrypt_operations {
unsigned (*max_namelen)(struct inode *);
};
/* Maximum value for the third parameter of fscrypt_operations.set_context(). */
#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
if (inode->i_sb->s_cop->dummy_context &&
......
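Where the 28 in FSCRYPT_SET_CONTEXT_MAX_SIZE comes from, with field sizes taken from the upstream fscrypt_context definition (treat them as assumptions rather than guarantees):

	/* sizeof(struct fscrypt_context)
	 *   = 1 (format) + 1 (contents_encryption_mode)
	 *   + 1 (filenames_encryption_mode) + 1 (flags)
	 *   + 8 (master_key_descriptor) + 16 (nonce)
	 *   = 28 bytes, the most set_context() will ever be handed. */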
......@@ -19,15 +19,15 @@ struct mb_cache_entry {
u32 e_key;
u32 e_referenced:1;
u32 e_reusable:1;
/* Block number of hashed block - stable during lifetime of the entry */
sector_t e_block;
/* User provided value - stable during lifetime of the entry */
u64 e_value;
};
struct mb_cache *mb_cache_create(int bucket_bits);
void mb_cache_destroy(struct mb_cache *cache);
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
sector_t block, bool reusable);
u64 value, bool reusable);
void __mb_cache_entry_free(struct mb_cache_entry *entry);
static inline int mb_cache_entry_put(struct mb_cache *cache,
struct mb_cache_entry *entry)
......@@ -38,10 +38,9 @@ static inline int mb_cache_entry_put(struct mb_cache *cache,
return 1;
}
void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
sector_t block);
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
sector_t block);
u64 value);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
u32 key);
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
......
......@@ -332,6 +332,8 @@ struct dquot_operations {
* quota code only */
qsize_t *(*get_reserved_space) (struct inode *);
int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
/* Get number of inodes that were charged for a given inode */
int (*get_inode_usage) (struct inode *, qsize_t *);
/* Get next ID with active quota structure */
int (*get_next_id) (struct super_block *sb, struct kqid *qid);
};
......