Commit c207adc1 authored by David Sterba

btrfs: uninline some static inline helpers from tree-log.h

The helpers do initialization or release work, none of which is performance
critical enough to require a static inline, so move them to the .c file.
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2be1f2bf
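
The change follows the usual uninlining pattern; in miniature (a generic sketch with hypothetical names foo_ctx/foo_ctx_init, not the btrfs code): the header keeps only a declaration and the body moves into the .c file, so callers are unchanged and the compiler emits one out-of-line definition instead of expanding the helper in every translation unit that includes the header.

	/* before, in a header (hypothetical example): */
	static inline void foo_ctx_init(struct foo_ctx *ctx)
	{
		ctx->ret = 0;
		INIT_LIST_HEAD(&ctx->list);
	}

	/* after, in the header: only the prototype remains */
	void foo_ctx_init(struct foo_ctx *ctx);

	/* after, in the .c file: the definition, no longer static inline */
	void foo_ctx_init(struct foo_ctx *ctx)
	{
		ctx->ret = 0;
		INIT_LIST_HEAD(&ctx->list);
	}
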
@@ -2818,6 +2818,52 @@ static void wait_for_writer(struct btrfs_root *root)
	finish_wait(&root->log_writer_wait, &wait);
}

void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode)
{
	ctx->log_ret = 0;
	ctx->log_transid = 0;
	ctx->log_new_dentries = false;
	ctx->logging_new_name = false;
	ctx->logging_new_delayed_dentries = false;
	ctx->logged_before = false;
	ctx->inode = inode;
	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->ordered_extents);
	INIT_LIST_HEAD(&ctx->conflict_inodes);
	ctx->num_conflict_inodes = 0;
	ctx->logging_conflict_inodes = false;
	ctx->scratch_eb = NULL;
}

void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx)
{
	struct btrfs_inode *inode = BTRFS_I(ctx->inode);

	if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
	    !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
		return;

	/*
	 * Don't care about allocation failure. This is just for optimization,
	 * if we fail to allocate here, we will try again later if needed.
	 */
	ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0);
}

void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_extent *tmp;

	ASSERT(inode_is_locked(ctx->inode));

	list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
					struct btrfs_log_ctx *ctx)
{
@@ -55,51 +55,9 @@ struct btrfs_log_ctx {
	struct extent_buffer *scratch_eb;
};

static inline void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx,
				      struct inode *inode)
{
	ctx->log_ret = 0;
	ctx->log_transid = 0;
	ctx->log_new_dentries = false;
	ctx->logging_new_name = false;
	ctx->logging_new_delayed_dentries = false;
	ctx->logged_before = false;
	ctx->inode = inode;
	INIT_LIST_HEAD(&ctx->list);
	INIT_LIST_HEAD(&ctx->ordered_extents);
	INIT_LIST_HEAD(&ctx->conflict_inodes);
	ctx->num_conflict_inodes = 0;
	ctx->logging_conflict_inodes = false;
	ctx->scratch_eb = NULL;
}

static inline void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx)
{
	struct btrfs_inode *inode = BTRFS_I(ctx->inode);

	if (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
	    !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
		return;

	/*
	 * Don't care about allocation failure. This is just for optimization,
	 * if we fail to allocate here, we will try again later if needed.
	 */
	ctx->scratch_eb = alloc_dummy_extent_buffer(inode->root->fs_info, 0);
}

static inline void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_ordered_extent *tmp;

	ASSERT(inode_is_locked(ctx->inode));

	list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
		list_del_init(&ordered->log_list);
		btrfs_put_ordered_extent(ordered);
	}
}

void btrfs_init_log_ctx(struct btrfs_log_ctx *ctx, struct inode *inode);
void btrfs_init_log_ctx_scratch_eb(struct btrfs_log_ctx *ctx);
void btrfs_release_log_ctx_extents(struct btrfs_log_ctx *ctx);

static inline void btrfs_set_log_full_commit(struct btrfs_trans_handle *trans)
{