Commit cbc0e928 authored by Filipe Manana, committed by Josef Bacik

Btrfs: remove unneeded field / smaller extent_map structure

We don't need an unsigned int field in the extent_map struct to tell us
whether the extent map is in the inode's extent map tree or not. We can
use the rb_node struct field together with the RB_CLEAR_NODE and
RB_EMPTY_NODE macros to track the same information.

This reduces sizeof(struct extent_map) from 152 bytes to 144 bytes (on a
64-bit system).
Signed-off-by: Filipe David Borba Manana <fdmanana@gmail.com>
Reviewed-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
parent e84752d4
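
The pattern the commit relies on is the rbtree convention that RB_CLEAR_NODE() makes a node point to itself and RB_EMPTY_NODE() tests exactly that, so an extent map's tree membership can be read straight from its rb_node. Below is a minimal userspace sketch of that idea, assuming simplified stand-in types and helpers (rb_node_sketch, rb_clear_node_sketch, extent_map_sketch, and so on) rather than the kernel's real <linux/rbtree.h> definitions:

#include <assert.h>
#include <stdio.h>

/*
 * Simplified stand-in for the kernel's struct rb_node: in the kernel the
 * parent pointer lives in __rb_parent_color, and a "cleared" node is one
 * whose parent pointer refers to the node itself.
 */
struct rb_node_sketch {
	struct rb_node_sketch *parent;
};

/* Roughly what RB_CLEAR_NODE() does: make the node point to itself. */
static void rb_clear_node_sketch(struct rb_node_sketch *node)
{
	node->parent = node;
}

/* Roughly what RB_EMPTY_NODE() does: a self-referencing node is not in a tree. */
static int rb_empty_node_sketch(const struct rb_node_sketch *node)
{
	return node->parent == node;
}

/* Illustrative extent map with no in_tree field; the rb_node carries the state. */
struct extent_map_sketch {
	struct rb_node_sketch rb_node;
	unsigned long start;
	unsigned long len;
};

int main(void)
{
	struct extent_map_sketch em = { .start = 0, .len = 4096 };
	struct rb_node_sketch dummy_parent = { NULL };

	/* Like alloc_extent_map(): a fresh map is marked as not in any tree. */
	rb_clear_node_sketch(&em.rb_node);
	assert(rb_empty_node_sketch(&em.rb_node));

	/* Pretend rb_link_node()/rb_insert_color() linked the node into a tree. */
	em.rb_node.parent = &dummy_parent;
	assert(!rb_empty_node_sketch(&em.rb_node));

	/* Like remove_extent_mapping(): clearing the node marks it out-of-tree again. */
	rb_clear_node_sketch(&em.rb_node);
	printf("in tree after removal: %d\n", !rb_empty_node_sketch(&em.rb_node));
	return 0;
}

The upshot is the same as in the commit: tree membership is folded into a field the structure already carries, which is presumably why dropping the 4-byte in_tree member saves 8 bytes once alignment padding is accounted for.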
@@ -2763,7 +2763,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
 
 	if (em_cached && *em_cached) {
 		em = *em_cached;
-		if (em->in_tree && start >= em->start &&
+		if (extent_map_in_tree(em) && start >= em->start &&
 		    start < extent_map_end(em)) {
 			atomic_inc(&em->refs);
 			return em;
@@ -51,7 +51,7 @@ struct extent_map *alloc_extent_map(void)
 	em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS);
 	if (!em)
 		return NULL;
-	em->in_tree = 0;
+	RB_CLEAR_NODE(&em->rb_node);
 	em->flags = 0;
 	em->compress_type = BTRFS_COMPRESS_NONE;
 	em->generation = 0;
@@ -73,7 +73,7 @@ void free_extent_map(struct extent_map *em)
 		return;
 	WARN_ON(atomic_read(&em->refs) == 0);
 	if (atomic_dec_and_test(&em->refs)) {
-		WARN_ON(em->in_tree);
+		WARN_ON(extent_map_in_tree(em));
 		WARN_ON(!list_empty(&em->list));
 		kmem_cache_free(extent_map_cache, em);
 	}
@@ -99,8 +99,6 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
 		parent = *p;
 		entry = rb_entry(parent, struct extent_map, rb_node);
 
-		WARN_ON(!entry->in_tree);
-
 		if (em->start < entry->start)
 			p = &(*p)->rb_left;
 		else if (em->start >= extent_map_end(entry))
@@ -128,7 +126,6 @@ static int tree_insert(struct rb_root *root, struct extent_map *em)
 	if (end > entry->start && em->start < extent_map_end(entry))
 		return -EEXIST;
 
-	em->in_tree = 1;
 	rb_link_node(&em->rb_node, orig_parent, p);
 	rb_insert_color(&em->rb_node, root);
 	return 0;
@@ -153,8 +150,6 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
 		prev = n;
 		prev_entry = entry;
 
-		WARN_ON(!entry->in_tree);
-
 		if (offset < entry->start)
 			n = n->rb_left;
 		else if (offset >= extent_map_end(entry))
@@ -240,12 +235,12 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
 			em->len += merge->len;
 			em->block_len += merge->block_len;
 			em->block_start = merge->block_start;
-			merge->in_tree = 0;
 			em->mod_len = (em->mod_len + em->mod_start) - merge->mod_start;
 			em->mod_start = merge->mod_start;
 			em->generation = max(em->generation, merge->generation);
 
 			rb_erase(&merge->rb_node, &tree->map);
+			RB_CLEAR_NODE(&merge->rb_node);
 			free_extent_map(merge);
 		}
 	}
@@ -257,7 +252,7 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
 		em->len += merge->len;
 		em->block_len += merge->block_len;
 		rb_erase(&merge->rb_node, &tree->map);
-		merge->in_tree = 0;
+		RB_CLEAR_NODE(&merge->rb_node);
 		em->mod_len = (merge->mod_start + merge->mod_len) - em->mod_start;
 		em->generation = max(em->generation, merge->generation);
 		free_extent_map(merge);
@@ -319,7 +314,7 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
 
 void clear_em_logging(struct extent_map_tree *tree, struct extent_map *em)
 {
 	clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
-	if (em->in_tree)
+	if (extent_map_in_tree(em))
 		try_merge_map(tree, em);
 }
@@ -434,6 +429,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 	rb_erase(&em->rb_node, &tree->map);
 	if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
 		list_del_init(&em->list);
-	em->in_tree = 0;
+	RB_CLEAR_NODE(&em->rb_node);
 	return ret;
 }
@@ -33,7 +33,6 @@ struct extent_map {
 	unsigned long flags;
 	struct block_device *bdev;
 	atomic_t refs;
-	unsigned int in_tree;
 	unsigned int compress_type;
 	struct list_head list;
 };
@@ -44,6 +43,11 @@ struct extent_map_tree {
 	rwlock_t lock;
 };
 
+static inline int extent_map_in_tree(const struct extent_map *em)
+{
+	return !RB_EMPTY_NODE(&em->rb_node);
+}
+
 static inline u64 extent_map_end(struct extent_map *em)
 {
 	if (em->start + em->len < em->start)