Commit 34d52cb6 authored by Li Zefan

Btrfs: Make free space cache code generic

So that we can reuse this code to cache free inode numbers.

The change is quite straightforward. Two new structures are introduced.

- struct btrfs_free_space_ctl

  We move those variables that are used for caching free space from
  struct btrfs_block_group_cache to this new struct.

- struct btrfs_free_space_op

  We do block-group-specific work (e.g. calculating the extents threshold)
  through callback functions registered in this struct.

The free space cache code can then drop its direct references to
struct btrfs_block_group_cache.
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
parent f38b6e75
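
To make the intended reuse concrete before the diff: a second consumer, such as the free inode number cache this commit prepares for, would allocate its own btrfs_free_space_ctl and register its own btrfs_free_space_op. This is a minimal sketch only; every name in it (free_ino_op, recalc_ino_thresholds, use_ino_bitmap, init_ino_cache_ctl) is a hypothetical illustration, not part of this commit:

/*
 * Hypothetical second user of the generic free space cache. These
 * names are illustrative only; this commit does not add them.
 */
static void recalc_ino_thresholds(struct btrfs_free_space_ctl *ctl)
{
        /* an inode number cache can pick its own bitmap policy */
        ctl->extents_thresh = ((32 * 1024) / 2) /
                                sizeof(struct btrfs_free_space);
}

static bool use_ino_bitmap(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
{
        /* e.g. convert once the plain extent entries get too numerous */
        return ctl->free_extents >= ctl->extents_thresh;
}

static struct btrfs_free_space_op free_ino_op = {
        .recalc_thresholds      = recalc_ino_thresholds,
        .use_bitmap             = use_ino_bitmap,
};

static void init_ino_cache_ctl(struct btrfs_root *root,
                               struct btrfs_free_space_ctl *ctl)
{
        spin_lock_init(&ctl->tree_lock);
        ctl->unit = 1;          /* cache inode numbers, not byte ranges */
        ctl->start = 0;
        ctl->private = root;    /* handed back through the callbacks */
        ctl->op = &free_ino_op;
}
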
fs/btrfs/ctree.h
@@ -830,9 +830,6 @@ struct btrfs_block_group_cache {
 	u64 bytes_super;
 	u64 flags;
 	u64 sectorsize;
-	int extents_thresh;
-	int free_extents;
-	int total_bitmaps;
 	unsigned int ro:1;
 	unsigned int dirty:1;
 	unsigned int iref:1;
@@ -847,9 +844,7 @@ struct btrfs_block_group_cache {
 	struct btrfs_space_info *space_info;
 
 	/* free space cache stuff */
-	spinlock_t tree_lock;
-	struct rb_root free_space_offset;
-	u64 free_space;
+	struct btrfs_free_space_ctl *free_space_ctl;
 
 	/* block group cache stuff */
 	struct rb_node cache_node;
fs/btrfs/extent-tree.c
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
 		WARN_ON(cache->pinned > 0);
 		WARN_ON(cache->reserved > 0);
 		WARN_ON(cache->reserved_pinned > 0);
+		kfree(cache->free_space_ctl);
 		kfree(cache);
 	}
 }
@@ -4893,7 +4894,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
 		return 0;
 
 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
-		   (cache->free_space >= num_bytes));
+		   (cache->free_space_ctl->free_space >= num_bytes));
 
 	put_caching_control(caching_ctl);
 	return 0;
@@ -8551,10 +8552,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 			ret = -ENOMEM;
 			goto error;
 		}
+		cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+						GFP_NOFS);
+		if (!cache->free_space_ctl) {
+			kfree(cache);
+			ret = -ENOMEM;
+			goto error;
+		}
 
 		atomic_set(&cache->count, 1);
 		spin_lock_init(&cache->lock);
-		spin_lock_init(&cache->tree_lock);
 		cache->fs_info = info;
 		INIT_LIST_HEAD(&cache->list);
 		INIT_LIST_HEAD(&cache->cluster_list);
@@ -8562,14 +8569,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		if (need_clear)
 			cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-		/*
-		 * we only want to have 32k of ram per block group for keeping
-		 * track of free space, and if we pass 1/2 of that we want to
-		 * start converting things over to using bitmaps
-		 */
-		cache->extents_thresh = ((1024 * 32) / 2) /
-					sizeof(struct btrfs_free_space);
-
 		read_extent_buffer(leaf, &cache->item,
 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
 				   sizeof(cache->item));
@@ -8580,6 +8579,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		cache->flags = btrfs_block_group_flags(&cache->item);
 		cache->sectorsize = root->sectorsize;
 
+		btrfs_init_free_space_ctl(cache);
+
 		/*
 		 * We need to exclude the super stripes now so that the space
 		 * info has super bytes accounted for, otherwise we'll think
@@ -8666,6 +8667,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
 	if (!cache)
 		return -ENOMEM;
+	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+					GFP_NOFS);
+	if (!cache->free_space_ctl) {
+		kfree(cache);
+		return -ENOMEM;
+	}
 
 	cache->key.objectid = chunk_offset;
 	cache->key.offset = size;
@@ -8673,19 +8680,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->sectorsize = root->sectorsize;
 	cache->fs_info = root->fs_info;
 
-	/*
-	 * we only want to have 32k of ram per block group for keeping track
-	 * of free space, and if we pass 1/2 of that we want to start
-	 * converting things over to using bitmaps
-	 */
-	cache->extents_thresh = ((1024 * 32) / 2) /
-		sizeof(struct btrfs_free_space);
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
-	spin_lock_init(&cache->tree_lock);
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
 
+	btrfs_init_free_space_ctl(cache);
+
 	btrfs_set_block_group_used(&cache->item, bytes_used);
 	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
 	cache->flags = type;
fs/btrfs/free-space-cache.c
This diff is collapsed.
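
The collapsed diff is where the bulk of the mechanical change lives. Based on the structures and the btrfs_init_free_space_ctl() declaration visible in the header diff below, its wiring is expected to look roughly like the following sketch (a reconstruction, not a verbatim excerpt; the static helper names are assumptions):

/*
 * Sketch of the expected wiring in free-space-cache.c, inferred from
 * the header changes below; helper names are assumptions.
 */
static struct btrfs_free_space_op free_space_op = {
        .recalc_thresholds      = recalculate_thresholds,
        .use_bitmap             = use_bitmap,
};

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

        spin_lock_init(&ctl->tree_lock);
        ctl->unit = block_group->sectorsize;    /* smallest allocation unit */
        ctl->start = block_group->key.objectid; /* start offset of the group */
        ctl->private = block_group;             /* handed back to the ops */
        ctl->op = &free_space_op;
}
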
fs/btrfs/free-space-cache.h
@@ -27,6 +27,25 @@ struct btrfs_free_space {
 	struct list_head list;
 };
 
+struct btrfs_free_space_ctl {
+	spinlock_t tree_lock;
+	struct rb_root free_space_offset;
+	u64 free_space;
+	int extents_thresh;
+	int free_extents;
+	int total_bitmaps;
+	int unit;
+	u64 start;
+	struct btrfs_free_space_op *op;
+	void *private;
+};
+
+struct btrfs_free_space_op {
+	void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
+	bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
+			   struct btrfs_free_space *info);
+};
+
 struct inode *lookup_free_space_inode(struct btrfs_root *root,
 				      struct btrfs_block_group_cache
 				      *block_group, struct btrfs_path *path);
@@ -45,6 +64,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 			  struct btrfs_trans_handle *trans,
 			  struct btrfs_block_group_cache *block_group,
 			  struct btrfs_path *path);
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
 int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 			 u64 bytenr, u64 size);
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
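
With the op table in place, the generic cache code can make policy decisions through the registered callbacks instead of hardcoding block group behavior. A minimal sketch of such a call site follows; link_free_space_entry() and both helpers it calls are hypothetical names used only for illustration:

/* Hypothetical call site; none of these three names are from this commit. */
static int link_free_space_entry(struct btrfs_free_space_ctl *ctl,
                                 struct btrfs_free_space *info)
{
        int ret;

        spin_lock(&ctl->tree_lock);
        if (ctl->op->use_bitmap(ctl, info))
                ret = add_to_bitmap(ctl, info);  /* store as bitmap bits */
        else
                ret = add_as_extent(ctl, info);  /* keep a plain extent entry */
        spin_unlock(&ctl->tree_lock);

        return ret;
}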