Commit 11e3235b authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.10-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "Mostly core updates with a few user visible bits and fixes.

  Highlights:

   - fsync performance improvements
      - less contention of log mutex (throughput +4%, latency -14%,
        dbench with 32 clients)
      - skip unnecessary commits for link and rename (throughput +6%,
        latency -30%, rename latency -75%, dbench with 16 clients)
      - make fast fsync wait only for writeback (throughput +10..40%,
        runtime -1..-20%, dbench with 1 to 64 clients on various
        file/block sizes)

   - direct io is now implemented using the iomap infrastructure, that's
     the main part, we still have a workaround that requires an iomap
     API update, coming in 5.10

   - new sysfs exports:
      - information about the exclusive filesystem operation status
        (balance, device add/remove/replace, ...)
      - supported send stream version

  Core:

   - use ticket space reservations for data, fair policy using the same
     infrastructure as metadata

   - preparatory work to switch locking from our custom tree locks to
     standard rwsem, now the locking context is propagated to all
     callers, actual switch is expected to happen in the next dev cycle

   - seed device structures are now using list API

   - extent tracepoints print proper tree id

   - unified range checks for extent buffer helpers

   - send: avoid using temporary buffer for copying data

   - remove unnecessary RCU protection from space infos

   - remove unused readpage callback for metadata, enabling several
     cleanups

   - replace indirect function calls for end io hooks and remove
     extent_io_ops completely

  Fixes:

   - more lockdep warning fixes

   - fix qgroup reservation for delayed inode and an occasional
     reservation leak for preallocated files

   - fix device replace of a seed device

   - fix metadata reservation for fallocate that leads to transaction
     aborts

   - reschedule if necessary when logging directory items or when
     cloning lots of extents

   - tree-checker: fix false alert caused by legacy btrfs root item

   - send: fix rename/link conflicts for orphanized inodes

   - properly initialize device stats for seed devices

   - skip devices without magic signature when mounting

  Other:

   - error handling improvements, BUG_ONs replaced by proper handling,
     fuzz fixes

   - various function parameter cleanups

   - various W=1 cleanups

   - error/info messages improved

  Mishaps:

   - commit 62cf5391 ("btrfs: move btrfs_rm_dev_replace_free_srcdev
     outside of all locks") is a rebase leftover after the patch got
     merged to 5.9-rc8 as a466c85e ("btrfs: move
     btrfs_rm_dev_replace_free_srcdev outside of all locks"), the
     remaining part is trivial and the patch is in the middle of the
     series so I'm keeping it there instead of rebasing"

* tag 'for-5.10-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (161 commits)
  btrfs: rename BTRFS_INODE_ORDERED_DATA_CLOSE flag
  btrfs: annotate device name rcu_string with __rcu
  btrfs: skip devices without magic signature when mounting
  btrfs: cleanup cow block on error
  btrfs: remove BTRFS_INODE_READDIO_NEED_LOCK
  fs: remove no longer used dio_end_io()
  btrfs: return error if we're unable to read device stats
  btrfs: init device stats for seed devices
  btrfs: remove struct extent_io_ops
  btrfs: call submit_bio_hook directly for metadata pages
  btrfs: stop calling submit_bio_hook for data inodes
  btrfs: don't opencode is_data_inode in end_bio_extent_readpage
  btrfs: call submit_bio_hook directly in submit_one_bio
  btrfs: remove extent_io_ops::readpage_end_io_hook
  btrfs: replace readpage_end_io_hook with direct calls
  btrfs: send, recompute reference path after orphanization of a directory
  btrfs: send, orphanize first all conflicting inodes when processing references
  btrfs: tree-checker: fix false alert caused by legacy btrfs root item
  btrfs: use unaligned helpers for stack and header set/get helpers
  btrfs: free-space-cache: use unaligned helpers to access data
  ...
parents c024a811 1fd4033d
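
Editor's note: among the "new sysfs exports" listed above, the exclusive filesystem operation status is exposed as an "exclusive_operation" attribute on the per-filesystem sysfs object (the diff below adds a sysfs_notify() on that name in btrfs_exclop_finish()). A hedged userspace C sketch of reading it follows; the directory layout /sys/fs/btrfs/<FSID>/ is an assumption for illustration, not confirmed by this commit.

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	char buf[64];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <filesystem-UUID>\n", argv[0]);
		return 1;
	}
	/* Assumed path layout: one directory per mounted filesystem UUID. */
	snprintf(path, sizeof(path),
		 "/sys/fs/btrfs/%s/exclusive_operation", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("exclusive operation: %s", buf);
	fclose(f);
	return 0;
}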
@@ -14,6 +14,7 @@ config BTRFS_FS
 	select LZO_DECOMPRESS
 	select ZSTD_COMPRESS
 	select ZSTD_DECOMPRESS
+	select FS_IOMAP
 	select RAID6_PQ
 	select XOR_BLOCKS
 	select SRCU
...
@@ -2997,7 +2997,6 @@ int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
 	while (!list_empty(&pending_edge)) {
 		struct btrfs_backref_node *upper;
 		struct btrfs_backref_node *lower;
-		struct rb_node *rb_node;
 
 		edge = list_first_entry(&pending_edge,
 				struct btrfs_backref_edge, list[UPPER]);
...
@@ -1766,16 +1766,10 @@ static void link_block_group(struct btrfs_block_group *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
 	int index = btrfs_bg_flags_to_raid_index(cache->flags);
-	bool first = false;
 
 	down_write(&space_info->groups_sem);
-	if (list_empty(&space_info->block_groups[index]))
-		first = true;
 	list_add_tail(&cache->list, &space_info->block_groups[index]);
 	up_write(&space_info->groups_sem);
-
-	if (first)
-		btrfs_sysfs_add_block_group_type(cache);
 }
 
 static struct btrfs_block_group *btrfs_create_block_group_cache(
@@ -1873,7 +1867,7 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
-static int read_block_group_item(struct btrfs_block_group *cache,
+static void read_block_group_item(struct btrfs_block_group *cache,
 				 struct btrfs_path *path,
 				 const struct btrfs_key *key)
 {
@@ -1887,8 +1881,6 @@ static int read_block_group_item(struct btrfs_block_group *cache,
 			   sizeof(bgi));
 	cache->used = btrfs_stack_block_group_used(&bgi);
 	cache->flags = btrfs_stack_block_group_flags(&bgi);
-
-	return 0;
 }
 
 static int read_one_block_group(struct btrfs_fs_info *info,
@@ -1907,9 +1899,7 @@ static int read_one_block_group(struct btrfs_fs_info *info,
 	if (!cache)
 		return -ENOMEM;
 
-	ret = read_block_group_item(cache, path, key);
-	if (ret < 0)
-		goto error;
+	read_block_group_item(cache, path, key);
 
 	set_free_space_tree_thresholds(cache);
@@ -2035,8 +2025,18 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		btrfs_release_path(path);
 	}
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(space_info, &info->space_info, list) {
+	list_for_each_entry(space_info, &info->space_info, list) {
+		int i;
+
+		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+			if (list_empty(&space_info->block_groups[i]))
+				continue;
+			cache = list_first_entry(&space_info->block_groups[i],
+						 struct btrfs_block_group,
+						 list);
+			btrfs_sysfs_add_block_group_type(cache);
+		}
+
 		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
 		      (BTRFS_BLOCK_GROUP_RAID10 |
 		       BTRFS_BLOCK_GROUP_RAID1_MASK |
@@ -2056,7 +2056,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 					 list)
 				inc_block_group_ro(cache, 1);
 	}
-	rcu_read_unlock();
 
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
@@ -2097,12 +2096,16 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 		return;
 
 	while (!list_empty(&trans->new_bgs)) {
+		int index;
+
 		block_group = list_first_entry(&trans->new_bgs,
 					       struct btrfs_block_group,
 					       bg_list);
 		if (ret)
 			goto next;
 
+		index = btrfs_bg_flags_to_raid_index(block_group->flags);
+
 		ret = insert_block_group_item(trans, block_group);
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
@@ -2111,6 +2114,16 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
 		if (ret)
 			btrfs_abort_transaction(trans, ret);
 		add_block_group_free_space(trans, block_group);
+
+		/*
+		 * If we restriped during balance, we may have added a new raid
+		 * type, so now add the sysfs entries when it is safe to do so.
+		 * We don't have to worry about locking here as it's handled in
+		 * btrfs_sysfs_add_block_group_type.
+		 */
+		if (block_group->space_info->block_group_kobjs[index] == NULL)
+			btrfs_sysfs_add_block_group_type(block_group);
+
 		/* Already aborted the transaction if it failed. */
 next:
 		btrfs_delayed_refs_rsv_release(fs_info, 1);
@@ -2785,7 +2798,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
 			 * finished yet (no block group item in the extent tree
 			 * yet, etc). If this is the case, wait for all free
 			 * space endio workers to finish and retry. This is a
-			 * a very rare case so no need for a more efficient and
+			 * very rare case so no need for a more efficient and
 			 * complex approach.
 			 */
 			if (ret == -ENOENT) {
@@ -2961,6 +2974,13 @@ int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
 						      space_info, -ram_bytes);
 		if (delalloc)
 			cache->delalloc_bytes += num_bytes;
+
+		/*
+		 * Compression can use less space than we reserved, so wake
+		 * tickets if that happens
+		 */
+		if (num_bytes < ram_bytes)
+			btrfs_try_granting_tickets(cache->fs_info, space_info);
 	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&space_info->lock);
@@ -2994,6 +3014,8 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
 	if (delalloc)
 		cache->delalloc_bytes -= num_bytes;
 	spin_unlock(&cache->lock);
+
+	btrfs_try_granting_tickets(cache->fs_info, space_info);
 	spin_unlock(&space_info->lock);
 }
@@ -3002,12 +3024,10 @@ static void force_metadata_allocation(struct btrfs_fs_info *info)
 	struct list_head *head = &info->space_info;
 	struct btrfs_space_info *found;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(found, head, list) {
+	list_for_each_entry(found, head, list) {
 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
 			found->force_alloc = CHUNK_ALLOC_FORCE;
 	}
-	rcu_read_unlock();
 }
 
 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
@@ -3338,14 +3358,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	}
 	spin_unlock(&info->block_group_cache_lock);
 
-	/*
-	 * Now that all the block groups are freed, go through and free all the
-	 * space_info structs. This is only called during the final stages of
-	 * unmount, and so we know nobody is using them. We call
-	 * synchronize_rcu() once before we start, just to be on the safe side.
-	 */
-	synchronize_rcu();
-
 	btrfs_release_global_block_rsv(info);
 
 	while (!list_empty(&info->space_info)) {
...
@@ -21,14 +21,18 @@
  * new data the application may have written before commit.
  */
 enum {
-	BTRFS_INODE_ORDERED_DATA_CLOSE,
+	BTRFS_INODE_FLUSH_ON_CLOSE,
 	BTRFS_INODE_DUMMY,
 	BTRFS_INODE_IN_DEFRAG,
 	BTRFS_INODE_HAS_ASYNC_EXTENT,
+	/*
+	 * Always set under the VFS' inode lock, otherwise it can cause races
+	 * during fsync (we start as a fast fsync and then end up in a full
+	 * fsync racing with ordered extent completion).
+	 */
 	BTRFS_INODE_NEEDS_FULL_SYNC,
 	BTRFS_INODE_COPY_EVERYTHING,
 	BTRFS_INODE_IN_DELALLOC_LIST,
-	BTRFS_INODE_READDIO_NEED_LOCK,
 	BTRFS_INODE_HAS_PROPS,
 	BTRFS_INODE_SNAPSHOT_FLUSH,
 };
@@ -212,6 +216,11 @@ struct btrfs_inode {
 	struct inode vfs_inode;
 };
 
+static inline u32 btrfs_inode_sectorsize(const struct btrfs_inode *inode)
+{
+	return inode->root->fs_info->sectorsize;
+}
+
 static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
 {
 	return container_of(inode, struct btrfs_inode, vfs_inode);
@@ -324,23 +333,6 @@ struct btrfs_dio_private {
 	u8 csums[];
 };
 
-/*
- * Disable DIO read nolock optimization, so new dio readers will be forced
- * to grab i_mutex. It is used to avoid the endless truncate due to
- * nonlocked dio read.
- */
-static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
-{
-	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
-	smp_mb();
-}
-
-static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
-{
-	smp_mb__before_atomic();
-	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
-}
-
 /* Array of bytes with variable length, hexadecimal format 0x1234 */
 #define CSUM_FMT		"0x%*phN"
 #define CSUM_FMT_VALUE(size, bytes)	size, bytes
...
@@ -29,41 +29,6 @@
 #include "extent_io.h"
 #include "extent_map.h"
 
-int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
-		u64 start, struct page **pages, unsigned long *out_pages,
-		unsigned long *total_in, unsigned long *total_out);
-int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
-int zlib_decompress(struct list_head *ws, unsigned char *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
-		size_t destlen);
-struct list_head *zlib_alloc_workspace(unsigned int level);
-void zlib_free_workspace(struct list_head *ws);
-struct list_head *zlib_get_workspace(unsigned int level);
-
-int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
-		u64 start, struct page **pages, unsigned long *out_pages,
-		unsigned long *total_in, unsigned long *total_out);
-int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
-int lzo_decompress(struct list_head *ws, unsigned char *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
-		size_t destlen);
-struct list_head *lzo_alloc_workspace(unsigned int level);
-void lzo_free_workspace(struct list_head *ws);
-
-int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
-		u64 start, struct page **pages, unsigned long *out_pages,
-		unsigned long *total_in, unsigned long *total_out);
-int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
-int zstd_decompress(struct list_head *ws, unsigned char *data_in,
-		struct page *dest_page, unsigned long start_byte, size_t srclen,
-		size_t destlen);
-void zstd_init_workspace_manager(void);
-void zstd_cleanup_workspace_manager(void);
-struct list_head *zstd_alloc_workspace(unsigned int level);
-void zstd_free_workspace(struct list_head *ws);
-struct list_head *zstd_get_workspace(unsigned int level);
-void zstd_put_workspace(struct list_head *ws);
-
 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
 
 const char* btrfs_compress_type2str(enum btrfs_compression_type type)
...
@@ -144,4 +144,39 @@ bool btrfs_compress_is_valid_type(const char *str, size_t len);
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
 
+int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
+		u64 start, struct page **pages, unsigned long *out_pages,
+		unsigned long *total_in, unsigned long *total_out);
+int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int zlib_decompress(struct list_head *ws, unsigned char *data_in,
+		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		size_t destlen);
+struct list_head *zlib_alloc_workspace(unsigned int level);
+void zlib_free_workspace(struct list_head *ws);
+struct list_head *zlib_get_workspace(unsigned int level);
+
+int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
+		u64 start, struct page **pages, unsigned long *out_pages,
+		unsigned long *total_in, unsigned long *total_out);
+int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int lzo_decompress(struct list_head *ws, unsigned char *data_in,
+		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		size_t destlen);
+struct list_head *lzo_alloc_workspace(unsigned int level);
+void lzo_free_workspace(struct list_head *ws);
+
+int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
+		u64 start, struct page **pages, unsigned long *out_pages,
+		unsigned long *total_in, unsigned long *total_out);
+int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
+int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+		struct page *dest_page, unsigned long start_byte, size_t srclen,
+		size_t destlen);
+void zstd_init_workspace_manager(void);
+void zstd_cleanup_workspace_manager(void);
+struct list_head *zstd_alloc_workspace(unsigned int level);
+void zstd_free_workspace(struct list_head *ws);
+struct list_head *zstd_get_workspace(unsigned int level);
+void zstd_put_workspace(struct list_head *ws);
+
 #endif
This diff is collapsed.
@@ -115,126 +115,15 @@ int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes)
 {
 	struct btrfs_root *root = inode->root;
 	struct btrfs_fs_info *fs_info = root->fs_info;
-	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
-	u64 used;
-	int ret = 0;
-	int need_commit = 2;
-	int have_pinned_space;
+	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_DATA;
 
 	/* Make sure bytes are sectorsize aligned */
 	bytes = ALIGN(bytes, fs_info->sectorsize);
 
-	if (btrfs_is_free_space_inode(inode)) {
-		need_commit = 0;
-		ASSERT(current->journal_info);
-	}
+	if (btrfs_is_free_space_inode(inode))
+		flush = BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE;
 
-again:
-	/* Make sure we have enough space to handle the data first */
-	spin_lock(&data_sinfo->lock);
-	used = btrfs_space_info_used(data_sinfo, true);
-
-	if (used + bytes > data_sinfo->total_bytes) {
-		struct btrfs_trans_handle *trans;
-
-		/*
-		 * If we don't have enough free bytes in this space then we need
-		 * to alloc a new chunk.
-		 */
-		if (!data_sinfo->full) {
-			u64 alloc_target;
-
-			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
-			spin_unlock(&data_sinfo->lock);
-
-			alloc_target = btrfs_data_alloc_profile(fs_info);
-			/*
-			 * It is ugly that we don't call nolock join
-			 * transaction for the free space inode case here.
-			 * But it is safe because we only do the data space
-			 * reservation for the free space cache in the
-			 * transaction context, the common join transaction
-			 * just increase the counter of the current transaction
-			 * handler, doesn't try to acquire the trans_lock of
-			 * the fs.
-			 */
-			trans = btrfs_join_transaction(root);
-			if (IS_ERR(trans))
-				return PTR_ERR(trans);
-
-			ret = btrfs_chunk_alloc(trans, alloc_target,
-						CHUNK_ALLOC_NO_FORCE);
-			btrfs_end_transaction(trans);
-			if (ret < 0) {
-				if (ret != -ENOSPC)
-					return ret;
-				else {
-					have_pinned_space = 1;
-					goto commit_trans;
-				}
-			}
-
-			goto again;
-		}
-
-		/*
-		 * If we don't have enough pinned space to deal with this
-		 * allocation, and no removed chunk in current transaction,
-		 * don't bother committing the transaction.
-		 */
-		have_pinned_space = __percpu_counter_compare(
-			&data_sinfo->total_bytes_pinned,
-			used + bytes - data_sinfo->total_bytes,
-			BTRFS_TOTAL_BYTES_PINNED_BATCH);
-		spin_unlock(&data_sinfo->lock);
-
-		/* Commit the current transaction and try again */
-commit_trans:
-		if (need_commit) {
-			need_commit--;
-
-			if (need_commit > 0) {
-				btrfs_start_delalloc_roots(fs_info, -1);
-				btrfs_wait_ordered_roots(fs_info, U64_MAX, 0,
-							 (u64)-1);
-			}
-
-			trans = btrfs_join_transaction(root);
-			if (IS_ERR(trans))
-				return PTR_ERR(trans);
-			if (have_pinned_space >= 0 ||
-			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
-				     &trans->transaction->flags) ||
-			    need_commit > 0) {
-				ret = btrfs_commit_transaction(trans);
-				if (ret)
-					return ret;
-				/*
-				 * The cleaner kthread might still be doing iput
-				 * operations. Wait for it to finish so that
-				 * more space is released. We don't need to
-				 * explicitly run the delayed iputs here because
-				 * the commit_transaction would have woken up
-				 * the cleaner.
-				 */
-				ret = btrfs_wait_on_delayed_iputs(fs_info);
-				if (ret)
-					return ret;
-				goto again;
-			} else {
-				btrfs_end_transaction(trans);
-			}
-		}
-
-		trace_btrfs_space_reservation(fs_info,
-					      "space_info:enospc",
-					      data_sinfo->flags, bytes, 1);
-		return -ENOSPC;
-	}
-	btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, bytes);
-	spin_unlock(&data_sinfo->lock);
-
-	return 0;
+	return btrfs_reserve_data_bytes(fs_info, bytes, flush);
 }
 
 int btrfs_check_data_free_space(struct btrfs_inode *inode,
@@ -277,9 +166,7 @@ void btrfs_free_reserved_data_space_noquota(struct btrfs_fs_info *fs_info,
 	ASSERT(IS_ALIGNED(len, fs_info->sectorsize));
 
 	data_sinfo = fs_info->data_sinfo;
-	spin_lock(&data_sinfo->lock);
-	btrfs_space_info_update_bytes_may_use(fs_info, data_sinfo, -len);
-	spin_unlock(&data_sinfo->lock);
+	btrfs_space_info_free_bytes_may_use(fs_info, data_sinfo, len);
 }
 
 /*
...
@@ -627,8 +627,7 @@ static int btrfs_delayed_inode_reserve_metadata(
 	 */
 	if (!src_rsv || (!trans->bytes_reserved &&
 			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
-		ret = btrfs_qgroup_reserve_meta_prealloc(root,
-				fs_info->nodesize, true);
+		ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
 		if (ret < 0)
 			return ret;
 		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
@@ -769,8 +768,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 	}
 
 	/* insert the keys of the items */
-	setup_items_for_insert(root, path, keys, data_size,
-			       total_data_size, total_size, nitems);
+	setup_items_for_insert(root, path, keys, data_size, nitems);
 
 	/* insert the dir index items */
 	slot = path->slots[0];
...
@@ -64,10 +64,6 @@
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 				       int scrub_ret);
-static void btrfs_dev_replace_update_device_in_mapping_tree(
-						struct btrfs_fs_info *fs_info,
-						struct btrfs_device *srcdev,
-						struct btrfs_device *tgtdev);
 static int btrfs_dev_replace_kthread(void *data);
 
 int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
@@ -224,13 +220,12 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 {
 	struct btrfs_device *device;
 	struct block_device *bdev;
-	struct list_head *devices;
 	struct rcu_string *name;
 	u64 devid = BTRFS_DEV_REPLACE_DEVID;
 	int ret = 0;
 
 	*device_out = NULL;
-	if (fs_info->fs_devices->seeding) {
+	if (srcdev->fs_devices->seeding) {
 		btrfs_err(fs_info, "the filesystem is a seed filesystem!");
 		return -EINVAL;
 	}
@@ -244,8 +239,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 
 	sync_blockdev(bdev);
 
-	devices = &fs_info->fs_devices->devices;
-	list_for_each_entry(device, devices, dev_list) {
+	list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
 		if (device->bdev == bdev) {
 			btrfs_err(fs_info,
 				  "target device is in the filesystem!");
@@ -512,7 +506,7 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 	atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0);
 	up_write(&dev_replace->rwsem);
 
-	ret = btrfs_sysfs_add_devices_dir(tgt_device->fs_devices, tgt_device);
+	ret = btrfs_sysfs_add_device(tgt_device);
 	if (ret)
 		btrfs_err(fs_info, "kobj add dev failed %d", ret);
@@ -630,6 +624,32 @@ static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev,
 	return ret;
 }
 
+static void btrfs_dev_replace_update_device_in_mapping_tree(
+						struct btrfs_fs_info *fs_info,
+						struct btrfs_device *srcdev,
+						struct btrfs_device *tgtdev)
+{
+	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
+	struct extent_map *em;
+	struct map_lookup *map;
+	u64 start = 0;
+	int i;
+
+	write_lock(&em_tree->lock);
+	do {
+		em = lookup_extent_mapping(em_tree, start, (u64)-1);
+		if (!em)
+			break;
+		map = em->map_lookup;
+		for (i = 0; i < map->num_stripes; i++)
+			if (srcdev == map->stripes[i].dev)
+				map->stripes[i].dev = tgtdev;
+		start = em->start + em->len;
+		free_extent_map(em);
+	} while (start);
+	write_unlock(&em_tree->lock);
+}
+
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 				       int scrub_ret)
 {
@@ -661,7 +681,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	 * flush all outstanding I/O and inode extent mappings before the
 	 * copy operation is declared as being finished
 	 */
-	ret = btrfs_start_delalloc_roots(fs_info, -1);
+	ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
 	if (ret) {
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
@@ -781,7 +801,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	/* replace the sysfs entry */
-	btrfs_sysfs_remove_devices_dir(fs_info->fs_devices, src_device);
+	btrfs_sysfs_remove_device(src_device);
 	btrfs_sysfs_update_devid(tgt_device);
 	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state))
 		btrfs_scratch_superblocks(fs_info, src_device->bdev,
@@ -799,32 +819,6 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-static void btrfs_dev_replace_update_device_in_mapping_tree(
-						struct btrfs_fs_info *fs_info,
-						struct btrfs_device *srcdev,
-						struct btrfs_device *tgtdev)
-{
-	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
-	struct extent_map *em;
-	struct map_lookup *map;
-	u64 start = 0;
-	int i;
-
-	write_lock(&em_tree->lock);
-	do {
-		em = lookup_extent_mapping(em_tree, start, (u64)-1);
-		if (!em)
-			break;
-		map = em->map_lookup;
-		for (i = 0; i < map->num_stripes; i++)
-			if (srcdev == map->stripes[i].dev)
-				map->stripes[i].dev = tgtdev;
-		start = em->start + em->len;
-		free_extent_map(em);
-	} while (start);
-	write_unlock(&em_tree->lock);
-}
-
 /*
  * Read progress of device replace status according to the state and last
  * stored position. The value format is the same as for
@@ -1025,7 +1019,7 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
 	 * should never allow both to start and pause. We don't want to allow
 	 * dev-replace to start anyway.
 	 */
-	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
 		down_write(&dev_replace->rwsem);
 		dev_replace->replace_state =
 			BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
@@ -1062,7 +1056,7 @@ static int btrfs_dev_replace_kthread(void *data)
 	ret = btrfs_dev_replace_finishing(fs_info, ret);
 	WARN_ON(ret && ret != -ECANCELED);
 
-	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+	btrfs_exclop_finish(fs_info);
 	return 0;
 }
...
@@ -50,7 +50,6 @@
 				 BTRFS_SUPER_FLAG_METADUMP |\
 				 BTRFS_SUPER_FLAG_METADUMP_V2)
 
-static const struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -204,53 +203,6 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
 #endif
 
-/*
- * extents on the btree inode are pretty simple, there's one extent
- * that covers the entire device
- */
-struct extent_map *btree_get_extent(struct btrfs_inode *inode,
-				    struct page *page, size_t pg_offset,
-				    u64 start, u64 len)
-{
-	struct extent_map_tree *em_tree = &inode->extent_tree;
-	struct extent_map *em;
-	int ret;
-
-	read_lock(&em_tree->lock);
-	em = lookup_extent_mapping(em_tree, start, len);
-	if (em) {
-		read_unlock(&em_tree->lock);
-		goto out;
-	}
-	read_unlock(&em_tree->lock);
-
-	em = alloc_extent_map();
-	if (!em) {
-		em = ERR_PTR(-ENOMEM);
-		goto out;
-	}
-	em->start = 0;
-	em->len = (u64)-1;
-	em->block_len = (u64)-1;
-	em->block_start = 0;
-
-	write_lock(&em_tree->lock);
-	ret = add_extent_mapping(em_tree, em, 0);
-	if (ret == -EEXIST) {
-		free_extent_map(em);
-		em = lookup_extent_mapping(em_tree, start, len);
-		if (!em)
-			em = ERR_PTR(-EIO);
-	} else if (ret) {
-		free_extent_map(em);
-		em = ERR_PTR(ret);
-	}
-	write_unlock(&em_tree->lock);
-out:
-	return em;
-}
-
 /*
  * Compute the csum of a btree block and store the result to provided buffer.
  */
@@ -545,38 +497,35 @@ static int csum_dirty_buffer(struct btrfs_fs_info *fs_info, struct page *page)
 static int check_tree_block_fsid(struct extent_buffer *eb)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
-	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
 	u8 fsid[BTRFS_FSID_SIZE];
-	int ret = 1;
+	u8 *metadata_uuid;
 
 	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
 			   BTRFS_FSID_SIZE);
-	while (fs_devices) {
-		u8 *metadata_uuid;
+	/*
+	 * Checking the incompat flag is only valid for the current fs. For
+	 * seed devices it's forbidden to have their uuid changed so reading
+	 * ->fsid in this case is fine
+	 */
+	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
+		metadata_uuid = fs_devices->metadata_uuid;
+	else
+		metadata_uuid = fs_devices->fsid;
 
-		/*
-		 * Checking the incompat flag is only valid for the current
-		 * fs. For seed devices it's forbidden to have their uuid
-		 * changed so reading ->fsid in this case is fine
-		 */
-		if (fs_devices == fs_info->fs_devices &&
-		    btrfs_fs_incompat(fs_info, METADATA_UUID))
-			metadata_uuid = fs_devices->metadata_uuid;
-		else
-			metadata_uuid = fs_devices->fsid;
+	if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE))
+		return 0;
 
-		if (!memcmp(fsid, metadata_uuid, BTRFS_FSID_SIZE)) {
-			ret = 0;
-			break;
-		}
-		fs_devices = fs_devices->seed;
-	}
-	return ret;
+	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
+		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
+			return 0;
+
+	return 1;
 }
 
-static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
-				      u64 phy_offset, struct page *page,
-				      u64 start, u64 end, int mirror)
+int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
+				   struct page *page, u64 start, u64 end,
+				   int mirror)
 {
 	u64 found_start;
 	int found_level;
@@ -864,9 +813,8 @@ static int check_async_write(struct btrfs_fs_info *fs_info,
 	return 1;
 }
 
-static blk_status_t btree_submit_bio_hook(struct inode *inode, struct bio *bio,
-					  int mirror_num,
-					  unsigned long bio_flags)
+blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
+				       int mirror_num, unsigned long bio_flags)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	int async = check_async_write(fs_info, BTRFS_I(inode));
@@ -951,11 +899,6 @@ static int btree_writepages(struct address_space *mapping,
 	return btree_write_cache_pages(mapping, wbc);
 }
 
-static int btree_readpage(struct file *file, struct page *page)
-{
-	return extent_read_full_page(page, btree_get_extent, 0);
-}
-
 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 {
 	if (PageWriteback(page) || PageDirty(page))
@@ -995,7 +938,6 @@ static int btree_set_page_dirty(struct page *page)
 }
 
 static const struct address_space_operations btree_aops = {
-	.readpage	= btree_readpage,
 	.writepages	= btree_writepages,
 	.releasepage	= btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
@@ -1208,7 +1150,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
 	root->root_key.offset = 0;
 
-	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0);
+	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
+				      BTRFS_NESTING_NORMAL);
 	if (IS_ERR(leaf)) {
 		ret = PTR_ERR(leaf);
 		leaf = NULL;
@@ -1280,7 +1223,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 	 */
 	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
-			NULL, 0, 0, 0);
+			NULL, 0, 0, 0, BTRFS_NESTING_NORMAL);
 	if (IS_ERR(leaf)) {
 		btrfs_put_root(root);
 		return ERR_CAST(leaf);
@@ -1505,10 +1448,12 @@ void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
 	struct btrfs_root *root;
 
 	while (!list_empty(&fs_info->allocated_roots)) {
+		char buf[BTRFS_ROOT_NAME_BUF_LEN];
+
 		root = list_first_entry(&fs_info->allocated_roots,
 					struct btrfs_root, leak_list);
-		btrfs_err(fs_info, "leaked root %llu-%llu refcount %d",
-			  root->root_key.objectid, root->root_key.offset,
+		btrfs_err(fs_info, "leaked root %s refcount %d",
+			  btrfs_root_name(root->root_key.objectid, buf),
 			  refcount_read(&root->refs));
 		while (refcount_read(&root->refs) > 1)
 			btrfs_put_root(root);
@@ -2115,12 +2060,10 @@ static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
 	RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
 	extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree,
-			    IO_TREE_INODE_IO, inode);
+			    IO_TREE_BTREE_INODE_IO, inode);
 	BTRFS_I(inode)->io_tree.track_uptodate = false;
 	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
 
-	BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
-
 	BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root);
 	memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
 	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
@@ -2626,18 +2569,17 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
 		level = btrfs_super_root_level(sb);
 		tree_root->node = read_tree_block(fs_info, btrfs_super_root(sb),
 						  generation, level, NULL);
-		if (IS_ERR(tree_root->node) ||
-		    !extent_buffer_uptodate(tree_root->node)) {
+		if (IS_ERR(tree_root->node)) {
 			handle_error = true;
+			ret = PTR_ERR(tree_root->node);
+			tree_root->node = NULL;
+			btrfs_warn(fs_info, "couldn't read tree root");
+			continue;
 
-			if (IS_ERR(tree_root->node)) {
-				ret = PTR_ERR(tree_root->node);
-				tree_root->node = NULL;
-			} else if (!extent_buffer_uptodate(tree_root->node)) {
-				ret = -EUCLEAN;
-			}
-
-			btrfs_warn(fs_info, "failed to read tree root");
+		} else if (!extent_buffer_uptodate(tree_root->node)) {
+			handle_error = true;
+			ret = -EIO;
+			btrfs_warn(fs_info, "error while reading tree root");
 			continue;
 		}
@@ -2753,7 +2695,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
 	fs_info->check_integrity_print_mask = 0;
 #endif
 	btrfs_init_balance(fs_info);
-	btrfs_init_async_reclaim_work(&fs_info->async_reclaim_work);
+	btrfs_init_async_reclaim_work(fs_info);
 
 	spin_lock_init(&fs_info->block_group_cache_lock);
 	fs_info->block_group_cache_tree = RB_ROOT;
@@ -2928,7 +2870,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
 	}
 
 	/*
-	 * Verify the type first, if that or the the checksum value are
+	 * Verify the type first, if that or the checksum value are
 	 * corrupted, we'll find out
 	 */
 	csum_type = btrfs_super_csum_type(disk_super);
@@ -3482,8 +3424,12 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
 		return ERR_CAST(page);
 
 	super = page_address(page);
-	if (btrfs_super_bytenr(super) != bytenr ||
-	    btrfs_super_magic(super) != BTRFS_MAGIC) {
+	if (btrfs_super_magic(super) != BTRFS_MAGIC) {
+		btrfs_release_disk_super(super);
+		return ERR_PTR(-ENODATA);
+	}
+
+	if (btrfs_super_bytenr(super) != bytenr) {
 		btrfs_release_disk_super(super);
 		return ERR_PTR(-EINVAL);
 	}
@@ -4056,6 +4002,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 	btrfs_cleanup_defrag_inodes(fs_info);
 
 	cancel_work_sync(&fs_info->async_reclaim_work);
+	cancel_work_sync(&fs_info->async_data_reclaim_work);
 
 	/* Cancel or finish ongoing discard work */
 	btrfs_discard_cleanup(fs_info);
@@ -4687,9 +4634,3 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
 
 	return 0;
 }
-
-static const struct extent_io_ops btree_extent_io_ops = {
-	/* mandatory callbacks */
-	.submit_bio_hook = btree_submit_bio_hook,
-	.readpage_end_io_hook = btree_readpage_end_io_hook,
-};
@@ -76,7 +76,11 @@ void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
 void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
 				 struct btrfs_root *root);
+int btrfs_validate_metadata_buffer(struct btrfs_io_bio *io_bio, u64 phy_offset,
+				   struct page *page, u64 start, u64 end,
+				   int mirror);
+blk_status_t btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio,
+				       int mirror_num, unsigned long bio_flags);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
 #endif
@@ -123,9 +127,6 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 				     u64 objectid);
 int btree_lock_page_hook(struct page *page, void *data,
 				void (*flush_fn)(void *));
-struct extent_map *btree_get_extent(struct btrfs_inode *inode,
-				    struct page *page, size_t pg_offset,
-				    u64 start, u64 len);
 int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags);
 int __init btrfs_end_io_wq_init(void);
 void __cold btrfs_end_io_wq_exit(void);
...
@@ -40,6 +40,7 @@ struct io_failure_record;
 enum {
 	IO_TREE_FS_PINNED_EXTENTS,
 	IO_TREE_FS_EXCLUDED_EXTENTS,
+	IO_TREE_BTREE_INODE_IO,
 	IO_TREE_INODE_IO,
 	IO_TREE_INODE_IO_FAILURE,
 	IO_TREE_RELOC_BLOCKS,
@@ -48,6 +49,7 @@ enum {
 	IO_TREE_INODE_FILE_EXTENT,
 	IO_TREE_LOG_CSUM_RANGE,
 	IO_TREE_SELFTEST,
+	IO_TREE_DEVICE_ALLOC_STATE,
 };
 
 struct extent_io_tree {
@@ -61,7 +63,6 @@ struct extent_io_tree {
 	u8 owner;
 
 	spinlock_t lock;
-	const struct extent_io_ops *ops;
 };
 
 struct extent_state {
...
This diff is collapsed.
@@ -74,18 +74,6 @@ typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
 typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
 		struct bio *bio, u64 bio_offset);
 
-struct extent_io_ops {
-	/*
-	 * The following callbacks must be always defined, the function
-	 * pointer will be called unconditionally.
-	 */
-	submit_bio_hook_t *submit_bio_hook;
-	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
-				    struct page *page, u64 start, u64 end,
-				    int mirror);
-};
-
 #define INLINE_EXTENT_BUFFER_PAGES 16
 #define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE)
 struct extent_buffer {
@@ -102,7 +90,7 @@ struct extent_buffer {
 
 	int blocking_writers;
 	atomic_t blocking_readers;
-	bool lock_nested;
+	bool lock_recursed;
 	/* >= 0 if eb belongs to a log tree, -1 otherwise */
 	short log_index;
@@ -193,8 +181,11 @@ typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode,
 int try_release_extent_mapping(struct page *page, gfp_t mask);
 int try_release_extent_buffer(struct page *page);
 
-int extent_read_full_page(struct page *page, get_extent_t *get_extent,
-			  int mirror_num);
+int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+				unsigned long bio_flags);
+int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
+		      struct bio **bio, unsigned long *bio_flags,
+		      unsigned int read_flags, u64 *prev_em_start);
 int extent_write_full_page(struct page *page, struct writeback_control *wbc);
 int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
 			      int mode);
@@ -203,7 +194,7 @@ int extent_writepages(struct address_space *mapping,
 int btree_write_cache_pages(struct address_space *mapping,
 			    struct writeback_control *wbc);
 void extent_readahead(struct readahead_control *rac);
-int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
 		  u64 start, u64 len);
 void set_page_extent_mapped(struct page *page);
...
@@ -318,8 +318,8 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
 		if (page_offsets)
 			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
 
-		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
-					       csum, nblocks);
+		count = btrfs_find_ordered_sum(BTRFS_I(inode), offset,
+					       disk_bytenr, csum, nblocks);
 		if (count)
 			goto found;
...
This diff is collapsed.
@@ -413,8 +413,6 @@ static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
 
 static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 {
-	__le64 *val;
-
 	io_ctl_map_page(io_ctl, 1);
 
 	/*
@@ -429,14 +427,13 @@ static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 		io_ctl->size -= sizeof(u64) * 2;
 	}
 
-	val = io_ctl->cur;
-	*val = cpu_to_le64(generation);
+	put_unaligned_le64(generation, io_ctl->cur);
 	io_ctl->cur += sizeof(u64);
 }
 
 static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 {
-	__le64 *gen;
+	u64 cache_gen;
 
 	/*
 	 * Skip the crc area. If we don't check crcs then we just have a 64bit
@@ -451,11 +448,11 @@ static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
 		io_ctl->size -= sizeof(u64) * 2;
 	}
 
-	gen = io_ctl->cur;
-	if (le64_to_cpu(*gen) != generation) {
+	cache_gen = get_unaligned_le64(io_ctl->cur);
+	if (cache_gen != generation) {
 		btrfs_err_rl(io_ctl->fs_info,
 			"space cache generation (%llu) does not match inode (%llu)",
-				*gen, generation);
+				cache_gen, generation);
 		io_ctl_unmap_page(io_ctl);
 		return -EIO;
 	}
@@ -525,8 +522,8 @@ static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
 		return -ENOSPC;
 
 	entry = io_ctl->cur;
-	entry->offset = cpu_to_le64(offset);
-	entry->bytes = cpu_to_le64(bytes);
+	put_unaligned_le64(offset, &entry->offset);
+	put_unaligned_le64(bytes, &entry->bytes);
 	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
 		BTRFS_FREE_SPACE_EXTENT;
 	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
@@ -599,8 +596,8 @@ static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
 	}
 
 	e = io_ctl->cur;
-	entry->offset = le64_to_cpu(e->offset);
-	entry->bytes = le64_to_cpu(e->bytes);
+	entry->offset = get_unaligned_le64(&e->offset);
+	entry->bytes = get_unaligned_le64(&e->bytes);
 	*type = e->type;
 	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
 	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
@@ -1353,7 +1350,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 
 	/*
 	 * at this point the pages are under IO and we're happy,
-	 * The caller is responsible for waiting on them and updating the
+	 * The caller is responsible for waiting on them and updating
 	 * the cache and the inode
 	 */
 	io_ctl->entries = entries;
...
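Editor's note: the free-space-cache hunks above replace pointer casts plus cpu_to_le64()/le64_to_cpu() with put_unaligned_le64()/get_unaligned_le64(), because the u64 fields can sit at arbitrary offsets inside a mapped page. The following is a minimal, self-contained userspace sketch of the same idea; the two helpers below are illustrative stand-ins written with memcpy, not the kernel's asm/unaligned.h implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's put_unaligned_le64(): byte-wise store, so the
 * destination needs no particular alignment. */
static void put_unaligned_le64(uint64_t val, void *p)
{
	uint8_t b[8];
	int i;

	for (i = 0; i < 8; i++)
		b[i] = (uint8_t)(val >> (8 * i));	/* little-endian byte order */
	memcpy(p, b, 8);
}

/* Stand-in for get_unaligned_le64(): byte-wise load and reassembly. */
static uint64_t get_unaligned_le64(const void *p)
{
	uint8_t b[8];
	uint64_t val = 0;
	int i;

	memcpy(b, p, 8);
	for (i = 0; i < 8; i++)
		val |= (uint64_t)b[i] << (8 * i);
	return val;
}

int main(void)
{
	/* Buffer standing in for a mapped cache page: the generation field
	 * starts at an odd offset, where a plain u64 load/store could fault
	 * on strict-alignment architectures. */
	uint8_t page[32];
	uint64_t generation = 123456789ULL;

	put_unaligned_le64(generation, page + 3);
	printf("round trip: %llu\n",
	       (unsigned long long)get_unaligned_le64(page + 3));
	return 0;
}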
This diff is collapsed.
@@ -378,6 +378,18 @@ static int check_xflags(unsigned int flags)
 	return 0;
 }
 
+bool btrfs_exclop_start(struct btrfs_fs_info *fs_info,
+			enum btrfs_exclusive_operation type)
+{
+	return !cmpxchg(&fs_info->exclusive_operation, BTRFS_EXCLOP_NONE, type);
+}
+
+void btrfs_exclop_finish(struct btrfs_fs_info *fs_info)
+{
+	WRITE_ONCE(fs_info->exclusive_operation, BTRFS_EXCLOP_NONE);
+	sysfs_notify(&fs_info->fs_devices->fsid_kobj, NULL, "exclusive_operation");
+}
+
 /*
  * Set the xflags from the internal inode flags. The remaining items of fsxattr
  * are zeroed.
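Editor's note: btrfs_exclop_start()/btrfs_exclop_finish() above replace the old BTRFS_FS_EXCL_OP bit with a cmpxchg() on fs_info->exclusive_operation, so exactly one exclusive operation (balance, resize, device add/remove/replace) can hold the slot at a time; callers such as btrfs_ioctl_resize() further down switch to this API. A minimal standalone sketch of the same claim/release pattern, written with C11 atomics rather than the kernel primitives, assuming made-up names for illustration:

#include <stdatomic.h>
#include <stdio.h>

enum exclop { EXCLOP_NONE, EXCLOP_BALANCE, EXCLOP_RESIZE, EXCLOP_DEV_REPLACE };

static _Atomic enum exclop exclusive_operation = EXCLOP_NONE;

/* Succeeds only if no exclusive operation is currently running,
 * mirroring the cmpxchg() in btrfs_exclop_start(). */
static int exclop_start(enum exclop type)
{
	enum exclop expected = EXCLOP_NONE;

	return atomic_compare_exchange_strong(&exclusive_operation,
					      &expected, type);
}

/* Release the slot; the kernel version additionally does a sysfs_notify()
 * on the "exclusive_operation" attribute. */
static void exclop_finish(void)
{
	atomic_store(&exclusive_operation, EXCLOP_NONE);
}

int main(void)
{
	if (exclop_start(EXCLOP_RESIZE)) {
		printf("resize claimed the exclusive slot\n");
		/* A second operation must back off while the first runs. */
		printf("balance start while busy: %d\n",
		       exclop_start(EXCLOP_BALANCE));
		exclop_finish();
	}
	return 0;
}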
@@ -618,7 +630,7 @@ static noinline int create_subvol(struct inode *dir,
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		ret = PTR_ERR(trans);
-		btrfs_subvolume_release_metadata(fs_info, &block_rsv);
+		btrfs_subvolume_release_metadata(root, &block_rsv);
 		goto fail_free;
 	}
 	trans->block_rsv = &block_rsv;
...@@ -628,7 +640,8 @@ static noinline int create_subvol(struct inode *dir, ...@@ -628,7 +640,8 @@ static noinline int create_subvol(struct inode *dir,
if (ret) if (ret)
goto fail; goto fail;
leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0); leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
BTRFS_NESTING_NORMAL);
if (IS_ERR(leaf)) { if (IS_ERR(leaf)) {
ret = PTR_ERR(leaf); ret = PTR_ERR(leaf);
goto fail; goto fail;
...@@ -742,7 +755,7 @@ static noinline int create_subvol(struct inode *dir, ...@@ -742,7 +755,7 @@ static noinline int create_subvol(struct inode *dir,
kfree(root_item); kfree(root_item);
trans->block_rsv = NULL; trans->block_rsv = NULL;
trans->bytes_reserved = 0; trans->bytes_reserved = 0;
btrfs_subvolume_release_metadata(fs_info, &block_rsv); btrfs_subvolume_release_metadata(root, &block_rsv);
err = btrfs_commit_transaction(trans); err = btrfs_commit_transaction(trans);
if (err && !ret) if (err && !ret)
...@@ -856,7 +869,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, ...@@ -856,7 +869,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (ret && pending_snapshot->snap) if (ret && pending_snapshot->snap)
pending_snapshot->snap->anon_dev = 0; pending_snapshot->snap->anon_dev = 0;
btrfs_put_root(pending_snapshot->snap); btrfs_put_root(pending_snapshot->snap);
btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv);
free_pending: free_pending:
if (pending_snapshot->anon_dev) if (pending_snapshot->anon_dev)
free_anon_bdev(pending_snapshot->anon_dev); free_anon_bdev(pending_snapshot->anon_dev);
...@@ -1306,7 +1319,7 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1306,7 +1319,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
break; break;
unlock_page(page); unlock_page(page);
btrfs_start_ordered_extent(inode, ordered, 1); btrfs_start_ordered_extent(ordered, 1);
btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered);
lock_page(page); lock_page(page);
/* /*
@@ -1638,7 +1651,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        if (ret)
                return ret;
-       if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+       if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_RESIZE)) {
                mnt_drop_write_file(file);
                return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
        }
@@ -1752,7 +1765,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
 out_free:
        kfree(vol_args);
 out:
-       clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+       btrfs_exclop_finish(fs_info);
        mnt_drop_write_file(file);
        return ret;
 }
@@ -3126,7 +3139,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
-       if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
+       if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_ADD))
                return BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
        vol_args = memdup_user(arg, sizeof(*vol_args));
@@ -3143,7 +3156,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
        kfree(vol_args);
 out:
-       clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+       btrfs_exclop_finish(fs_info);
        return ret;
 }
@@ -3172,7 +3185,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
                goto out;
        }
-       if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+       if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REMOVE)) {
                ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
                goto out;
        }
@@ -3183,7 +3196,7 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
                vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
                ret = btrfs_rm_device(fs_info, vol_args->name, 0);
        }
-       clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+       btrfs_exclop_finish(fs_info);
        if (!ret) {
                if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID)
@@ -3214,7 +3227,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
        if (ret)
                return ret;
-       if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+       if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REMOVE)) {
                ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
                goto out_drop_write;
        }
@@ -3232,7 +3245,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
                btrfs_info(fs_info, "disk deleted %s", vol_args->name);
        kfree(vol_args);
 out:
-       clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+       btrfs_exclop_finish(fs_info);
 out_drop_write:
        mnt_drop_write_file(file);
@@ -3462,15 +3475,12 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
                struct btrfs_space_info *tmp;
                info = NULL;
-               rcu_read_lock();
-               list_for_each_entry_rcu(tmp, &fs_info->space_info,
-                                       list) {
+               list_for_each_entry(tmp, &fs_info->space_info, list) {
                        if (tmp->flags == types[i]) {
                                info = tmp;
                                break;
                        }
                }
-               rcu_read_unlock();
                if (!info)
                        continue;
@@ -3518,15 +3528,12 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
                        break;
                info = NULL;
-               rcu_read_lock();
-               list_for_each_entry_rcu(tmp, &fs_info->space_info,
-                                       list) {
+               list_for_each_entry(tmp, &fs_info->space_info, list) {
                        if (tmp->flags == types[i]) {
                                info = tmp;
                                break;
                        }
                }
-               rcu_read_unlock();
                if (!info)
                        continue;
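Both hunks above drop the rcu_read_lock()/rcu_read_unlock() pair and switch from list_for_each_entry_rcu() to a plain list_for_each_entry(); space_info structures are not removed while the filesystem is mounted, so a bare list walk is enough here. Condensed, the resulting lookup is just the following (types[i] and the surrounding ioctl bookkeeping are elided):

    struct btrfs_space_info *info = NULL, *tmp;

    /* plain list walk, no RCU read-side section needed */
    list_for_each_entry(tmp, &fs_info->space_info, list) {
            if (tmp->flags == types[i]) {
                    info = tmp;
                    break;
            }
    }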
@@ -3736,11 +3743,11 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
                        ret = -EROFS;
                        goto out;
                }
-               if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+               if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_DEV_REPLACE)) {
                        ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
                } else {
                        ret = btrfs_dev_replace_by_ioctl(fs_info, p);
-                       clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+                       btrfs_exclop_finish(fs_info);
                }
                break;
        case BTRFS_IOCTL_DEV_REPLACE_CMD_STATUS:
@@ -3951,7 +3958,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
                return ret;
 again:
-       if (!test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+       if (btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
                mutex_lock(&fs_info->balance_mutex);
                need_unlock = true;
                goto locked;
@@ -3997,7 +4004,6 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
        }
 locked:
-       BUG_ON(!test_bit(BTRFS_FS_EXCL_OP, &fs_info->flags));
        if (arg) {
                bargs = memdup_user(arg, sizeof(*bargs));
@@ -4052,10 +4058,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 do_balance:
        /*
-        * Ownership of bctl and filesystem flag BTRFS_FS_EXCL_OP goes to
-        * btrfs_balance. bctl is freed in reset_balance_state, or, if
-        * restriper was paused all the way until unmount, in free_fs_info.
-        * The flag should be cleared after reset_balance_state.
+        * Ownership of bctl and exclusive operation goes to btrfs_balance.
+        * bctl is freed in reset_balance_state, or, if restriper was paused
+        * all the way until unmount, in free_fs_info. The flag should be
+        * cleared after reset_balance_state.
         */
        need_unlock = false;
@@ -4074,7 +4080,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
 out_unlock:
        mutex_unlock(&fs_info->balance_mutex);
        if (need_unlock)
-               clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
+               btrfs_exclop_finish(fs_info);
 out:
        mnt_drop_write_file(file);
        return ret;
@@ -4897,7 +4903,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_SYNC: {
                int ret;
-               ret = btrfs_start_delalloc_roots(fs_info, -1);
+               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
                if (ret)
                        return ret;
                ret = btrfs_sync_fs(inode->i_sb, 1);
@@ -57,8 +57,8 @@
  * performance reasons.
  *
  *
- * Lock nesting
- * ------------
+ * Lock recursion
+ * --------------
  *
  * A write operation on a tree might indirectly start a look up on the same
  * tree. This can happen when btrfs_cow_block locks the tree and needs to
@@ -201,7 +201,7 @@ void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
         * lock, but it won't change to or away from us. If we have the write
         * lock, we are the owner and it'll never change.
         */
-       if (eb->lock_nested && current->pid == eb->lock_owner)
+       if (eb->lock_recursed && current->pid == eb->lock_owner)
                return;
        btrfs_assert_tree_read_locked(eb);
        atomic_inc(&eb->blocking_readers);
@@ -225,7 +225,7 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
         * lock, but it won't change to or away from us. If we have the write
         * lock, we are the owner and it'll never change.
         */
-       if (eb->lock_nested && current->pid == eb->lock_owner)
+       if (eb->lock_recursed && current->pid == eb->lock_owner)
                return;
        if (eb->blocking_writers == 0) {
                btrfs_assert_spinning_writers_put(eb);
@@ -244,7 +244,8 @@ void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
  *
  * The rwlock is held upon exit.
  */
-void btrfs_tree_read_lock(struct extent_buffer *eb)
+void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
+                           bool recurse)
 {
        u64 start_ns = 0;
@@ -263,8 +264,9 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
                         * depends on this as it may be called on a partly
                         * (write-)locked tree.
                         */
-                       BUG_ON(eb->lock_nested);
-                       eb->lock_nested = true;
+                       WARN_ON(!recurse);
+                       BUG_ON(eb->lock_recursed);
+                       eb->lock_recursed = true;
                        read_unlock(&eb->lock);
                        trace_btrfs_tree_read_lock(eb, start_ns);
                        return;
@@ -279,6 +281,11 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
        trace_btrfs_tree_read_lock(eb, start_ns);
 }
 
+void btrfs_tree_read_lock(struct extent_buffer *eb)
+{
+       __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false);
+}
+
 /*
  * Lock extent buffer for read, optimistically expecting that there are no
  * contending blocking writers. If there are, don't wait.
@@ -362,11 +369,11 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
-        * The write unlock will do a barrier for us, and the lock_nested
+        * The write unlock will do a barrier for us, and the lock_recursed
         * field only matters to the lock owner.
         */
-       if (eb->lock_nested && current->pid == eb->lock_owner) {
-               eb->lock_nested = false;
+       if (eb->lock_recursed && current->pid == eb->lock_owner) {
+               eb->lock_recursed = false;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
@@ -388,11 +395,11 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
-        * The write unlock will do a barrier for us, and the lock_nested
+        * The write unlock will do a barrier for us, and the lock_recursed
         * field only matters to the lock owner.
         */
-       if (eb->lock_nested && current->pid == eb->lock_owner) {
-               eb->lock_nested = false;
+       if (eb->lock_recursed && current->pid == eb->lock_owner) {
+               eb->lock_recursed = false;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
@@ -409,7 +416,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
  *
  * The rwlock is held for write upon exit.
  */
-void btrfs_tree_lock(struct extent_buffer *eb)
+void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest)
        __acquires(&eb->lock)
 {
        u64 start_ns = 0;
@@ -434,6 +441,11 @@ void btrfs_tree_lock(struct extent_buffer *eb)
        trace_btrfs_tree_lock(eb, start_ns);
 }
 
+void btrfs_tree_lock(struct extent_buffer *eb)
+{
+       __btrfs_tree_lock(eb, BTRFS_NESTING_NORMAL);
+}
+
 /*
  * Release the write lock, either blocking or spinning (ie. there's no need
  * for an explicit blocking unlock, like btrfs_tree_read_unlock_blocking).
@@ -552,13 +564,14 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
  *
  * Return: root extent buffer with read lock held
  */
-struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
+                                                 bool recurse)
 {
        struct extent_buffer *eb;
        while (1) {
                eb = btrfs_root_node(root);
-               btrfs_tree_read_lock(eb);
+               __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, recurse);
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
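The locking hunks above rename lock_nested to lock_recursed and route tree locking through __btrfs_tree_read_lock()/__btrfs_tree_lock(), which take an explicit btrfs_lock_nesting argument (and, on the read side, a recurse flag); the old btrfs_tree_read_lock()/btrfs_tree_lock() become thin wrappers that pass BTRFS_NESTING_NORMAL. A caller that knows it may re-enter a buffer it already write-locks in the same task would ask for recursion explicitly; a short illustrative sketch (the caller context is made up):

    /* Illustrative only: allow the read lock to recurse if this task already
     * holds the write lock on eb (detected via lock_recursed and lock_owner). */
    __btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, true);
    /* ... read-only walk of the buffer ... */
    btrfs_tree_read_unlock(eb);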
@@ -56,6 +56,12 @@ enum {
        BTRFS_ORDERED_TRUNCATED,
        /* Regular IO for COW */
        BTRFS_ORDERED_REGULAR,
+       /* Used during fsync to track already logged extents */
+       BTRFS_ORDERED_LOGGED,
+       /* We have already logged all the csums of the ordered extent */
+       BTRFS_ORDERED_LOGGED_CSUM,
+       /* We wait for this extent to complete in the current transaction */
+       BTRFS_ORDERED_PENDING,
 };
 struct btrfs_ordered_extent {
@@ -104,6 +110,9 @@ struct btrfs_ordered_extent {
        /* list of checksums for insertion when the extent io is done */
        struct list_head list;
+       /* used for fast fsyncs */
+       struct list_head log_list;
+
        /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
        wait_queue_head_t wait;
@@ -142,9 +151,9 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
 }
 
 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
-void btrfs_remove_ordered_extent(struct inode *inode,
+void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                                struct btrfs_ordered_extent *entry);
-int btrfs_dec_test_ordered_pending(struct inode *inode,
+int btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
                                   struct btrfs_ordered_extent **cached,
                                   u64 file_offset, u64 io_size, int uptodate);
 int btrfs_dec_test_first_ordered_pending(struct btrfs_inode *inode,
@@ -165,17 +174,18 @@ void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
                           struct btrfs_ordered_sum *sum);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
                                                          u64 file_offset);
-void btrfs_start_ordered_extent(struct inode *inode,
-                               struct btrfs_ordered_extent *entry, int wait);
+void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry, int wait);
 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
-btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
+btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
                struct btrfs_inode *inode,
                u64 file_offset,
                u64 len);
-int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
-                          u8 *sum, int len);
+void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
+                                          struct list_head *list);
+int btrfs_find_ordered_sum(struct btrfs_inode *inode, u64 offset,
+                          u64 disk_bytenr, u8 *sum, int len);
 u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
                               const u64 range_start, const u64 range_len);
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
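Besides the struct inode to btrfs_inode conversions, the header gains a log_list member and btrfs_get_ordered_extents_for_logging(), which lets the fsync path collect an inode's in-flight ordered extents onto a caller-provided list. A hedged usage sketch (the surrounding fsync context is illustrative):

    LIST_HEAD(ordered_extents);

    /* Illustrative: gather the inode's in-flight ordered extents; each one is
     * linked onto the local list through its new log_list member, and the
     * caller is expected to drop the references it obtained when done. */
    btrfs_get_ordered_extents_for_logging(inode, &ordered_extents);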
@@ -6,7 +6,11 @@
 #ifndef BTRFS_PRINT_TREE_H
 #define BTRFS_PRINT_TREE_H
 
+/* Buffer size to contain tree name and possibly additional data (offset) */
+#define BTRFS_ROOT_NAME_BUF_LEN 48
+
 void btrfs_print_leaf(struct extent_buffer *l);
 void btrfs_print_tree(struct extent_buffer *c, bool follow);
+const char *btrfs_root_name(u64 objectid, char *buf);
 #endif
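The new btrfs_root_name() helper formats a human-readable name for a tree root into a caller-supplied buffer of BTRFS_ROOT_NAME_BUF_LEN bytes. A minimal usage sketch (the surrounding debug print is illustrative, not taken from the patch):

    char buf[BTRFS_ROOT_NAME_BUF_LEN];

    /* Illustrative: print which root owns an extent buffer. */
    pr_info("owner %s\n", btrfs_root_name(btrfs_header_owner(eb), buf));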
@@ -2315,7 +2315,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
  * Update qgroup rfer/excl counters.
  * Rfer update is easy, codes can explain themselves.
  *
- * Excl update is tricky, the update is split into 2 part.
+ * Excl update is tricky, the update is split into 2 parts.
  * Part 1: Possible exclusive <-> sharing detect:
  *     |       A       |       !A      |
  * -------------------------------------
@@ -13,7 +13,6 @@
 #define BTRFS_SEND_STREAM_VERSION 1
 #define BTRFS_SEND_BUF_SIZE SZ_64K
-#define BTRFS_SEND_READ_SIZE (48 * SZ_1K)
 
 enum btrfs_tlv_type {
        BTRFS_TLV_U8,
@@ -149,5 +149,7 @@ static inline void btrfs_space_info_free_bytes_may_use(
        btrfs_try_granting_tickets(fs_info, space_info);
        spin_unlock(&space_info->lock);
 }
 
+int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
+                            enum btrfs_reserve_flush_enum flush);
 #endif /* BTRFS_SPACE_INFO_H */
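The newly exported btrfs_reserve_data_bytes() is the entry point for ticket-based data space reservations. A hedged caller sketch follows; the flush mode shown is an assumption about what a data-path caller would pass, not something stated in this hunk:

    /* Illustrative: reserve space for 'len' bytes of data before dirtying
     * pages; on failure the write path backs off and returns the error. */
    ret = btrfs_reserve_data_bytes(fs_info, len, BTRFS_RESERVE_FLUSH_DATA);
    if (ret < 0)
            return ret;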