Commit 85d7ab24 authored by Linus Torvalds

Merge tag 'for-6.4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "Mostly core changes and cleanups, some notable fixes and two
  performance improvements in directory logging.

  The IO path cleanups remove or refactor old code, and the scrub main
  loop has been completely rewritten, also refactoring old code.

  There are some changes to non-btrfs code, mostly trivial; the cgroup
  punt bio logic is simply moved out of the generic code.

  Performance improvements:

   - improve logging changes in a directory during one transaction,
     avoid iterating over items and reduce lock contention (fsync time
     4x lower)

   - when logging directory entries during one transaction, reduce
     locking of subvolume trees by checking tree-log instead
     (improvement in throughput and latency for concurrent access to a
     subvolume)

  Notable fixes:

   - dev-replace:
      - properly honor read mode when requested to avoid reading from
        source device
       - the target device won't be used for eventual read repair, as
         this is unreliable for NODATASUM files
       - when there is unrepaired (and unrepairable) metadata during
         replace, exit early with an error and don't try to finish the
         whole operation

   - scrub ioctl properly rejects unknown flags

   - fix global block reserve calculations

   - fix partial direct IO writes when there's a page fault in the
     middle: iomap will try to continue with the partial request, but
     the btrfs side did not match that, which could lead to zeros being
     written instead of data

  Core changes:

   - io path:
      - continued cleanups and refactoring around bio handling
      - extent io submit path simplifications and cleanups
      - flush write path simplifications and cleanups
      - rework logic of passing sync mode of bio, with further cleanups

   - rewrite scrub code flow, restructure how the stripes are enumerated
     and verified in a more unified way

   - allow setting a lower threshold for block group reclaim in debug
     mode to aid zoned mode testing

   - remove obsolete time-based delayed ref throttling logic when
     truncating items

   - DREW locks are not using percpu variables anymore

   - more warning fixes (-Wmaybe-uninitialized)

   - u64 division simplifications

   - error handling improvements

  Non-btrfs code changes:

   - push the cgroup punt bio logic into btrfs code (there was no other
     user of it); the functionality can now be selected separately by
     BLK_CGROUP_PUNT_BIO

   - crc32c_impl removed after removing last uses in btrfs code

   - add btrfs_assertfail() to objtool table"

* tag 'for-6.4-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (147 commits)
  btrfs: mark btrfs_assertfail() __noreturn
  btrfs: fix uninitialized variable warnings
  btrfs: use log root when iterating over index keys when logging directory
  btrfs: avoid iterating over all indexes when logging directory
  btrfs: dev-replace: error out if we have unrepaired metadata error during
  btrfs: remove pointless loop at btrfs_get_next_valid_item()
  btrfs: scrub: reject unsupported scrub flags
  btrfs: reinterpret async discard iops_limit=0 as no delay
  btrfs: set default discard iops_limit to 1000
  btrfs: remove unused raid56 functions which were dedicated for scrub
  btrfs: scrub: remove scrub_bio structure
  btrfs: scrub: remove scrub_block and scrub_sector structures
  btrfs: scrub: remove the old scrub recheck code
  btrfs: scrub: remove the old writeback infrastructure
  btrfs: scrub: remove scrub_parity structure
  btrfs: scrub: use scrub_stripe to implement RAID56 P/Q scrub
  btrfs: scrub: switch scrub_simple_mirror() to scrub_stripe infrastructure
  btrfs: scrub: introduce helper to queue a stripe for scrub
  btrfs: scrub: introduce error reporting functionality for scrub_stripe
  btrfs: scrub: introduce a writeback helper for scrub_stripe
  ...
parents 94fc0792 f3724631
......@@ -41,6 +41,9 @@ config BLK_RQ_ALLOC_TIME
config BLK_CGROUP_RWSTAT
bool
config BLK_CGROUP_PUNT_BIO
bool
config BLK_DEV_BSG_COMMON
tristate
......
......@@ -56,7 +56,6 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;
#define BLKG_DESTROY_BATCH_SIZE 64
......@@ -166,7 +165,9 @@ static void __blkg_release(struct rcu_head *rcu)
{
struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
WARN_ON(!bio_list_empty(&blkg->async_bios));
#endif
/* release the blkcg and parent blkg refs this blkg has been holding */
css_put(&blkg->blkcg->css);
......@@ -188,6 +189,9 @@ static void blkg_release(struct percpu_ref *ref)
call_rcu(&blkg->rcu_head, __blkg_release);
}
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
static struct workqueue_struct *blkcg_punt_bio_wq;
static void blkg_async_bio_workfn(struct work_struct *work)
{
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
......@@ -198,10 +202,10 @@ static void blkg_async_bio_workfn(struct work_struct *work)
bool need_plug = false;
/* as long as there are pending bios, @blkg can't go away */
spin_lock_bh(&blkg->async_bio_lock);
spin_lock(&blkg->async_bio_lock);
bio_list_merge(&bios, &blkg->async_bios);
bio_list_init(&blkg->async_bios);
spin_unlock_bh(&blkg->async_bio_lock);
spin_unlock(&blkg->async_bio_lock);
/* start plug only when bio_list contains at least 2 bios */
if (bios.head && bios.head->bi_next) {
......@@ -214,6 +218,40 @@ static void blkg_async_bio_workfn(struct work_struct *work)
blk_finish_plug(&plug);
}
/*
* When a shared kthread issues a bio for a cgroup, doing so synchronously can
* lead to priority inversions as the kthread can be trapped waiting for that
* cgroup. Use this helper instead of submit_bio to punt the actual issuing to
* a dedicated per-blkcg work item to avoid such priority inversions.
*/
void blkcg_punt_bio_submit(struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
if (blkg->parent) {
spin_lock(&blkg->async_bio_lock);
bio_list_add(&blkg->async_bios, bio);
spin_unlock(&blkg->async_bio_lock);
queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
} else {
/* never bounce for the root cgroup */
submit_bio(bio);
}
}
EXPORT_SYMBOL_GPL(blkcg_punt_bio_submit);
static int __init blkcg_punt_bio_init(void)
{
blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
WQ_MEM_RECLAIM | WQ_FREEZABLE |
WQ_UNBOUND | WQ_SYSFS, 0);
if (!blkcg_punt_bio_wq)
return -ENOMEM;
return 0;
}
subsys_initcall(blkcg_punt_bio_init);
#endif /* CONFIG_BLK_CGROUP_PUNT_BIO */
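For orientation, a minimal caller sketch (illustrative only, not part of this diff): a filesystem submission path that punts cgroup-owned bios flagged with a filesystem-private bit, so they are issued from the per-blkcg worker instead of a shared thread. The flag check mirrors what btrfs does with its REQ_BTRFS_CGROUP_PUNT alias for REQ_FS_PRIVATE later in this series.
/* Illustrative sketch, kernel context assumed; not part of the diff above. */
static void example_submit_bio(struct bio *bio)
{
	if (bio->bi_opf & REQ_FS_PRIVATE)	/* e.g. btrfs's REQ_BTRFS_CGROUP_PUNT */
		blkcg_punt_bio_submit(bio);	/* re-issued from the blkcg_punt_bio workqueue */
	else
		submit_bio(bio);
}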
/**
* bio_blkcg_css - return the blkcg CSS associated with a bio
* @bio: target bio
......@@ -269,10 +307,12 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
blkg->q = disk->queue;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios);
INIT_WORK(&blkg->async_bio_work, blkg_async_bio_workfn);
blkg->blkcg = blkcg;
#endif
u64_stats_init(&blkg->iostat.sync);
for_each_possible_cpu(cpu) {
......@@ -1688,25 +1728,6 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
bool __blkcg_punt_bio_submit(struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
/* consume the flag first */
bio->bi_opf &= ~REQ_CGROUP_PUNT;
/* never bounce for the root cgroup */
if (!blkg->parent)
return false;
spin_lock_bh(&blkg->async_bio_lock);
bio_list_add(&blkg->async_bios, bio);
spin_unlock_bh(&blkg->async_bio_lock);
queue_work(blkcg_punt_bio_wq, &blkg->async_bio_work);
return true;
}
/*
* Scale the accumulated delay based on how long it has been since we updated
* the delay. We only call this when we are adding delay, in case it's been a
......@@ -2085,16 +2106,5 @@ bool blk_cgroup_congested(void)
return ret;
}
static int __init blkcg_init(void)
{
blkcg_punt_bio_wq = alloc_workqueue("blkcg_punt_bio",
WQ_MEM_RECLAIM | WQ_FREEZABLE |
WQ_UNBOUND | WQ_SYSFS, 0);
if (!blkcg_punt_bio_wq)
return -ENOMEM;
return 0;
}
subsys_initcall(blkcg_init);
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");
......@@ -72,9 +72,10 @@ struct blkcg_gq {
struct blkg_iostat_set iostat;
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
#ifdef CONFIG_BLK_CGROUP_PUNT_BIO
spinlock_t async_bio_lock;
struct bio_list async_bios;
#endif
union {
struct work_struct async_bio_work;
struct work_struct free_work;
......@@ -375,16 +376,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q)))
bool __blkcg_punt_bio_submit(struct bio *bio);
static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
if (bio->bi_opf & REQ_CGROUP_PUNT)
return __blkcg_punt_bio_submit(bio);
else
return false;
}
static inline void blkcg_bio_issue_init(struct bio *bio)
{
bio_issue_init(&bio->bi_issue, bio_sectors(bio));
......@@ -506,8 +497,6 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }
static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }
......
......@@ -830,9 +830,6 @@ EXPORT_SYMBOL(submit_bio_noacct);
*/
void submit_bio(struct bio *bio)
{
if (blkcg_punt_bio_submit(bio))
return;
if (bio_op(bio) == REQ_OP_READ) {
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, bio_sectors(bio));
......
......@@ -2,6 +2,7 @@
config BTRFS_FS
tristate "Btrfs filesystem support"
select BLK_CGROUP_PUNT_BIO
select CRYPTO
select CRYPTO_CRC32C
select LIBCRC32C
......
......@@ -30,7 +30,10 @@ typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
* passed to btrfs_submit_bio for mapping to the physical devices.
*/
struct btrfs_bio {
/* Inode and offset into it that this I/O operates on. */
/*
* Inode and offset into it that this I/O operates on.
* Only set for data I/O.
*/
struct btrfs_inode *inode;
u64 file_offset;
......@@ -58,6 +61,9 @@ struct btrfs_bio {
atomic_t pending_ios;
struct work_struct end_io_work;
/* File system that this I/O operates on. */
struct btrfs_fs_info *fs_info;
/*
* This member must come last, bio_alloc_bioset will allocate enough
* bytes for entire btrfs_bio but relies on bio being last.
......@@ -73,11 +79,11 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void);
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info,
btrfs_bio_end_io_t end_io, void *private);
struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
struct btrfs_inode *inode,
btrfs_bio_end_io_t end_io, void *private);
struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
struct btrfs_fs_info *fs_info,
btrfs_bio_end_io_t end_io, void *private);
static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
......@@ -88,7 +94,11 @@ static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
/* Bio only refers to one ordered extent. */
#define REQ_BTRFS_ONE_ORDERED REQ_DRV
void btrfs_submit_bio(struct bio *bio, int mirror_num);
/* Submit using blkcg_punt_bio_submit. */
#define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE
void btrfs_submit_bio(struct btrfs_bio *bbio, int mirror_num);
void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_replace);
int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
u64 length, u64 logical, struct page *page,
unsigned int pg_offset, int mirror_num);
......
......@@ -160,15 +160,6 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
cache);
/*
* If not empty, someone is still holding mutex of
* full_stripe_lock, which can only be released by caller.
* And it will definitely cause use-after-free when caller
* tries to release full stripe lock.
*
* No better way to resolve, but only to warn.
*/
WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
kfree(cache->free_space_ctl);
kfree(cache->physical_map);
kfree(cache);
......@@ -1977,12 +1968,12 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
map = em->map_lookup;
data_stripe_length = em->orig_block_len;
io_stripe_size = map->stripe_len;
io_stripe_size = BTRFS_STRIPE_LEN;
chunk_start = em->start;
/* For RAID5/6 adjust to a full IO stripe length */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
io_stripe_size = map->stripe_len * nr_data_stripes(map);
io_stripe_size = nr_data_stripes(map) << BTRFS_STRIPE_LEN_SHIFT;
buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
if (!buf) {
......@@ -1992,28 +1983,28 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
for (i = 0; i < map->num_stripes; i++) {
bool already_inserted = false;
u64 stripe_nr;
u64 offset;
u32 stripe_nr;
u32 offset;
int j;
if (!in_range(physical, map->stripes[i].physical,
data_stripe_length))
continue;
stripe_nr = physical - map->stripes[i].physical;
stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
stripe_nr = (physical - map->stripes[i].physical) >>
BTRFS_STRIPE_LEN_SHIFT;
offset = (physical - map->stripes[i].physical) &
BTRFS_STRIPE_LEN_MASK;
if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
BTRFS_BLOCK_GROUP_RAID10)) {
stripe_nr = stripe_nr * map->num_stripes + i;
stripe_nr = div_u64(stripe_nr, map->sub_stripes);
}
BTRFS_BLOCK_GROUP_RAID10))
stripe_nr = div_u64(stripe_nr * map->num_stripes + i,
map->sub_stripes);
/*
* The remaining case would be for RAID56, multiply by
* nr_data_stripes(). Alternatively, just use rmap_len below
* instead of map->stripe_len
*/
bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
/* Ensure we don't add duplicate addresses */
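As a quick sanity check of the shift/mask arithmetic above, a standalone sketch (all values assumed: a 64K stripe length, i.e. BTRFS_STRIPE_LEN_SHIFT of 16; the EX_ names are hypothetical):
#include <stdint.h>
#include <stdio.h>

#define EX_STRIPE_LEN_SHIFT 16				/* assumed 64K stripes */
#define EX_STRIPE_LEN_MASK  ((1u << EX_STRIPE_LEN_SHIFT) - 1)

int main(void)
{
	uint64_t stripe_physical = 0x100000;		/* map->stripes[i].physical */
	uint64_t physical = stripe_physical + 3 * 0x10000 + 4096;

	uint32_t stripe_nr = (uint32_t)((physical - stripe_physical) >> EX_STRIPE_LEN_SHIFT);
	uint32_t offset = (uint32_t)((physical - stripe_physical) & EX_STRIPE_LEN_MASK);

	/* Prints "stripe_nr=3 offset=4096", the same result div64_u64_rem() gave. */
	printf("stripe_nr=%u offset=%u\n", stripe_nr, offset);
	return 0;
}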
......@@ -2124,8 +2115,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
atomic_set(&cache->frozen, 0);
mutex_init(&cache->free_space_lock);
cache->full_stripe_locks_root.root = RB_ROOT;
mutex_init(&cache->full_stripe_locks_root.lock);
return cache;
}
......@@ -2672,7 +2661,7 @@ static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset)
}
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 bytes_used, u64 type,
u64 type,
u64 chunk_offset, u64 size)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
......@@ -2687,7 +2676,6 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
cache->length = size;
set_free_space_tree_thresholds(cache);
cache->used = bytes_used;
cache->flags = type;
cache->cached = BTRFS_CACHE_FINISHED;
cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
......@@ -2738,9 +2726,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
#ifdef CONFIG_BTRFS_DEBUG
if (btrfs_should_fragment_free_space(cache)) {
u64 new_bytes_used = size - bytes_used;
cache->space_info->bytes_used += new_bytes_used >> 1;
cache->space_info->bytes_used += size >> 1;
fragment_free_space(cache);
}
#endif
......
......@@ -91,14 +91,6 @@ struct btrfs_caching_control {
/* Once caching_thread() finds this much free space, it will wake up waiters. */
#define CACHING_CTL_WAKE_UP SZ_2M
/*
* Tree to record all locked full stripes of a RAID5/6 block group
*/
struct btrfs_full_stripe_locks_tree {
struct rb_root root;
struct mutex lock;
};
struct btrfs_block_group {
struct btrfs_fs_info *fs_info;
struct inode *inode;
......@@ -229,9 +221,6 @@ struct btrfs_block_group {
*/
int swap_extents;
/* Record locked full stripes for RAID5/6 block group */
struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
/*
* Allocation offset for the block group to implement sequential
* allocation. This is used only on a zoned filesystem.
......@@ -302,7 +291,7 @@ void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info);
void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 bytes_used, u64 type,
u64 type,
u64 chunk_offset, u64 size);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
......
......@@ -232,9 +232,6 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
u64 num_bytes = 0;
int ret = -ENOSPC;
if (!block_rsv)
return 0;
spin_lock(&block_rsv->lock);
num_bytes = mult_perc(block_rsv->size, min_percent);
if (block_rsv->reserved >= num_bytes)
......@@ -245,17 +242,15 @@ int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent)
}
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush)
{
u64 num_bytes = 0;
int ret = -ENOSPC;
if (!block_rsv)
return 0;
spin_lock(&block_rsv->lock);
num_bytes = min_reserved;
if (block_rsv->reserved >= num_bytes)
ret = 0;
else
......@@ -355,17 +350,19 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
/*
* But we also want to reserve enough space so we can do the fallback
* global reserve for an unlink, which is an additional 5 items (see the
* comment in __unlink_start_trans for what we're modifying.)
* global reserve for an unlink, which is an additional
* BTRFS_UNLINK_METADATA_UNITS items.
*
* But we also need space for the delayed ref updates from the unlink,
* so its 10, 5 for the actual operation, and 5 for the delayed ref
* updates.
* so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
* each unlink metadata item.
*/
min_items += 10;
min_items += BTRFS_UNLINK_METADATA_UNITS;
num_bytes = max_t(u64, num_bytes,
btrfs_calc_insert_metadata_size(fs_info, min_items));
btrfs_calc_insert_metadata_size(fs_info, min_items) +
btrfs_calc_delayed_ref_bytes(fs_info,
BTRFS_UNLINK_METADATA_UNITS));
spin_lock(&sinfo->lock);
spin_lock(&block_rsv->lock);
......
......@@ -65,7 +65,7 @@ int btrfs_block_rsv_add(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_percent);
int btrfs_block_rsv_refill(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
......
......@@ -142,11 +142,22 @@ struct btrfs_inode {
/* a local copy of root's last_log_commit */
int last_log_commit;
/*
* Total number of bytes pending delalloc, used by stat to calculate the
* real block usage of the file. This is used only for files.
*/
u64 delalloc_bytes;
union {
/*
* Total number of bytes pending delalloc, used by stat to
* calculate the real block usage of the file. This is used
* only for files.
*/
u64 delalloc_bytes;
/*
* The lowest possible index of the next dir index key which
* points to an inode that needs to be logged.
* This is used only for directories.
* Use the helpers btrfs_get_first_dir_index_to_log() and
* btrfs_set_first_dir_index_to_log() to access this field.
*/
u64 first_dir_index_to_log;
};
union {
/*
......@@ -247,6 +258,17 @@ struct btrfs_inode {
struct inode vfs_inode;
};
static inline u64 btrfs_get_first_dir_index_to_log(const struct btrfs_inode *inode)
{
return READ_ONCE(inode->first_dir_index_to_log);
}
static inline void btrfs_set_first_dir_index_to_log(struct btrfs_inode *inode,
u64 index)
{
WRITE_ONCE(inode->first_dir_index_to_log, index);
}
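A minimal usage sketch (assumed callers, not part of this diff): the directory logging code can read the cached index locklessly before walking the log tree and publish the next starting point once it is done.
/* Illustrative only: both helpers below are hypothetical callers. */
static u64 example_dir_log_start(struct btrfs_inode *dir)
{
	/* Lockless read; pairs with the WRITE_ONCE() in the setter above. */
	return btrfs_get_first_dir_index_to_log(dir);
}

static void example_dir_log_done(struct btrfs_inode *dir, u64 last_logged_index)
{
	/* The next log attempt can start right after the last index logged. */
	btrfs_set_first_dir_index_to_log(dir, last_logged_index + 1);
}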
static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
return container_of(inode, struct btrfs_inode, vfs_inode);
......@@ -407,7 +429,8 @@ static inline void btrfs_inode_split_flags(u64 inode_item_flags,
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
u32 pgoff, u8 *csum, const u8 * const csum_expected);
blk_status_t btrfs_extract_ordered_extent(struct btrfs_bio *bbio);
int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
struct btrfs_ordered_extent *ordered);
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
u32 bio_offset, struct bio_vec *bv);
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
......
......@@ -6,8 +6,8 @@
#ifndef BTRFS_COMPRESSION_H
#define BTRFS_COMPRESSION_H
#include <linux/blk_types.h>
#include <linux/sizes.h>
#include "bio.h"
struct btrfs_inode;
......@@ -23,6 +23,7 @@ struct btrfs_inode;
/* Maximum length of compressed data stored on disk */
#define BTRFS_MAX_COMPRESSED (SZ_128K)
#define BTRFS_MAX_COMPRESSED_PAGES (BTRFS_MAX_COMPRESSED / PAGE_SIZE)
static_assert((BTRFS_MAX_COMPRESSED % PAGE_SIZE) == 0);
/* Maximum size of data before compression */
......@@ -37,9 +38,6 @@ struct compressed_bio {
/* the pages with the compressed data on them */
struct page **compressed_pages;
/* inode that owns this data */
struct inode *inode;
/* starting offset in the inode for our pages */
u64 start;
......@@ -55,14 +53,14 @@ struct compressed_bio {
/* Whether this is a write for writeback. */
bool writeback;
/* IO errors */
blk_status_t status;
union {
/* For reads, this is the bio we are copying the data into */
struct bio *orig_bio;
struct btrfs_bio *orig_bbio;
struct work_struct write_end_work;
};
/* Must be last. */
struct btrfs_bio bbio;
};
static inline unsigned int btrfs_compress_type(unsigned int type_level)
......@@ -88,16 +86,14 @@ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
struct compressed_bio *cb, u32 decompressed);
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
unsigned int len, u64 disk_start,
unsigned int compressed_len,
struct page **compressed_pages,
unsigned int nr_pages,
blk_opf_t write_flags,
struct cgroup_subsys_state *blkcg_css,
bool writeback);
void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num);
void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num);
unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
......
......@@ -854,7 +854,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
* Search for a key in the given extent_buffer.
*
* The lower boundary for the search is specified by the slot number @first_slot.
* Use a value of 0 to search over the whole extent buffer.
* Use a value of 0 to search over the whole extent buffer. Works for both
* leaves and nodes.
*
* The slot in the extent buffer is returned via @slot. If the key exists in the
* extent buffer, then @slot will point to the slot where the key is, otherwise
......@@ -863,8 +864,8 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
* Slot may point to the total number of items (i.e. one position beyond the last
* key) if the key is bigger than the last key in the extent buffer.
*/
int btrfs_generic_bin_search(struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot)
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot)
{
unsigned long p;
int item_size;
......@@ -959,7 +960,7 @@ struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
if (slot < 0 || slot >= btrfs_header_nritems(parent))
return ERR_PTR(-ENOENT);
BUG_ON(level == 0);
ASSERT(level);
check.level = level - 1;
check.transid = btrfs_node_ptr_generation(parent, slot);
......@@ -1064,11 +1065,14 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
return 0;
left = btrfs_read_node_slot(parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
if (pslot) {
left = btrfs_read_node_slot(parent, pslot - 1);
if (IS_ERR(left)) {
ret = PTR_ERR(left);
left = NULL;
goto enospc;
}
if (left) {
__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
wret = btrfs_cow_block(trans, root, left,
parent, pslot - 1, &left,
......@@ -1079,11 +1083,14 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
}
right = btrfs_read_node_slot(parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
if (pslot + 1 < btrfs_header_nritems(parent)) {
right = btrfs_read_node_slot(parent, pslot + 1);
if (IS_ERR(right)) {
ret = PTR_ERR(right);
right = NULL;
goto enospc;
}
if (right) {
__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
wret = btrfs_cow_block(trans, root, right,
parent, pslot + 1, &right,
......@@ -1240,14 +1247,14 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (!parent)
return 1;
left = btrfs_read_node_slot(parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
/* first, try to make some room in the middle buffer */
if (left) {
if (pslot) {
u32 left_nr;
left = btrfs_read_node_slot(parent, pslot - 1);
if (IS_ERR(left))
return PTR_ERR(left);
__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
left_nr = btrfs_header_nritems(left);
......@@ -1292,16 +1299,17 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(left);
free_extent_buffer(left);
}
right = btrfs_read_node_slot(parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
/*
* then try to empty the right most buffer into the middle
*/
if (right) {
if (pslot + 1 < btrfs_header_nritems(parent)) {
u32 right_nr;
right = btrfs_read_node_slot(parent, pslot + 1);
if (IS_ERR(right))
return PTR_ERR(right);
__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
right_nr = btrfs_header_nritems(right);
......@@ -1864,7 +1872,7 @@ static inline int search_for_key_slot(struct extent_buffer *eb,
return 0;
}
return btrfs_generic_bin_search(eb, search_low_slot, key, slot);
return btrfs_bin_search(eb, search_low_slot, key, slot);
}
static int search_leaf(struct btrfs_trans_handle *trans,
......@@ -2321,7 +2329,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
*/
btrfs_unlock_up_safe(p, level + 1);
ret = btrfs_bin_search(b, key, &slot);
ret = btrfs_bin_search(b, 0, key, &slot);
if (ret < 0)
goto done;
......@@ -2482,26 +2490,15 @@ int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
struct btrfs_path *path)
{
while (1) {
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
int ret;
const int slot = path->slots[0];
const struct extent_buffer *leaf = path->nodes[0];
/* This is where we start walking the path. */
if (slot >= btrfs_header_nritems(leaf)) {
/*
* If we've reached the last slot in this leaf we need
* to go to the next leaf and reset the path.
*/
ret = btrfs_next_leaf(root, path);
if (ret)
return ret;
continue;
}
/* Store the found, valid item in @key. */
btrfs_item_key_to_cpu(leaf, key, slot);
break;
ret = btrfs_next_leaf(root, path);
if (ret)
return ret;
}
btrfs_item_key_to_cpu(path->nodes[0], key, path->slots[0]);
return 0;
}
......@@ -3198,12 +3195,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_write_locked(path->nodes[1]);
right = btrfs_read_node_slot(upper, slot + 1);
/*
* slot + 1 is not valid or we fail to read the right node,
* no big deal, just return.
*/
if (IS_ERR(right))
return 1;
return PTR_ERR(right);
__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
......@@ -3417,12 +3410,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_write_locked(path->nodes[1]);
left = btrfs_read_node_slot(path->nodes[1], slot - 1);
/*
* slot - 1 is not valid or we fail to read the left node,
* no big deal, just return.
*/
if (IS_ERR(left))
return 1;
return PTR_ERR(left);
__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
......@@ -4576,7 +4565,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
while (1) {
nritems = btrfs_header_nritems(cur);
level = btrfs_header_level(cur);
sret = btrfs_bin_search(cur, min_key, &slot);
sret = btrfs_bin_search(cur, 0, min_key, &slot);
if (sret < 0) {
ret = sret;
goto out;
......
......@@ -508,22 +508,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int __init btrfs_ctree_init(void);
void __cold btrfs_ctree_exit(void);
int btrfs_generic_bin_search(struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot);
int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
const struct btrfs_key *key, int *slot);
/*
* Simple binary search on an extent buffer. Works for both leaves and nodes, and
* always searches over the whole range of keys (slot 0 to slot 'nritems - 1').
*/
static inline int btrfs_bin_search(struct extent_buffer *eb,
const struct btrfs_key *key,
int *slot)
{
return btrfs_generic_bin_search(eb, 0, key, slot);
}
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
int *slot);
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
......
......@@ -358,8 +358,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes,
* racing with an ordered completion or some such that would think it
* needs to free the reservation we just made.
*/
spin_lock(&inode->lock);
nr_extents = count_max_extents(fs_info, num_bytes);
spin_lock(&inode->lock);
btrfs_mod_outstanding_extents(inode, nr_extents);
inode->csum_bytes += disk_num_bytes;
btrfs_calculate_inode_block_rsv_size(fs_info, inode);
......
......@@ -53,24 +53,6 @@ bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
return ret;
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
u64 num_entries =
atomic_read(&trans->transaction->delayed_refs.num_entries);
u64 avg_runtime;
u64 val;
smp_mb();
avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
val = num_entries * avg_runtime;
if (val >= NSEC_PER_SEC)
return 1;
if (val >= NSEC_PER_SEC / 2)
return 2;
return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
/*
* Release a ref head's reservation.
*
......@@ -83,20 +65,9 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
u64 released = 0;
/*
* We have to check the mount option here because we could be enabling
* the free space tree for the first time and don't have the compat_ro
* option set yet.
*
* We need extra reservations if we have the free space tree because
* we'll have to modify that tree as well.
*/
if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
num_bytes *= 2;
released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
if (released)
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
......@@ -118,18 +89,8 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
if (!trans->delayed_ref_updates)
return;
num_bytes = btrfs_calc_insert_metadata_size(fs_info,
trans->delayed_ref_updates);
/*
* We have to check the mount option here because we could be enabling
* the free space tree for the first time and don't have the compat_ro
* option set yet.
*
* We need extra reservations if we have the free space tree because
* we'll have to modify that tree as well.
*/
if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
num_bytes *= 2;
num_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
trans->delayed_ref_updates);
spin_lock(&delayed_rsv->lock);
delayed_rsv->size += num_bytes;
......@@ -200,7 +161,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0;
int ret = -ENOSPC;
......@@ -217,7 +178,7 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
if (ret)
return ret;
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
0, num_bytes, 1);
return 0;
......
......@@ -253,6 +253,27 @@ extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
int __init btrfs_delayed_ref_init(void);
void __cold btrfs_delayed_ref_exit(void);
static inline u64 btrfs_calc_delayed_ref_bytes(const struct btrfs_fs_info *fs_info,
int num_delayed_refs)
{
u64 num_bytes;
num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);
/*
* We have to check the mount option here because we could be enabling
* the free space tree for the first time and don't have the compat_ro
* option set yet.
*
* We need extra reservations if we have the free space tree because
* we'll have to modify that tree as well.
*/
if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
num_bytes *= 2;
return num_bytes;
}
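For scale, a standalone arithmetic sketch (values assumed: a 16K nodesize and BTRFS_MAX_LEVEL of 8, the factors used by btrfs_calc_insert_metadata_size()):
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t nodesize = 16 * 1024;	/* assumed */
	const uint64_t max_level = 8;		/* BTRFS_MAX_LEVEL */
	const int num_delayed_refs = 1;

	uint64_t bytes = nodesize * max_level * 2 * num_delayed_refs;
	uint64_t with_free_space_tree = bytes * 2;

	/* Prints "262144 524288": 256K per delayed ref, 512K with the free space tree. */
	printf("%llu %llu\n", (unsigned long long)bytes,
	       (unsigned long long)with_free_space_tree);
	return 0;
}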
static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
int action, u64 bytenr, u64 len, u64 parent)
{
......@@ -385,7 +406,6 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *src,
u64 num_bytes);
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
/*
......
......@@ -1894,8 +1894,7 @@ static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head(
}
static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *locked_ref,
unsigned long *run_refs)
struct btrfs_delayed_ref_head *locked_ref)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
......@@ -1917,7 +1916,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
return -EAGAIN;
}
(*run_refs)++;
ref->in_tree = 0;
rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
RB_CLEAR_NODE(&ref->ref_node);
......@@ -1981,10 +1979,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_head *locked_ref = NULL;
ktime_t start = ktime_get();
int ret;
unsigned long count = 0;
unsigned long actual_count = 0;
delayed_refs = &trans->transaction->delayed_refs;
do {
......@@ -2014,8 +2010,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_lock(&locked_ref->lock);
btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
ret = btrfs_run_delayed_refs_for_head(trans, locked_ref,
&actual_count);
ret = btrfs_run_delayed_refs_for_head(trans, locked_ref);
if (ret < 0 && ret != -EAGAIN) {
/*
* Error, btrfs_run_delayed_refs_for_head already
......@@ -2046,24 +2041,6 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
cond_resched();
} while ((nr != -1 && count < nr) || locked_ref);
/*
* We don't want to include ref heads since we can have empty ref heads
* and those will drastically skew our runtime down since we just do
* accounting, no actual extent tree updates.
*/
if (actual_count > 0) {
u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
u64 avg;
/*
* We weigh the current average higher than our current runtime
* to avoid large swings in the average.
*/
spin_lock(&delayed_refs->lock);
avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
spin_unlock(&delayed_refs->lock);
}
return 0;
}
......@@ -5509,11 +5486,11 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
{
int level = wc->level;
int lookup_info = 1;
int ret;
int ret = 0;
while (level >= 0) {
ret = walk_down_proc(trans, root, path, wc, lookup_info);
if (ret > 0)
if (ret)
break;
if (level == 0)
......@@ -5528,10 +5505,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
path->slots[level]++;
continue;
} else if (ret < 0)
return ret;
break;
level = wc->level;
}
return 0;
return (ret == 1) ? 0 : ret;
}
static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
......@@ -5708,12 +5685,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
ret = walk_down_tree(trans, root, path, wc);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
err = ret;
break;
}
ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
if (ret < 0) {
btrfs_abort_transaction(trans, ret);
err = ret;
break;
}
......
......@@ -335,48 +335,6 @@ static int search_csum_tree(struct btrfs_fs_info *fs_info,
return ret;
}
/*
* Locate the file_offset of @cur_disk_bytenr of a @bio.
*
* Bio of btrfs represents read range of
* [bi_sector << 9, bi_sector << 9 + bi_size).
* Knowing this, we can iterate through each bvec to locate the page belong to
* @cur_disk_bytenr and get the file offset.
*
* @inode is used to determine if the bvec page really belongs to @inode.
*
* Return 0 if we can't find the file offset
* Return >0 if we find the file offset and restore it to @file_offset_ret
*/
static int search_file_offset_in_bio(struct bio *bio, struct inode *inode,
u64 disk_bytenr, u64 *file_offset_ret)
{
struct bvec_iter iter;
struct bio_vec bvec;
u64 cur = bio->bi_iter.bi_sector << SECTOR_SHIFT;
int ret = 0;
bio_for_each_segment(bvec, bio, iter) {
struct page *page = bvec.bv_page;
if (cur > disk_bytenr)
break;
if (cur + bvec.bv_len <= disk_bytenr) {
cur += bvec.bv_len;
continue;
}
ASSERT(in_range(disk_bytenr, cur, bvec.bv_len));
if (page->mapping && page->mapping->host &&
page->mapping->host == inode) {
ret = 1;
*file_offset_ret = page_offset(page) + bvec.bv_offset +
disk_bytenr - cur;
break;
}
}
return ret;
}
/*
* Lookup the checksum for the read bio in csum tree.
*
......@@ -386,17 +344,15 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
{
struct btrfs_inode *inode = bbio->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info;
struct extent_io_tree *io_tree = &inode->io_tree;
struct bio *bio = &bbio->bio;
struct btrfs_path *path;
const u32 sectorsize = fs_info->sectorsize;
const u32 csum_size = fs_info->csum_size;
u32 orig_len = bio->bi_iter.bi_size;
u64 orig_disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
u64 cur_disk_bytenr;
const unsigned int nblocks = orig_len >> fs_info->sectorsize_bits;
int count = 0;
blk_status_t ret = BLK_STS_OK;
u32 bio_offset = 0;
if ((inode->flags & BTRFS_INODE_NODATASUM) ||
test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
......@@ -447,28 +403,14 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
path->skip_locking = 1;
}
for (cur_disk_bytenr = orig_disk_bytenr;
cur_disk_bytenr < orig_disk_bytenr + orig_len;
cur_disk_bytenr += (count * sectorsize)) {
u64 search_len = orig_disk_bytenr + orig_len - cur_disk_bytenr;
unsigned int sector_offset;
u8 *csum_dst;
/*
* Although both cur_disk_bytenr and orig_disk_bytenr is u64,
* we're calculating the offset to the bio start.
*
* Bio size is limited to UINT_MAX, thus unsigned int is large
* enough to contain the raw result, not to mention the right
* shifted result.
*/
ASSERT(cur_disk_bytenr - orig_disk_bytenr < UINT_MAX);
sector_offset = (cur_disk_bytenr - orig_disk_bytenr) >>
fs_info->sectorsize_bits;
csum_dst = bbio->csum + sector_offset * csum_size;
while (bio_offset < orig_len) {
int count;
u64 cur_disk_bytenr = orig_disk_bytenr + bio_offset;
u8 *csum_dst = bbio->csum +
(bio_offset >> fs_info->sectorsize_bits) * csum_size;
count = search_csum_tree(fs_info, path, cur_disk_bytenr,
search_len, csum_dst);
orig_len - bio_offset, csum_dst);
if (count < 0) {
ret = errno_to_blk_status(count);
if (bbio->csum != bbio->csum_inline)
......@@ -493,14 +435,9 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
if (inode->root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
u64 file_offset;
int ret;
ret = search_file_offset_in_bio(bio,
&inode->vfs_inode,
cur_disk_bytenr, &file_offset);
if (ret)
set_extent_bits(io_tree, file_offset,
u64 file_offset = bbio->file_offset + bio_offset;
set_extent_bits(&inode->io_tree, file_offset,
file_offset + sectorsize - 1,
EXTENT_NODATASUM);
} else {
......@@ -509,6 +446,7 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio)
cur_disk_bytenr, cur_disk_bytenr + sectorsize);
}
}
bio_offset += count * sectorsize;
}
btrfs_free_path(path);
......@@ -659,7 +597,8 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
* in is large enough to contain all csums.
*/
int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
u8 *csum_buf, unsigned long *csum_bitmap)
u8 *csum_buf, unsigned long *csum_bitmap,
bool search_commit)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
......@@ -676,6 +615,12 @@ int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
if (!path)
return -ENOMEM;
if (search_commit) {
path->skip_locking = 1;
path->reada = READA_FORWARD;
path->search_commit_root = 1;
}
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
key.type = BTRFS_EXTENT_CSUM_KEY;
key.offset = start;
......
......@@ -57,7 +57,8 @@ int btrfs_lookup_csums_list(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit,
bool nowait);
int btrfs_lookup_csums_bitmap(struct btrfs_root *root, u64 start, u64 end,
u8 *csum_buf, unsigned long *csum_bitmap);
u8 *csum_buf, unsigned long *csum_bitmap,
bool search_commit);
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
const struct btrfs_path *path,
struct btrfs_file_extent_item *fi,
......
......@@ -24,6 +24,18 @@
#define BTRFS_SUPER_INFO_SIZE 4096
static_assert(sizeof(struct btrfs_super_block) == BTRFS_SUPER_INFO_SIZE);
/*
* Number of metadata items necessary for an unlink operation:
*
* 1 for the possible orphan item
* 1 for the dir item
* 1 for the dir index
* 1 for the inode ref
* 1 for the inode
* 1 for the parent inode
*/
#define BTRFS_UNLINK_METADATA_UNITS 6
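For scale, a worked example assuming a 16K nodesize: btrfs_calc_insert_metadata_size() charges nodesize * BTRFS_MAX_LEVEL * 2 per item, so the six unlink units account for 6 * 16K * 8 * 2 = 1.5M of the global reserve, and btrfs_calc_delayed_ref_bytes() adds the same amount again for the matching delayed ref updates (doubled once more if the free space tree is enabled).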
/*
* The reserved space at the beginning of each device. It covers the primary
* super block and leaves space for potential use by other tools like
......@@ -193,11 +205,7 @@ enum {
#define BTRFS_FEATURE_COMPAT_RO_SAFE_SET 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR 0ULL
#ifdef CONFIG_BTRFS_DEBUG
/*
* Extent tree v2 supported only with CONFIG_BTRFS_DEBUG
*/
#define BTRFS_FEATURE_INCOMPAT_SUPP \
#define BTRFS_FEATURE_INCOMPAT_SUPP_STABLE \
(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
......@@ -210,23 +218,22 @@ enum {
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
BTRFS_FEATURE_INCOMPAT_ZONED | \
BTRFS_FEATURE_INCOMPAT_ZONED)
#ifdef CONFIG_BTRFS_DEBUG
/*
* Features under development, like extent tree v2 support, are enabled
* only under CONFIG_BTRFS_DEBUG.
*/
#define BTRFS_FEATURE_INCOMPAT_SUPP \
(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE | \
BTRFS_FEATURE_INCOMPAT_EXTENT_TREE_V2)
#else
#define BTRFS_FEATURE_INCOMPAT_SUPP \
(BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF | \
BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
BTRFS_FEATURE_INCOMPAT_RAID56 | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
BTRFS_FEATURE_INCOMPAT_NO_HOLES | \
BTRFS_FEATURE_INCOMPAT_METADATA_UUID | \
BTRFS_FEATURE_INCOMPAT_RAID1C34 | \
BTRFS_FEATURE_INCOMPAT_ZONED)
#define BTRFS_FEATURE_INCOMPAT_SUPP \
(BTRFS_FEATURE_INCOMPAT_SUPP_STABLE)
#endif
#define BTRFS_FEATURE_INCOMPAT_SAFE_SET \
......@@ -412,7 +419,6 @@ struct btrfs_fs_info {
* Must be written and read while holding btrfs_fs_info::commit_root_sem.
*/
u64 last_reloc_trans;
u64 avg_delayed_ref_runtime;
/*
* This is updated to the current trans every time a full commit is
......@@ -638,7 +644,6 @@ struct btrfs_fs_info {
refcount_t scrub_workers_refcnt;
struct workqueue_struct *scrub_workers;
struct workqueue_struct *scrub_wr_completion_workers;
struct workqueue_struct *scrub_parity_workers;
struct btrfs_subpage_info *subpage_info;
struct btrfs_discard_ctl discard_ctl;
......@@ -828,7 +833,7 @@ static inline u64 btrfs_csum_bytes_to_leaves(
* Use this if we would be adding new items, as we could split nodes as we cow
* down the tree.
*/
static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
static inline u64 btrfs_calc_insert_metadata_size(const struct btrfs_fs_info *fs_info,
unsigned num_items)
{
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
......@@ -838,7 +843,7 @@ static inline u64 btrfs_calc_insert_metadata_size(struct btrfs_fs_info *fs_info,
* Doing a truncate or a modification won't result in new nodes or leaves, just
* what we need for COW.
*/
static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
static inline u64 btrfs_calc_metadata_size(const struct btrfs_fs_info *fs_info,
unsigned num_items)
{
return (u64)fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
......
......@@ -527,7 +527,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
while (1) {
u64 clear_start = 0, clear_len = 0, extent_start = 0;
bool should_throttle = false;
bool refill_delayed_refs_rsv = false;
fi = NULL;
leaf = path->nodes[0];
......@@ -660,8 +660,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
/* No pending yet, add ourselves */
pending_del_slot = path->slots[0];
pending_del_nr = 1;
} else if (pending_del_nr &&
path->slots[0] + 1 == pending_del_slot) {
} else if (path->slots[0] + 1 == pending_del_slot) {
/* Hop on the pending chunk */
pending_del_nr++;
pending_del_slot = path->slots[0];
......@@ -686,10 +685,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
btrfs_abort_transaction(trans, ret);
break;
}
if (be_nice) {
if (btrfs_should_throttle_delayed_refs(trans))
should_throttle = true;
}
if (be_nice && btrfs_check_space_for_delayed_refs(fs_info))
refill_delayed_refs_rsv = true;
}
if (found_type == BTRFS_INODE_ITEM_KEY)
......@@ -697,7 +694,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
if (path->slots[0] == 0 ||
path->slots[0] != pending_del_slot ||
should_throttle) {
refill_delayed_refs_rsv) {
if (pending_del_nr) {
ret = btrfs_del_items(trans, root, path,
pending_del_slot,
......@@ -720,7 +717,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* actually allocate, so just bail if we're short and
* let the normal reservation dance happen higher up.
*/
if (should_throttle) {
if (refill_delayed_refs_rsv) {
ret = btrfs_delayed_refs_rsv_refill(fs_info,
BTRFS_RESERVE_NO_FLUSH);
if (ret) {
......
......@@ -3161,6 +3161,11 @@ static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
if (IS_ERR(sa))
return PTR_ERR(sa);
if (sa->flags & ~BTRFS_SCRUB_SUPPORTED_FLAGS) {
ret = -EOPNOTSUPP;
goto out;
}
if (!(sa->flags & BTRFS_SCRUB_READONLY)) {
ret = mnt_want_write_file(file);
if (ret)
......
......@@ -325,24 +325,12 @@ struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root)
* acquire the lock.
*/
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
void btrfs_drew_lock_init(struct btrfs_drew_lock *lock)
{
int ret;
ret = percpu_counter_init(&lock->writers, 0, GFP_KERNEL);
if (ret)
return ret;
atomic_set(&lock->readers, 0);
atomic_set(&lock->writers, 0);
init_waitqueue_head(&lock->pending_readers);
init_waitqueue_head(&lock->pending_writers);
return 0;
}
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock)
{
percpu_counter_destroy(&lock->writers);
}
/* Return true if acquisition is successful, false otherwise */
......@@ -351,10 +339,10 @@ bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
if (atomic_read(&lock->readers))
return false;
percpu_counter_inc(&lock->writers);
atomic_inc(&lock->writers);
/* Ensure writers count is updated before we check for pending readers */
smp_mb();
smp_mb__after_atomic();
if (atomic_read(&lock->readers)) {
btrfs_drew_write_unlock(lock);
return false;
......@@ -374,7 +362,7 @@ void btrfs_drew_write_lock(struct btrfs_drew_lock *lock)
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock)
{
percpu_counter_dec(&lock->writers);
atomic_dec(&lock->writers);
cond_wake_up(&lock->pending_readers);
}
......@@ -390,8 +378,7 @@ void btrfs_drew_read_lock(struct btrfs_drew_lock *lock)
*/
smp_mb__after_atomic();
wait_event(lock->pending_readers,
percpu_counter_sum(&lock->writers) == 0);
wait_event(lock->pending_readers, atomic_read(&lock->writers) == 0);
}
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock)
......
......@@ -195,13 +195,12 @@ static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
struct btrfs_drew_lock {
atomic_t readers;
struct percpu_counter writers;
atomic_t writers;
wait_queue_head_t pending_writers;
wait_queue_head_t pending_readers;
};
int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
......
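A minimal usage sketch of the declarations above (illustrative, not part of this diff): with the switch from a percpu counter to an atomic_t the initializer can no longer fail, so callers simply embed the lock and initialize it unconditionally.
/* Illustrative only: the embedding structure and callers are hypothetical. */
struct example_ctx {
	struct btrfs_drew_lock lock;
};

static void example_init(struct example_ctx *ctx)
{
	btrfs_drew_lock_init(&ctx->lock);	/* returns void now, nothing to check */
}

static void example_writer(struct example_ctx *ctx)
{
	btrfs_drew_write_lock(&ctx->lock);	/* waits until no readers are active */
	/* ... writer-side work, excluded from readers ... */
	btrfs_drew_write_unlock(&ctx->lock);
}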
......@@ -55,11 +55,6 @@ static inline unsigned int btrfs_lru_cache_size(const struct btrfs_lru_cache *ca
return cache->size;
}
static inline bool btrfs_lru_cache_is_full(const struct btrfs_lru_cache *cache)
{
return cache->size >= cache->max_size;
}
static inline struct btrfs_lru_cache_entry *btrfs_lru_cache_lru_entry(
struct btrfs_lru_cache *cache)
{
......
......@@ -17,6 +17,7 @@
#include "compression.h"
#include "ctree.h"
#include "super.h"
#include "btrfs_inode.h"
#define LZO_LEN 4
......@@ -329,7 +330,7 @@ static void copy_compressed_segment(struct compressed_bio *cb,
int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
const struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
const struct btrfs_fs_info *fs_info = cb->bbio.inode->root->fs_info;
const u32 sectorsize = fs_info->sectorsize;
char *kaddr;
int ret;
......@@ -388,8 +389,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
*/
btrfs_err(fs_info, "unexpectedly large lzo segment len %u",
seg_len);
ret = -EIO;
goto out;
return -EIO;
}
/* Copy the compressed segment payload into workspace */
......@@ -400,8 +400,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
workspace->buf, &out_len);
if (ret != LZO_E_OK) {
btrfs_err(fs_info, "failed to decompress");
ret = -EIO;
goto out;
return -EIO;
}
/* Copy the data into inode pages */
......@@ -410,7 +409,7 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* All data read, exit */
if (ret == 0)
goto out;
return 0;
ret = 0;
/* Check if the sector has enough space for a segment header */
......@@ -421,10 +420,8 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
/* Skip the padding zeros */
cur_in += sector_bytes_left;
}
out:
if (!ret)
zero_fill_bio(cb->orig_bio);
return ret;
return 0;
}
int lzo_decompress(struct list_head *ws, const u8 *data_in,
......
......@@ -253,7 +253,7 @@ void __cold _btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt,
#endif
#ifdef CONFIG_BTRFS_ASSERT
void __cold btrfs_assertfail(const char *expr, const char *file, int line)
void __cold __noreturn btrfs_assertfail(const char *expr, const char *file, int line)
{
pr_err("assertion failed: %s, in %s:%d\n", expr, file, line);
BUG();
......
......@@ -160,7 +160,7 @@ do { \
} while (0)
#ifdef CONFIG_BTRFS_ASSERT
void __cold btrfs_assertfail(const char *expr, const char *file, int line);
void __cold __noreturn btrfs_assertfail(const char *expr, const char *file, int line);
#define ASSERT(expr) \
(likely(expr) ? (void)0 : btrfs_assertfail(#expr, __FILE__, __LINE__))
......
......@@ -160,14 +160,16 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
* @compress_type: Compression algorithm used for data.
*
* Most of these parameters correspond to &struct btrfs_file_extent_item. The
* tree is given a single reference on the ordered extent that was inserted.
* tree is given a single reference on the ordered extent that was inserted, and
* the returned pointer is given a second reference.
*
* Return: 0 or -ENOMEM.
* Return: the new ordered extent or error pointer.
*/
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
u64 disk_num_bytes, u64 offset, unsigned flags,
int compress_type)
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
struct btrfs_inode *inode, u64 file_offset,
u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
u64 disk_num_bytes, u64 offset, unsigned long flags,
int compress_type)
{
struct btrfs_root *root = inode->root;
struct btrfs_fs_info *fs_info = root->fs_info;
......@@ -181,7 +183,7 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
/* For nocow write, we can release the qgroup rsv right now */
ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
if (ret < 0)
return ret;
return ERR_PTR(ret);
ret = 0;
} else {
/*
......@@ -190,11 +192,11 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
*/
ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
if (ret < 0)
return ret;
return ERR_PTR(ret);
}
entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
if (!entry)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
entry->file_offset = file_offset;
entry->num_bytes = num_bytes;
......@@ -256,6 +258,32 @@ int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
btrfs_mod_outstanding_extents(inode, 1);
spin_unlock(&inode->lock);
/* One ref for the returned entry to match semantics of lookup. */
refcount_inc(&entry->refs);
return entry;
}
/*
* Add a new btrfs_ordered_extent for the range, but drop the reference instead
* of returning it to the caller.
*/
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
u64 disk_num_bytes, u64 offset, unsigned long flags,
int compress_type)
{
struct btrfs_ordered_extent *ordered;
ordered = btrfs_alloc_ordered_extent(inode, file_offset, num_bytes,
ram_bytes, disk_bytenr,
disk_num_bytes, offset, flags,
compress_type);
if (IS_ERR(ordered))
return PTR_ERR(ordered);
btrfs_put_ordered_extent(ordered);
return 0;
}
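A minimal caller sketch (illustrative, not part of this diff): a caller that needs the ordered extent afterwards uses the allocating variant and drops the extra reference when done, while fire-and-forget callers keep using btrfs_add_ordered_extent().
/* Illustrative only: the range arguments and flags are assumptions for the sketch. */
static int example_create_ordered(struct btrfs_inode *inode, u64 file_offset,
				  u64 num_bytes, u64 disk_bytenr)
{
	struct btrfs_ordered_extent *ordered;

	ordered = btrfs_alloc_ordered_extent(inode, file_offset, num_bytes,
					     num_bytes, disk_bytenr, num_bytes,
					     0, 1U << BTRFS_ORDERED_REGULAR,
					     BTRFS_COMPRESS_NONE);
	if (IS_ERR(ordered))
		return PTR_ERR(ordered);

	/* ... use 'ordered', e.g. attach it to an in-flight write ... */

	btrfs_put_ordered_extent(ordered);	/* drop the extra lookup-style reference */
	return 0;
}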
......@@ -1088,39 +1116,37 @@ bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
return false;
}
static int clone_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pos,
u64 len)
{
struct inode *inode = ordered->inode;
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
u64 file_offset = ordered->file_offset + pos;
u64 disk_bytenr = ordered->disk_bytenr + pos;
unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
/*
* The splitting extent is already counted and will be added again in
* btrfs_add_ordered_extent_*(). Subtract len to avoid double counting.
*/
percpu_counter_add_batch(&fs_info->ordered_bytes, -len,
fs_info->delalloc_batch);
WARN_ON_ONCE(flags & (1 << BTRFS_ORDERED_COMPRESSED));
return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
disk_bytenr, len, 0, flags,
ordered->compress_type);
}
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
u64 post)
/* Split out a new ordered extent for this first @len bytes of @ordered. */
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 len)
{
struct inode *inode = ordered->inode;
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
struct rb_node *node;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret = 0;
u64 file_offset = ordered->file_offset;
u64 disk_bytenr = ordered->disk_bytenr;
unsigned long flags = ordered->flags & BTRFS_ORDERED_TYPE_FLAGS;
struct rb_node *node;
trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));
/*
* The entire bio must be covered by the ordered extent, but we can't
* reduce the original extent to a zero length either.
*/
if (WARN_ON_ONCE(len >= ordered->num_bytes))
return -EINVAL;
/* We cannot split once ordered extent is past end_bio. */
if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
return -EINVAL;
/* We cannot split a compressed ordered extent. */
if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
return -EINVAL;
/* Checksum list should be empty. */
if (WARN_ON_ONCE(!list_empty(&ordered->list)))
return -EINVAL;
spin_lock_irq(&tree->lock);
/* Remove from tree once */
node = &ordered->rb_node;
......@@ -1129,11 +1155,11 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
if (tree->last == node)
tree->last = NULL;
ordered->file_offset += pre;
ordered->disk_bytenr += pre;
ordered->num_bytes -= (pre + post);
ordered->disk_num_bytes -= (pre + post);
ordered->bytes_left -= (pre + post);
ordered->file_offset += len;
ordered->disk_bytenr += len;
ordered->num_bytes -= len;
ordered->disk_num_bytes -= len;
ordered->bytes_left -= len;
/* Re-insert the node */
node = tree_insert(&tree->tree, ordered->file_offset, &ordered->rb_node);
......@@ -1144,13 +1170,15 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
spin_unlock_irq(&tree->lock);
if (pre)
ret = clone_ordered_extent(ordered, 0, pre);
if (ret == 0 && post)
ret = clone_ordered_extent(ordered, pre + ordered->disk_num_bytes,
post);
/*
* The splitting extent is already counted and will be added again in
* btrfs_add_ordered_extent(). Subtract len to avoid double counting.
*/
percpu_counter_add_batch(&fs_info->ordered_bytes, -len, fs_info->delalloc_batch);
return ret;
return btrfs_add_ordered_extent(BTRFS_I(inode), file_offset, len, len,
disk_bytenr, len, 0, flags,
ordered->compress_type);
}
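A caller sketch (illustrative, not part of this diff): when a bio had to be shortened, for instance at a zone append boundary, the portion it covers is split off before submission and the remainder stays behind as the original ordered extent.
/* Illustrative only: 'bio_len' is the shortened bio size, a hypothetical name. */
static int example_split_for_bio(struct btrfs_ordered_extent *ordered,
				 u64 bio_len)
{
	if (bio_len == ordered->num_bytes)
		return 0;			/* the bio still covers the whole range */

	return btrfs_split_ordered_extent(ordered, bio_len);
}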
int __init ordered_data_init(void)
......
......@@ -178,9 +178,14 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
struct btrfs_ordered_extent **cached,
u64 file_offset, u64 io_size);
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
struct btrfs_inode *inode, u64 file_offset,
u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
u64 disk_num_bytes, u64 offset, unsigned long flags,
int compress_type);
int btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset,
u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
u64 disk_num_bytes, u64 offset, unsigned flags,
u64 disk_num_bytes, u64 offset, unsigned long flags,
int compress_type);
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
struct btrfs_ordered_sum *sum);
......@@ -207,8 +212,7 @@ void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
struct extent_state **cached_state);
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
u64 post);
int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 len);
int __init ordered_data_init(void);
void __cold ordered_data_exit(void);
......