Commit 9fea2274, authored Dec 16, 2023 by Kent Overstreet

bcachefs: for_each_member_device() now declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

Parent: 80eab7a7
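In practice this is a calling-convention change applied mechanically across the tree: the member-device iterators now declare and manage their own loop variable, so callers stop declaring a struct bch_dev pointer and an iteration index. A minimal before/after sketch of the usage, condensed from the hunks below (illustrative only):

	/* Old calling convention: the caller declares both the device
	 * pointer and the iteration index. */
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i)
		bch2_dev_usage_init(ca);

	/* New calling convention: the macro declares struct bch_dev *ca
	 * itself, C99-style, in its own for-loop scope. */
	for_each_member_device(c, ca)
		bch2_dev_usage_init(ca);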
Showing 14 changed files with 157 additions and 242 deletions (+157, -242):
fs/bcachefs/alloc_background.c    +5   -13
fs/bcachefs/btree_gc.c           +66  -101
fs/bcachefs/buckets.c             +6   -13
fs/bcachefs/chardev.c             +2    -4
fs/bcachefs/fs.c                  +4    -7
fs/bcachefs/journal.c             +1    -4
fs/bcachefs/journal_io.c          +5    -8
fs/bcachefs/journal_reclaim.c     +2    -6
fs/bcachefs/movinggc.c            +2    -5
fs/bcachefs/recovery.c            +4    -6
fs/bcachefs/sb-clean.c            +7   -10
fs/bcachefs/sb-members.h         +30   -26
fs/bcachefs/super-io.c           +11   -16
fs/bcachefs/super.c              +12   -23
fs/bcachefs/alloc_background.c

@@ -1787,16 +1787,14 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 static void bch2_do_invalidates_work(struct work_struct *work)
 {
 	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
-	struct bch_dev *ca;
 	struct btree_trans *trans = bch2_trans_get(c);
-	unsigned i;
 	int ret = 0;

 	ret = bch2_btree_write_buffer_tryflush(trans);
 	if (ret)
 		goto err;

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		s64 nr_to_invalidate =
 			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

@@ -1925,8 +1923,6 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
 int bch2_fs_freespace_init(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	int ret = 0;
 	bool doing_init = false;

@@ -1935,7 +1931,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 	 * every mount:
 	 */
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		if (ca->mi.freespace_initialized)
 			continue;

@@ -1995,15 +1991,13 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
 void bch2_recalc_capacity(struct bch_fs *c)
 {
-	struct bch_dev *ca;
 	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
 	unsigned bucket_size_max = 0;
 	unsigned long ra_pages = 0;
-	unsigned i;

 	lockdep_assert_held(&c->state_lock);

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

 		ra_pages += bdi->ra_pages;

@@ -2011,7 +2005,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 	bch2_set_ra_pages(c, ra_pages);

-	for_each_rw_member(ca, c, i) {
+	for_each_rw_member(c, ca) {
 		u64 dev_reserve = 0;

 		/*

@@ -2067,11 +2061,9 @@ void bch2_recalc_capacity(struct bch_fs *c)
 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	u64 ret = U64_MAX;

-	for_each_rw_member(ca, c, i)
+	for_each_rw_member(c, ca)
 		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
 	return ret;
 }
fs/bcachefs/btree_gc.c

@@ -1153,13 +1153,10 @@ static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 static void bch2_mark_superblocks(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	mutex_lock(&c->sb_lock);
 	gc_pos_set(c, gc_phase(GC_PHASE_SB));

-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
 	mutex_unlock(&c->sb_lock);
 }

@@ -1184,13 +1181,10 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 static void bch2_gc_free(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	genradix_free(&c->reflink_gc_table);
 	genradix_free(&c->gc_stripes);

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
 			sizeof(struct bucket_array) +
 			ca->mi.nbuckets * sizeof(struct bucket));

@@ -1212,7 +1206,7 @@ static int bch2_gc_done(struct bch_fs *c,
 	bool verify = !metadata_only &&
 		!c->opts.reconstruct_alloc &&
 		(!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
-	unsigned i, dev;
+	unsigned i;
 	int ret = 0;

 	percpu_down_write(&c->mark_lock);

@@ -1224,14 +1218,14 @@ static int bch2_gc_done(struct bch_fs *c,
 	, ##__VA_ARGS__, dst->_f, src->_f)))				\
 		dst->_f = src->_f
 #define copy_dev_field(_err, _f, _msg, ...)				\
-	copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+	copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
 #define copy_fs_field(_err, _f, _msg, ...)				\
 	copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)

 	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
 		bch2_fs_usage_acc_to_base(c, i);

-	for_each_member_device(ca, c, dev) {
+	__for_each_member_device(c, ca) {
 		struct bch_dev_usage *dst = ca->usage_base;
 		struct bch_dev_usage *src = (void *)
 			bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,

@@ -1307,9 +1301,6 @@ static int bch2_gc_done(struct bch_fs *c,
 static int bch2_gc_start(struct bch_fs *c)
 {
-	struct bch_dev *ca = NULL;
-	unsigned i;
-
 	BUG_ON(c->usage_gc);

 	c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),

@@ -1319,7 +1310,7 @@ static int bch2_gc_start(struct bch_fs *c)
 		return -BCH_ERR_ENOMEM_gc_start;
 	}

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		BUG_ON(ca->usage_gc);

 		ca->usage_gc = alloc_percpu(struct bch_dev_usage);

@@ -1338,10 +1329,7 @@ static int bch2_gc_start(struct bch_fs *c)
 static int bch2_gc_reset(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		free_percpu(ca->usage_gc);
 		ca->usage_gc = NULL;
 	}

@@ -1379,9 +1367,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	enum bch_data_type type;
 	int ret;

-	if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
-		return 1;
-
 	old = bch2_alloc_to_v4(k, &old_convert);
 	new = *old;

@@ -1478,48 +1463,36 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
-	struct bch_dev *ca;
-	unsigned i;
 	int ret = 0;

-	for_each_member_device(ca, c, i) {
-		ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-				POS(ca->dev_idx, ca->mi.first_bucket),
-				BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
-				NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
-			bch2_alloc_write_key(trans, &iter, k, metadata_only));
-		if (ret < 0) {
-			bch_err_fn(c, ret);
+	for_each_member_device(c, ca) {
+		ret = bch2_trans_run(c,
+			for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
+				POS(ca->dev_idx, ca->mi.first_bucket),
+				POS(ca->dev_idx, ca->mi.nbuckets - 1),
+				BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
+			bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+		if (ret) {
 			percpu_ref_put(&ca->ref);
 			break;
 		}
 	}

-	bch2_trans_put(trans);
-	return ret < 0 ? ret : 0;
+	bch_err_fn(c, ret);
+	return ret;
 }

 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
-	struct bch_dev *ca;
-	struct btree_trans *trans = bch2_trans_get(c);
-	struct bucket *g;
-	struct bch_alloc_v4 a_convert;
-	const struct bch_alloc_v4 *a;
-	unsigned i;
-	int ret;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
 				ca->mi.nbuckets * sizeof(struct bucket),
 				GFP_KERNEL|__GFP_ZERO);
 		if (!buckets) {
 			percpu_ref_put(&ca->ref);
 			bch_err(c, "error allocating ca->buckets[gc]");
-			ret = -BCH_ERR_ENOMEM_gc_alloc_start;
-			goto err;
+			return -BCH_ERR_ENOMEM_gc_alloc_start;
 		}

 		buckets->first_bucket	= ca->mi.first_bucket;

@@ -1527,41 +1500,38 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 		rcu_assign_pointer(ca->buckets_gc, buckets);
 	}

-	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-				 BTREE_ITER_PREFETCH, k, ({
-		ca = bch_dev_bkey_exists(c, k.k->p.inode);
-		g = gc_bucket(ca, k.k->p.offset);
-		a = bch2_alloc_to_v4(k, &a_convert);
-
-		g->gen_valid		= 1;
-		g->gen			= a->gen;
-
-		if (metadata_only &&
-		    (a->data_type == BCH_DATA_user ||
-		     a->data_type == BCH_DATA_cached ||
-		     a->data_type == BCH_DATA_parity)) {
-			g->data_type		= a->data_type;
-			g->dirty_sectors	= a->dirty_sectors;
-			g->cached_sectors	= a->cached_sectors;
-			g->stripe		= a->stripe;
-			g->stripe_redundancy	= a->stripe_redundancy;
-		}
-
-		0;
-	}));
-err:
-	bch2_trans_put(trans);
+	int ret = bch2_trans_run(c,
+		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+				   BTREE_ITER_PREFETCH, k, ({
+			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bucket *g = gc_bucket(ca, k.k->p.offset);
+
+			struct bch_alloc_v4 a_convert;
+			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+			g->gen_valid		= 1;
+			g->gen			= a->gen;
+
+			if (metadata_only &&
+			    (a->data_type == BCH_DATA_user ||
+			     a->data_type == BCH_DATA_cached ||
+			     a->data_type == BCH_DATA_parity)) {
+				g->data_type		= a->data_type;
+				g->dirty_sectors	= a->dirty_sectors;
+				g->cached_sectors	= a->cached_sectors;
+				g->stripe		= a->stripe;
+				g->stripe_redundancy	= a->stripe_redundancy;
+			}
+
+			0;
+		})));
 	bch_err_fn(c, ret);
 	return ret;
 }

 static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_array *buckets = gc_bucket_array(ca);
 		struct bucket *g;

@@ -1932,10 +1902,7 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
 int bch2_gc_gens(struct bch_fs *c)
 {
-	struct btree_trans *trans;
-	struct bch_dev *ca;
 	u64 b, start_time = local_clock();
-	unsigned i;
 	int ret;

 	/*

@@ -1948,9 +1915,8 @@ int bch2_gc_gens(struct bch_fs *c)
 	trace_and_count(c, gc_gens_start, c);
 	down_read(&c->gc_lock);
-	trans = bch2_trans_get(c);

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_gens *gens = bucket_gens(ca);

 		BUG_ON(ca->oldest_gen);

@@ -1967,33 +1933,31 @@ int bch2_gc_gens(struct bch_fs *c)
 			ca->oldest_gen[b] = gens->b[b];
 	}

-	for (i = 0; i < BTREE_ID_NR; i++)
+	for (unsigned i = 0; i < BTREE_ID_NR; i++)
 		if (btree_type_has_ptrs(i)) {
 			c->gc_gens_btree = i;
 			c->gc_gens_pos = POS_MIN;

-			ret = for_each_btree_key_commit(trans, iter, i, POS_MIN,
-					BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
-					k,
-					NULL, NULL,
-					BCH_TRANS_COMMIT_no_enospc,
-				gc_btree_gens_key(trans, &iter, k));
-			if (!bch2_err_matches(ret, EROFS))
-				bch_err_fn(c, ret);
+			ret = bch2_trans_run(c,
+				for_each_btree_key_commit(trans, iter, i, POS_MIN,
+						BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
+						k,
+						NULL, NULL,
+						BCH_TRANS_COMMIT_no_enospc,
+					gc_btree_gens_key(trans, &iter, k)));
 			if (ret)
 				goto err;
 		}

-	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN,
-			BTREE_ITER_PREFETCH,
-			k,
-			NULL, NULL,
-			BCH_TRANS_COMMIT_no_enospc,
-		bch2_alloc_write_oldest_gen(trans, &iter, k));
-	if (!bch2_err_matches(ret, EROFS))
-		bch_err_fn(c, ret);
+	ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN,
+				BTREE_ITER_PREFETCH,
+				k,
+				NULL, NULL,
+				BCH_TRANS_COMMIT_no_enospc,
+			bch2_alloc_write_oldest_gen(trans, &iter, k)));
 	if (ret)
 		goto err;

@@ -2005,14 +1969,15 @@ int bch2_gc_gens(struct bch_fs *c)
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
 	trace_and_count(c, gc_gens_end, c);
 err:
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		kvfree(ca->oldest_gen);
 		ca->oldest_gen = NULL;
 	}

-	bch2_trans_put(trans);
 	up_read(&c->gc_lock);
 	mutex_unlock(&c->gc_gens_lock);
+	if (!bch2_err_matches(ret, EROFS))
+		bch_err_fn(c, ret);
 	return ret;
 }
fs/bcachefs/buckets.c

@@ -47,27 +47,23 @@ static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
 void bch2_fs_usage_initialize(struct bch_fs *c)
 {
-	struct bch_fs_usage *usage;
-	struct bch_dev *ca;
-	unsigned i;
-
 	percpu_down_write(&c->mark_lock);
-	usage = c->usage_base;
+	struct bch_fs_usage *usage = c->usage_base;

-	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
 		bch2_fs_usage_acc_to_base(c, i);

-	for (i = 0; i < BCH_REPLICAS_MAX; i++)
+	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
 		usage->reserved += usage->persistent_reserved[i];

-	for (i = 0; i < c->replicas.nr; i++) {
+	for (unsigned i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry_v1 *e = cpu_replicas_entry(&c->replicas, i);

 		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
 	}

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bch_dev_usage dev = bch2_dev_usage_read(ca);

 		usage->hidden += (dev.d[BCH_DATA_sb].buckets +

@@ -1766,10 +1762,7 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
 int bch2_trans_mark_dev_sbs(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		int ret = bch2_trans_mark_dev_sb(c, ca);
 		if (ret) {
 			percpu_ref_put(&ca->ref);
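One behavioural detail carried over from the old macros and visible in the bch2_trans_mark_dev_sbs() hunk above: the iterator holds a ref on the current device for the body of each iteration, so a caller that leaves the loop early must drop that ref itself (the header comment in sb-members.h says as much). The pattern, sketched from the hunk:

	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			/* leaving the loop early: drop the ref the iterator took */
			percpu_ref_put(&ca->ref);
			break;
		}
	}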
fs/bcachefs/chardev.c

@@ -866,8 +866,6 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
 					   struct bch_ioctl_disk_get_idx arg)
 {
 	dev_t dev = huge_decode_dev(arg.dev);
-	struct bch_dev *ca;
-	unsigned i;

 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

@@ -875,10 +873,10 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
 	if (!dev)
 		return -EINVAL;

-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		if (ca->dev == dev) {
 			percpu_ref_put(&ca->io_ref);
-			return i;
+			return ca->dev_idx;
 		}

 	return -BCH_ERR_ENOENT_dev_idx_not_found;
fs/bcachefs/fs.c

@@ -1694,11 +1694,9 @@ static int bch2_remount(struct super_block *sb, int *flags, char *data)
 static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
 {
 	struct bch_fs *c = root->d_sb->s_fs_info;
-	struct bch_dev *ca;
-	unsigned i;
 	bool first = true;

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		if (!first)
 			seq_putc(seq, ':');
 		first = false;

@@ -1822,13 +1820,12 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
 				  int flags, const char *dev_name, void *data)
 {
 	struct bch_fs *c;
-	struct bch_dev *ca;
 	struct super_block *sb;
 	struct inode *vinode;
 	struct bch_opts opts = bch2_opts_empty();
 	char **devs;
 	struct bch_fs **devs_to_fs = NULL;
-	unsigned i, nr_devs;
+	unsigned nr_devs;
 	int ret;

 	opt_set(opts, read_only, (flags & SB_RDONLY) != 0);

@@ -1850,7 +1847,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
 		goto got_sb;
 	}

-	for (i = 0; i < nr_devs; i++)
+	for (unsigned i = 0; i < nr_devs; i++)
 		devs_to_fs[i] = bch2_path_to_fs(devs[i]);

 	sb = sget(fs_type, bch2_test_super, bch2_noset_super,

@@ -1921,7 +1918,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
 	sb->s_bdi->ra_pages		= VM_READAHEAD_PAGES;

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		struct block_device *bdev = ca->disk_sb.bdev;

 		/* XXX: create an anonymous device for multi device filesystems */
fs/bcachefs/journal.c

@@ -1029,10 +1029,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
 int bch2_fs_journal_alloc(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		if (ca->journal.nr)
 			continue;
fs/bcachefs/journal_io.c

@@ -1171,8 +1171,6 @@ int bch2_journal_read(struct bch_fs *c,
 	struct journal_list jlist;
 	struct journal_replay *i, **_i, *prev = NULL;
 	struct genradix_iter radix_iter;
-	struct bch_dev *ca;
-	unsigned iter;
 	struct printbuf buf = PRINTBUF;
 	bool degraded = false, last_write_torn = false;
 	u64 seq;

@@ -1183,7 +1181,7 @@ int bch2_journal_read(struct bch_fs *c,
 	jlist.last_seq = 0;
 	jlist.ret = 0;

-	for_each_member_device(ca, c, iter) {
+	for_each_member_device(c, ca) {
 		if (!c->opts.fsck &&
 		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
 			continue;

@@ -1349,7 +1347,7 @@ int bch2_journal_read(struct bch_fs *c,
 			continue;

 		for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
-			ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
+			struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);

 			if (!i->ptrs[ptr].csum_good)
 				bch_err_dev_offset(ca, i->ptrs[ptr].sector,

@@ -1893,12 +1891,11 @@ CLOSURE_CALLBACK(bch2_journal_write)
 {
 	closure_type(j, struct journal, io);
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
-	struct bch_dev *ca;
 	struct journal_buf *w = journal_last_unwritten_buf(j);
 	struct bch_replicas_padded replicas;
 	struct bio *bio;
 	struct printbuf journal_debug_buf = PRINTBUF;
-	unsigned i, nr_rw_members = 0;
+	unsigned nr_rw_members = 0;
 	int ret;

 	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

@@ -1958,7 +1955,7 @@ CLOSURE_CALLBACK(bch2_journal_write)
 	if (c->opts.nochanges)
 		goto no_io;

-	for_each_rw_member(ca, c, i)
+	for_each_rw_member(c, ca)
 		nr_rw_members++;

 	if (nr_rw_members > 1)

@@ -1975,7 +1972,7 @@ CLOSURE_CALLBACK(bch2_journal_write)
 		goto err;

 	if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
-		for_each_rw_member(ca, c, i) {
+		for_each_rw_member(c, ca) {
 			percpu_ref_get(&ca->io_ref);

 			bio = ca->journal.bio;
fs/bcachefs/journal_reclaim.c

@@ -263,12 +263,10 @@ static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
 void bch2_journal_do_discards(struct journal *j)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
-	struct bch_dev *ca;
-	unsigned iter;

 	mutex_lock(&j->discard_lock);

-	for_each_rw_member(ca, c, iter) {
+	for_each_rw_member(c, ca) {
 		struct journal_device *ja = &ca->journal;

 		while (should_discard_bucket(j, ja)) {

@@ -583,13 +581,11 @@ static size_t journal_flush_pins(struct journal *j,
 static u64 journal_seq_to_flush(struct journal *j)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
-	struct bch_dev *ca;
 	u64 seq_to_flush = 0;
-	unsigned iter;

 	spin_lock(&j->lock);

-	for_each_rw_member(ca, c, iter) {
+	for_each_rw_member(c, ca) {
 		struct journal_device *ja = &ca->journal;
 		unsigned nr_buckets, bucket_to_flush;
fs/bcachefs/movinggc.c

@@ -267,19 +267,16 @@ static int bch2_copygc(struct moving_context *ctxt,
  */
 unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned dev_idx;
 	s64 wait = S64_MAX, fragmented_allowed, fragmented;
-	unsigned i;

-	for_each_rw_member(ca, c, dev_idx) {
+	for_each_rw_member(c, ca) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

 		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
 				       ca->mi.bucket_size) >> 1);
 		fragmented = 0;

-		for (i = 0; i < BCH_DATA_NR; i++)
+		for (unsigned i = 0; i < BCH_DATA_NR; i++)
 			if (data_type_movable(i))
 				fragmented += usage.d[i].fragmented;
fs/bcachefs/recovery.c

@@ -1097,8 +1097,6 @@ int bch2_fs_initialize(struct bch_fs *c)
 	struct bch_inode_unpacked root_inode, lostfound_inode;
 	struct bkey_inode_buf packed_inode;
 	struct qstr lostfound = QSTR("lost+found");
-	struct bch_dev *ca;
-	unsigned i;
 	int ret;

 	bch_notice(c, "initializing new filesystem");

@@ -1120,10 +1118,10 @@ int bch2_fs_initialize(struct bch_fs *c)
 	set_bit(BCH_FS_may_go_rw, &c->flags);
 	set_bit(BCH_FS_fsck_done, &c->flags);

-	for (i = 0; i < BTREE_ID_NR; i++)
+	for (unsigned i = 0; i < BTREE_ID_NR; i++)
 		bch2_btree_root_alloc(c, i);

-	for_each_member_device(ca, c, i)
+	for_each_member_device(c, ca)
 		bch2_dev_usage_init(ca);

 	ret = bch2_fs_journal_alloc(c);

@@ -1151,7 +1149,7 @@ int bch2_fs_initialize(struct bch_fs *c)
 	if (ret)
 		goto err;

-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		ca->new_fs_bucket_idx = 0;

 	ret = bch2_fs_freespace_init(c);

@@ -1214,6 +1212,6 @@ int bch2_fs_initialize(struct bch_fs *c)
 	return 0;
 err:
-	bch_err_fn(ca, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
fs/bcachefs/sb-clean.c

@@ -191,13 +191,10 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 					   struct jset_entry **end,
 					   u64 journal_seq)
 {
-	struct bch_dev *ca;
-	unsigned i, dev;
-
 	percpu_down_read(&c->mark_lock);

 	if (!journal_seq) {
-		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+		for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
 			bch2_fs_usage_acc_to_base(c, i);
 	} else {
 		bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);

@@ -223,7 +220,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 		u->v = cpu_to_le64(atomic64_read(&c->key_version));
 	}

-	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
 		struct jset_entry_usage *u =
 			container_of(jset_entry_init(end, sizeof(*u)),
 				     struct jset_entry_usage, entry);

@@ -234,7 +231,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 		u->v = cpu_to_le64(c->usage_base->persistent_reserved[i]);
 	}

-	for (i = 0; i < c->replicas.nr; i++) {
+	for (unsigned i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry_v1 *e =
 			cpu_replicas_entry(&c->replicas, i);
 		struct jset_entry_data_usage *u =

@@ -247,7 +244,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 			     "embedded variable length struct");
 	}

-	for_each_member_device(ca, c, dev) {
+	for_each_member_device(c, ca) {
 		unsigned b = sizeof(struct jset_entry_dev_usage) +
 			sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
 		struct jset_entry_dev_usage *u =

@@ -255,9 +252,9 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 				     struct jset_entry_dev_usage, entry);

 		u->entry.type = BCH_JSET_ENTRY_dev_usage;
-		u->dev = cpu_to_le32(dev);
+		u->dev = cpu_to_le32(ca->dev_idx);

-		for (i = 0; i < BCH_DATA_NR; i++) {
+		for (unsigned i = 0; i < BCH_DATA_NR; i++) {
 			u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
 			u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
 			u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);

@@ -266,7 +263,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 	percpu_up_read(&c->mark_lock);

-	for (i = 0; i < 2; i++) {
+	for (unsigned i = 0; i < 2; i++) {
 		struct jset_entry_clock *clock =
 			container_of(jset_entry_init(end, sizeof(*clock)),
 				     struct jset_entry_clock, entry);
fs/bcachefs/sb-members.h

@@ -97,12 +97,15 @@ static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
 #define for_each_member_device_rcu(ca, c, iter, mask)			\
 	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

-static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
+static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
 {
-	struct bch_dev *ca;
+	unsigned idx = ca ? ca->dev_idx + 1 : 0;
+
+	if (ca)
+		percpu_ref_put(&ca->ref);

 	rcu_read_lock();
-	if ((ca = __bch2_next_dev(c, iter, NULL)))
+	if ((ca = __bch2_next_dev(c, &idx, NULL)))
 		percpu_ref_get(&ca->ref);
 	rcu_read_unlock();

@@ -112,41 +115,44 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter
 /*
  * If you break early, you must drop your ref on the current device
  */
-#define for_each_member_device(ca, c, iter)				\
-	for ((iter) = 0;						\
-	     (ca = bch2_get_next_dev(c, &(iter)));			\
-	     percpu_ref_put(&ca->ref), (iter)++)
+#define __for_each_member_device(_c, _ca)				\
+	for (;	(_ca = bch2_get_next_dev(_c, _ca));)
+
+#define for_each_member_device(_c, _ca)					\
+	for (struct bch_dev *_ca = NULL;				\
+	     (_ca = bch2_get_next_dev(_c, _ca));)

 static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
-						       unsigned *iter,
-						       int state_mask)
+						       struct bch_dev *ca,
+						       unsigned state_mask)
 {
-	struct bch_dev *ca;
+	unsigned idx = ca ? ca->dev_idx + 1 : 0;
+
+	if (ca)
+		percpu_ref_put(&ca->io_ref);

 	rcu_read_lock();
-	while ((ca = __bch2_next_dev(c, iter, NULL)) &&
+	while ((ca = __bch2_next_dev(c, &idx, NULL)) &&
 	       (!((1 << ca->mi.state) & state_mask) ||
 		!percpu_ref_tryget(&ca->io_ref)))
-		(*iter)++;
+		idx++;
 	rcu_read_unlock();

 	return ca;
 }

-#define __for_each_online_member(ca, c, iter, state_mask)		\
-	for ((iter) = 0;						\
-	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
-	     percpu_ref_put(&ca->io_ref), (iter)++)
+#define __for_each_online_member(_c, _ca, state_mask)			\
+	for (struct bch_dev *_ca = NULL;				\
+	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

-#define for_each_online_member(ca, c, iter)				\
-	__for_each_online_member(ca, c, iter, ~0)
+#define for_each_online_member(c, ca)					\
+	__for_each_online_member(c, ca, ~0)

-#define for_each_rw_member(ca, c, iter)					\
-	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
+#define for_each_rw_member(c, ca)					\
+	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

-#define for_each_readable_member(ca, c, iter)				\
-	__for_each_online_member(ca, c, iter,				\
-		(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
+#define for_each_readable_member(c, ca)					\
+	__for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))

 /*
  * If a key exists that references a device, the device won't be going away and

@@ -172,11 +178,9 @@ static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
 static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
 {
 	struct bch_devs_mask devs;
-	struct bch_dev *ca;
-	unsigned i;

 	memset(&devs, 0, sizeof(devs));
-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		__set_bit(ca->dev_idx, devs.d);
 	return devs;
 }
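The mechanism behind the new form is visible in the header hunk above: the macro declares the loop variable in the for-statement's own scope (C99 style), and bch2_get_next_dev() takes the previous device instead of an index pointer, drops that device's ref, and resumes the walk from dev_idx + 1. A standalone toy illustration of the same pattern (hypothetical names, plain userspace C, no refcounting):

	#include <stdio.h>

	struct item { int idx; int live; };

	static struct item items[] = { {0, 1}, {1, 0}, {2, 1} };

	/* Return the next live item after 'prev', or NULL when the walk is done;
	 * like bch2_get_next_dev(), it resumes from prev->idx + 1. */
	static struct item *next_live_item(struct item *prev)
	{
		for (int i = prev ? prev->idx + 1 : 0; i < 3; i++)
			if (items[i].live)
				return &items[i];
		return NULL;
	}

	/* The macro declares its own loop variable, so callers don't have to. */
	#define for_each_live_item(_it) \
		for (struct item *_it = NULL; (_it = next_live_item(_it)); )

	int main(void)
	{
		for_each_live_item(it)
			printf("item %d\n", it->idx);
		return 0;
	}

The real macros additionally move the percpu ref get/put into bch2_get_next_dev() itself, which is why the explicit puts disappear from the old macros' increment expressions.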
fs/bcachefs/super-io.c

@@ -241,14 +241,12 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
 	if (sb->fs_sb) {
 		struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
-		struct bch_dev *ca;
-		unsigned i;

 		lockdep_assert_held(&c->sb_lock);

 		/* XXX: we're not checking that offline device have enough space */

-		for_each_online_member(ca, c, i) {
+		for_each_online_member(c, ca) {
 			struct bch_sb_handle *dev_sb = &ca->disk_sb;

 			if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {

@@ -514,8 +512,6 @@ static void le_bitvector_to_cpu(unsigned long *dst, unsigned long *src, unsigned
 static void bch2_sb_update(struct bch_fs *c)
 {
 	struct bch_sb *src = c->disk_sb.sb;
-	struct bch_dev *ca;
-	unsigned i;

 	lockdep_assert_held(&c->sb_lock);

@@ -546,7 +542,7 @@ static void bch2_sb_update(struct bch_fs *c)
 		le_bitvector_to_cpu(c->sb.errors_silent, (void *) ext->errors_silent,
 				    sizeof(c->sb.errors_silent) * 8);

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);

 		ca->mi = bch2_mi_to_cpu(&m);
 	}

@@ -926,9 +922,8 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 int bch2_write_super(struct bch_fs *c)
 {
 	struct closure *cl = &c->sb_write;
-	struct bch_dev *ca;
 	struct printbuf err = PRINTBUF;
-	unsigned i, sb = 0, nr_wrote;
+	unsigned sb = 0, nr_wrote;
 	struct bch_devs_mask sb_written;
 	bool wrote, can_mount_without_written, can_mount_with_written;
 	unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;

@@ -963,10 +958,10 @@ int bch2_write_super(struct bch_fs *c)
 	bch2_sb_errors_from_cpu(c);
 	bch2_sb_downgrade_update(c);

-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		bch2_sb_from_fs(c, ca);

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		printbuf_reset(&err);

 		ret = bch2_sb_validate(&ca->disk_sb, &err, WRITE);

@@ -999,16 +994,16 @@ int bch2_write_super(struct bch_fs *c)
 		return -BCH_ERR_sb_not_downgraded;
 	}

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		__set_bit(ca->dev_idx, sb_written.d);
 		ca->sb_write_error = 0;
 	}

-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		read_back_super(c, ca);
 	closure_sync(cl);

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		if (ca->sb_write_error)
 			continue;

@@ -1035,7 +1030,7 @@ int bch2_write_super(struct bch_fs *c)
 	do {
 		wrote = false;
-		for_each_online_member(ca, c, i)
+		for_each_online_member(c, ca)
 			if (!ca->sb_write_error &&
 			    sb < ca->disk_sb.sb->layout.nr_superblocks) {
 				write_one_super(c, ca, sb);

@@ -1045,7 +1040,7 @@ int bch2_write_super(struct bch_fs *c)
 		sb++;
 	} while (wrote);

-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		if (ca->sb_write_error)
 			__clear_bit(ca->dev_idx, sb_written.d);
 		else

@@ -1057,7 +1052,7 @@ int bch2_write_super(struct bch_fs *c)
 	can_mount_with_written =
 		bch2_have_enough_devs(c, sb_written, degraded_flags, false);

-	for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(sb_written.d); i++)
 		sb_written.d[i] = ~sb_written.d[i];

 	can_mount_without_written =
fs/bcachefs/super.c

@@ -249,8 +249,7 @@ static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 static void __bch2_fs_read_only(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i, clean_passes = 0;
+	unsigned clean_passes = 0;
 	u64 seq = 0;

 	bch2_fs_ec_stop(c);

@@ -286,7 +285,7 @@ static void __bch2_fs_read_only(struct bch_fs *c)
 	/*
 	 * After stopping journal:
 	 */
-	for_each_member_device(ca, c, i)
+	for_each_member_device(c, ca)
 		bch2_dev_allocator_remove(c, ca);
 }

@@ -427,8 +426,6 @@ static int bch2_fs_read_write_late(struct bch_fs *c)
 static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	int ret;

 	if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {

@@ -469,7 +466,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 	 */
 	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);

-	for_each_rw_member(ca, c, i)
+	for_each_rw_member(c, ca)
 		bch2_dev_allocator_add(c, ca);
 	bch2_recalc_capacity(c);

@@ -479,7 +476,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 #ifndef BCH_WRITE_REF_DEBUG
 	percpu_ref_reinit(&c->writes);
 #else
-	for (i = 0; i < BCH_WRITE_REF_NR; i++) {
+	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
 		BUG_ON(atomic_long_read(&c->writes[i]));
 		atomic_long_inc(&c->writes[i]);
 	}

@@ -602,9 +599,6 @@ static void bch2_fs_release(struct kobject *kobj)
 void __bch2_fs_stop(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	bch_verbose(c, "shutting down");

 	set_bit(BCH_FS_stopping, &c->flags);

@@ -615,7 +609,7 @@ void __bch2_fs_stop(struct bch_fs *c)
 	bch2_fs_read_only(c);
 	up_write(&c->state_lock);

-	for_each_member_device(ca, c, i)
+	for_each_member_device(c, ca)
 		if (ca->kobj.state_in_sysfs &&
 		    ca->disk_sb.bdev)
 			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");

@@ -637,7 +631,7 @@ void __bch2_fs_stop(struct bch_fs *c)
 	/* btree prefetch might have kicked off reads in the background: */
 	bch2_btree_flush_all_reads(c);

-	for_each_member_device(ca, c, i)
+	for_each_member_device(c, ca)
 		cancel_work_sync(&ca->io_error_work);

 	cancel_work_sync(&c->read_only_work);

@@ -676,8 +670,6 @@ void bch2_fs_stop(struct bch_fs *c)
 static int bch2_fs_online(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	int ret = 0;

 	lockdep_assert_held(&bch_fs_list_lock);

@@ -710,7 +702,7 @@ static int bch2_fs_online(struct bch_fs *c)
 	down_write(&c->state_lock);

-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		ret = bch2_dev_sysfs_online(c, ca);
 		if (ret) {
 			bch_err(c, "error creating sysfs objects");

@@ -1000,9 +992,7 @@ static void print_mount_opts(struct bch_fs *c)
 int bch2_fs_start(struct bch_fs *c)
 {
-	struct bch_dev *ca;
 	time64_t now = ktime_get_real_seconds();
-	unsigned i;
 	int ret;

 	print_mount_opts(c);

@@ -1019,12 +1009,12 @@ int bch2_fs_start(struct bch_fs *c)
 		goto err;
 	}

-	for_each_online_member(ca, c, i)
-		bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
+	for_each_online_member(c, ca)
+		bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);

 	mutex_unlock(&c->sb_lock);

-	for_each_rw_member(ca, c, i)
+	for_each_rw_member(c, ca)
 		bch2_dev_allocator_add(c, ca);
 	bch2_recalc_capacity(c);

@@ -1362,8 +1352,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
 			    enum bch_member_state new_state, int flags)
 {
 	struct bch_devs_mask new_online_devs;
-	struct bch_dev *ca2;
-	int i, nr_rw = 0, required;
+	int nr_rw = 0, required;

 	lockdep_assert_held(&c->state_lock);

@@ -1375,7 +1364,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
 		return true;

 	/* do we have enough devices to write to? */
-	for_each_member_device(ca2, c, i)
+	for_each_member_device(c, ca2)
 		if (ca2 != ca)
 			nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;