Commit 1e81f89b authored Aug 07, 2023 by Kent Overstreet
bcachefs: Fix assorted checkpatch nits
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6fe893ea
Showing 22 changed files with 55 additions and 39 deletions (+55 -39)
fs/bcachefs/alloc_types.h       +2 -2
fs/bcachefs/btree_gc.c          +1 -1
fs/bcachefs/btree_io.h          +2 -2
fs/bcachefs/btree_iter.c        +4 -4
fs/bcachefs/btree_update.h      +2 -2
fs/bcachefs/buckets.c           +1 -0
fs/bcachefs/chardev.h           +1 -1
fs/bcachefs/checksum.c          +7 -5
fs/bcachefs/compress.c          +2 -1
fs/bcachefs/extents.c           +6 -6
fs/bcachefs/fs-io.c             +1 -1
fs/bcachefs/io.c                +1 -0
fs/bcachefs/io.h                +1 -1
fs/bcachefs/journal.c           +2 -2
fs/bcachefs/journal_io.c        +1 -1
fs/bcachefs/journal_reclaim.c   +1 -1
fs/bcachefs/recovery.c          +2 -2
fs/bcachefs/super-io.c          +3 -1
fs/bcachefs/super-io.h          +1 -0
fs/bcachefs/util.c              +7 -4
fs/bcachefs/util.h              +6 -2
fs/bcachefs/varint.c            +1 -0
fs/bcachefs/alloc_types.h

@@ -105,7 +105,7 @@ struct write_point {
 	struct dev_stripe_state	stripe;
 	u64			sectors_allocated;
-} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+} __aligned(SMP_CACHE_BYTES);
 	struct {
 		struct work_struct	index_update_work;
@@ -116,7 +116,7 @@ struct write_point {
 		enum write_point_state	state;
 		u64			last_state_change;
 		u64			time[WRITE_POINT_STATE_NR];
-	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+	} __aligned(SMP_CACHE_BYTES);
 };
 struct write_point_specifier {
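The first two hunks swap the open-coded attribute for the kernel's __aligned() shorthand. As a rough userspace sketch of what that shorthand expands to (the macro below mirrors the definition in include/linux/compiler_attributes.h; EXAMPLE_CACHE_BYTES is a made-up stand-in for SMP_CACHE_BYTES):

#include <stdalign.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's shorthand from <linux/compiler_attributes.h>. */
#define __aligned(x) __attribute__((__aligned__(x)))

/* Hypothetical cache-line size for the example; the kernel uses SMP_CACHE_BYTES. */
#define EXAMPLE_CACHE_BYTES 64

struct padded_counter {
	unsigned long value;
} __aligned(EXAMPLE_CACHE_BYTES);

int main(void)
{
	/* The attribute aligns the struct to a full cache line and pads it to match. */
	printf("alignof = %zu, sizeof = %zu\n",
	       alignof(struct padded_counter), sizeof(struct padded_counter));
	return 0;
}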
fs/bcachefs/btree_gc.c

@@ -535,7 +535,7 @@ int bch2_check_topology(struct bch_fs *c)
 	bch2_trans_init(&trans, c, 0, 0);
-	for (i = 0; i < btree_id_nr_alive(c)&& !ret; i++) {
+	for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
 		if (!r->alive)
fs/bcachefs/btree_io.h

@@ -143,8 +143,8 @@ enum btree_write_flags {
 	__BTREE_WRITE_ONLY_IF_NEED	= BTREE_WRITE_TYPE_BITS,
 	__BTREE_WRITE_ALREADY_STARTED,
 };
-#define BTREE_WRITE_ONLY_IF_NEED	(1U << __BTREE_WRITE_ONLY_IF_NEED)
-#define BTREE_WRITE_ALREADY_STARTED	(1U << __BTREE_WRITE_ALREADY_STARTED)
+#define BTREE_WRITE_ONLY_IF_NEED	BIT(__BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED	BIT(__BTREE_WRITE_ALREADY_STARTED)
 void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
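Converting (1U << x) to BIT(x) is a recurring checkpatch suggestion in this commit. A minimal, self-contained sketch of the idea follows; the BIT()/BIT_ULL() definitions are simplified stand-ins for the kernel's <linux/bits.h>, and the example flag names are hypothetical:

#include <stdio.h>

/* Simplified stand-ins for the kernel's BIT()/BIT_ULL() from <linux/bits.h>. */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

enum example_flag_bits {
	__EXAMPLE_FLAG_DIRTY,
	__EXAMPLE_FLAG_LOCKED,
};

#define EXAMPLE_FLAG_DIRTY	BIT(__EXAMPLE_FLAG_DIRTY)
#define EXAMPLE_FLAG_LOCKED	BIT(__EXAMPLE_FLAG_LOCKED)

int main(void)
{
	unsigned long flags = EXAMPLE_FLAG_DIRTY | EXAMPLE_FLAG_LOCKED;

	printf("flags = %#lx\n", flags);	/* prints 0x3 */
	return 0;
}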
fs/bcachefs/btree_iter.c

@@ -1008,7 +1008,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	/*
 	 * We used to assert that all paths had been traversed here
 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
-	 * path->Should_be_locked is not set yet, we we might have unlocked and
+	 * path->should_be_locked is not set yet, we might have unlocked and
 	 * then failed to relock a path - that's fine.
 	 */
 err:
@@ -2738,9 +2738,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
 			       unsigned depth,
 			       unsigned flags)
 {
-       flags |= BTREE_ITER_NOT_EXTENTS;
-       flags |= __BTREE_ITER_ALL_SNAPSHOTS;
-       flags |= BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_NOT_EXTENTS;
+	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_ALL_SNAPSHOTS;
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
 				    __bch2_btree_iter_flags(trans, btree_id, flags),
fs/bcachefs/btree_update.h

@@ -268,10 +268,10 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
 {
 	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
 			btree_id, pos, flags|BTREE_ITER_INTENT, type);
-	struct bkey_i *ret = unlikely(IS_ERR(k.k))
+	struct bkey_i *ret = IS_ERR(k.k)
 		? ERR_CAST(k.k)
 		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
-	if (unlikely(IS_ERR(ret)))
+	if (IS_ERR(ret))
 		bch2_trans_iter_exit(trans, iter);
 	return ret;
 }
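Dropping unlikely() around IS_ERR() is safe because the kernel's IS_ERR_VALUE() already carries the branch hint. Below is a rough userspace approximation of the error-pointer helpers (simplified versions of what include/linux/err.h provides; lookup() and its names are hypothetical):

#include <errno.h>
#include <stdio.h>

/* Simplified userspace stand-ins for the kernel's err.h helpers. */
#define MAX_ERRNO	4095
#define unlikely(x)	__builtin_expect(!!(x), 0)

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* The branch hint lives here, so callers don't need their own unlikely(). */
	return unlikely((unsigned long)ptr >= (unsigned long)-MAX_ERRNO);
}

static void *lookup(int fail)
{
	static int value = 42;

	return fail ? ERR_PTR(-ENOENT) : &value;
}

int main(void)
{
	void *p = lookup(1);

	if (IS_ERR(p))		/* no extra unlikely() needed */
		printf("error %ld\n", PTR_ERR(p));
	return 0;
}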
fs/bcachefs/buckets.c

@@ -1924,6 +1924,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
 {
 	int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
+
 	if (ret)
 		bch_err_fn(c, ret);
 	return ret;
fs/bcachefs/chardev.h

@@ -17,7 +17,7 @@ int __init bch2_chardev_init(void);
 static inline long bch2_fs_ioctl(struct bch_fs *c,
				unsigned cmd, void __user * arg)
 {
-	return -ENOSYS;
+	return -ENOTTY;
 }
 static inline void bch2_fs_chardev_exit(struct bch_fs *c) {}
fs/bcachefs/checksum.c

@@ -265,9 +265,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 #ifdef CONFIG_HIGHMEM
 	__bio_for_each_segment(bv, bio, *iter, *iter) {
-		void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+		void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
+
 		bch2_checksum_update(&state, p, bv.bv_len);
-		kunmap_atomic(p);
+		kunmap_local(p);
 	}
 #else
 	__bio_for_each_bvec(bv, bio, *iter, *iter)
@@ -287,10 +288,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 #ifdef CONFIG_HIGHMEM
 	__bio_for_each_segment(bv, bio, *iter, *iter) {
-		void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+		void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
 		crypto_shash_update(desc, p, bv.bv_len);
-		kunmap_atomic(p);
+		kunmap_local(p);
 	}
 #else
 	__bio_for_each_bvec(bv, bio, *iter, *iter)
@@ -427,8 +428,9 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
 				extent_nonce(version, crc_old), bio);
 	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
-		bch_err(c, "checksum error in bch2_rechecksum_bio() (memory corruption or bug?)\n"
+		bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
 			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
+			__func__,
 			crc_old.csum.hi,
 			crc_old.csum.lo,
 			merged.hi,
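The last checksum.c hunk replaces a hard-coded function name in the error string with %s and __func__, so the message cannot drift if the function is renamed (the kmap_local_page() conversions above are kernel-only API and not easy to demo outside the kernel). Since __func__ is standard C99, here is a small userspace sketch; report_err() and check_sums() are hypothetical stand-ins for bch_err() and the real caller:

#include <stdio.h>

/* Hypothetical error helper standing in for bch_err(); ## __VA_ARGS__ is a GNU extension. */
#define report_err(fmt, ...) fprintf(stderr, fmt "\n", ##__VA_ARGS__)

static int check_sums(unsigned long expected, unsigned long got)
{
	if (expected != got) {
		/* The message stays correct even if this function is later renamed. */
		report_err("checksum error in %s() (memory corruption or bug?): "
			   "expected %#lx got %#lx", __func__, expected, got);
		return -1;
	}
	return 0;
}

int main(void)
{
	return check_sums(0xdeadbeefUL, 0xfeedfaceUL) ? 1 : 0;
}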
fs/bcachefs/compress.c

@@ -643,7 +643,8 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 static u64 compression_opt_to_feature(unsigned v)
 {
 	unsigned type = bch2_compression_decode(v).type;
-	return 1ULL << bch2_compression_opt_to_feature[type];
+
+	return BIT_ULL(bch2_compression_opt_to_feature[type]);
 }
 int bch2_fs_compress_init(struct bch_fs *c)
fs/bcachefs/extents.c

@@ -517,7 +517,7 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
 	switch (type) {
 	case BCH_EXTENT_ENTRY_crc32:
 		set_common_fields(dst->crc32, src);
-		dst->crc32.csum	 = (u32 __force) *((__le32 *) &src.csum.lo);
+		dst->crc32.csum		= (u32 __force) *((__le32 *) &src.csum.lo);
 		break;
 	case BCH_EXTENT_ENTRY_crc64:
 		set_common_fields(dst->crc64, src);
@@ -915,11 +915,11 @@ bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
 		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
 			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
-			if (p1.ptr.dev		== p2.ptr.dev &&
-			    p1.ptr.gen		== p2.ptr.gen &&
-			    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
-			    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
-				return true;
+				if (p1.ptr.dev		== p2.ptr.dev &&
+				    p1.ptr.gen		== p2.ptr.gen &&
+				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
+				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
+					return true;
 		return false;
 	} else {
fs/bcachefs/fs-io.c

@@ -2867,7 +2867,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
-	if (unlikely(IS_ERR_OR_NULL(folio))) {
+	if (IS_ERR_OR_NULL(folio)) {
 		ret = -ENOMEM;
 		goto out;
 	}
fs/bcachefs/io.c

@@ -2435,6 +2435,7 @@ static void __bch2_read_endio(struct work_struct *work)
 	if (rbio->bounce) {
 		struct bvec_iter src_iter = src->bi_iter;
+
 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
 	}
 }
fs/bcachefs/io.h

@@ -52,7 +52,7 @@ enum __bch_write_flags {
 };
 enum bch_write_flags {
-#define x(f)	BCH_WRITE_##f = 1U << __BCH_WRITE_##f,
+#define x(f)	BCH_WRITE_##f = BIT(__BCH_WRITE_##f),
 	BCH_WRITE_FLAGS()
 #undef x
 };
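The io.h change applies the same BIT() conversion inside an x-macro. A self-contained userspace sketch of that expansion pattern follows; EXAMPLE_WRITE_FLAGS() and its entries are hypothetical, standing in for bcachefs's BCH_WRITE_FLAGS() list:

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* simplified stand-in for <linux/bits.h> */

/* Hypothetical flag list; bcachefs defines BCH_WRITE_FLAGS() with its own entries. */
#define EXAMPLE_WRITE_FLAGS()		\
	x(CACHED)			\
	x(FLUSH)

/* First expansion: sequential bit numbers. */
enum __example_write_flags {
#define x(f)	__EX_WRITE_##f,
	EXAMPLE_WRITE_FLAGS()
#undef x
};

/* Second expansion: bit masks, now spelled with BIT() as in the patch. */
enum example_write_flags {
#define x(f)	EX_WRITE_##f = BIT(__EX_WRITE_##f),
	EXAMPLE_WRITE_FLAGS()
#undef x
};

int main(void)
{
	printf("CACHED=%#lx FLUSH=%#lx\n",
	       (unsigned long)EX_WRITE_CACHED, (unsigned long)EX_WRITE_FLUSH);
	return 0;
}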
fs/bcachefs/journal.c

@@ -63,6 +63,7 @@ journal_seq_to_buf(struct journal *j, u64 seq)
 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
 {
 	unsigned i;
+
 	for (i = 0; i < ARRAY_SIZE(p->list); i++)
 		INIT_LIST_HEAD(&p->list[i]);
 	INIT_LIST_HEAD(&p->flushed);
@@ -514,8 +515,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
 	int ret;

 	closure_wait_event(&j->async_wait,
-		   (ret = __journal_res_get(j, res, flags)) !=
-		   -BCH_ERR_journal_res_get_blocked ||
+		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
 	return ret;
 }
fs/bcachefs/journal_io.c

@@ -1053,6 +1053,7 @@ static void bch2_journal_read_device(struct closure *cl)
 		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
 		for (i = 0; i < 3; i++) {
 			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
+
 			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
 		}
 		ja->sectors_free = 0;
@@ -1629,7 +1630,6 @@ static void do_journal_write(struct closure *cl)
 	}
 	continue_at(cl, journal_write_done, c->io_complete_wq);
-	return;
 }
 static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
fs/bcachefs/journal_reclaim.c

@@ -345,7 +345,7 @@ static inline bool __journal_pin_drop(struct journal *j,
 	list_del_init(&pin->list);
 	/*
-	 * Unpinning a journal entry make make journal_next_bucket() succeed, if
+	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
 	 * writing a new last_seq will now make another bucket available:
 	 */
 	return atomic_dec_and_test(&pin_list->count) &&
fs/bcachefs/recovery.c

@@ -648,7 +648,7 @@ static int bch2_journal_replay(struct bch_fs *c)
 	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
 	keys->gap = keys->nr;
-	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
+	keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
 	if (!keys_sorted)
 		return -BCH_ERR_ENOMEM_journal_replay;
@@ -1403,7 +1403,7 @@ int bch2_fs_recovery(struct bch_fs *c)
 	}
 	c->journal_replay_seq_start	= last_seq;
-	c->journal_replay_seq_end	= blacklist_seq - 1;;
+	c->journal_replay_seq_end	= blacklist_seq - 1;
 	if (c->opts.reconstruct_alloc) {
 		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
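The kvmalloc_array() hunk is an argument-order fix: like calloc() and kcalloc(), kernel array allocators take the element count first and the element size second, the pair they overflow-check before multiplying. A userspace sketch of that convention, using __builtin_mul_overflow() as a stand-in for the kernel's internal check; example_alloc_array() is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Userspace sketch of an overflow-checked array allocator with the
 * (count, element size) argument order used by calloc()/kvmalloc_array().
 */
static void *example_alloc_array(size_t n, size_t size)
{
	size_t bytes;

	if (__builtin_mul_overflow(n, size, &bytes))
		return NULL;		/* n * size would overflow size_t */
	return malloc(bytes);
}

int main(void)
{
	uint64_t *keys = example_alloc_array(1024, sizeof(*keys));

	if (!keys)
		return 1;
	printf("allocated %zu bytes\n", 1024 * sizeof(*keys));
	free(keys);
	return 0;
}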
fs/bcachefs/super-io.c

@@ -553,7 +553,9 @@ static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
 		d = (src_f ? le32_to_cpu(src_f->u64s) : 0) -
 		    (dst_f ? le32_to_cpu(dst_f->u64s) : 0);
 		if (d > 0) {
-			int ret = bch2_sb_realloc(dst_handle, le32_to_cpu(dst_handle->sb->u64s) + d);
+			int ret = bch2_sb_realloc(dst_handle,
+						  le32_to_cpu(dst_handle->sb->u64s) + d);
+
 			if (ret)
 				return ret;
fs/bcachefs/super-io.h

@@ -58,6 +58,7 @@ struct bch_sb_field_ops {
 static inline __le64 bch2_sb_magic(struct bch_fs *c)
 {
 	__le64 ret;
+
 	memcpy(&ret, &c->sb.uuid, sizeof(ret));
 	return ret;
 }
fs/bcachefs/util.c

@@ -216,6 +216,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[])
 	while ((p = strsep(&s, ","))) {
 		int flag = match_string(list, -1, p);
+
 		if (flag < 0) {
 			ret = -1;
 			break;
@@ -797,9 +798,10 @@ void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
 	struct bvec_iter iter;
 	__bio_for_each_segment(bv, dst, iter, dst_iter) {
-		void *dstp = kmap_atomic(bv.bv_page);
+		void *dstp = kmap_local_page(bv.bv_page);
+
 		memcpy(dstp + bv.bv_offset, src, bv.bv_len);
-		kunmap_atomic(dstp);
+		kunmap_local(dstp);
 		src += bv.bv_len;
 	}
@@ -811,9 +813,10 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
 	struct bvec_iter iter;
 	__bio_for_each_segment(bv, src, iter, src_iter) {
-		void *srcp = kmap_atomic(bv.bv_page);
+		void *srcp = kmap_local_page(bv.bv_page);
+
 		memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
-		kunmap_atomic(srcp);
+		kunmap_local(srcp);
 		dst += bv.bv_len;
 	}
fs/bcachefs/util.h

@@ -467,8 +467,10 @@ struct bch_pd_controller {
 	s64			last_change;
 	s64			last_target;
-	/* If true, the rate will not increase if bch2_ratelimit_delay()
-	 * is not being called often enough. */
+	/*
+	 * If true, the rate will not increase if bch2_ratelimit_delay()
+	 * is not being called often enough.
+	 */
 	bool			backpressure;
 };
@@ -604,6 +606,7 @@ static inline void __memcpy_u64s(void *dst, const void *src,
 {
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
@@ -680,6 +683,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
fs/bcachefs/varint.c

@@ -59,6 +59,7 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
 	if (likely(bytes < 9)) {
 		__le64 v_le = 0;
+
 		memcpy(&v_le, in, bytes);
 		v = le64_to_cpu(v_le);
 		v >>= bytes;