Commit bf5a261c authored by Kent Overstreet

bcachefs: Assorted fixes for clang

clang had a few more warnings about enum conversion, and also didn't
like the opts.c initializer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7904c82c
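Context for the hunks below (not part of the commit): most of this diff swaps enum constants and adds explicit casts to satisfy clang's -Wenum-conversion, which fires whenever a value of one enumeration type is implicitly converted to a different enumeration type. A minimal illustration of the warning class:

        /* sketch only; any two distinct enum types reproduce it */
        enum color { RED };
        enum fruit { APPLE };

        static enum fruit f(void)
        {
                return RED;     /* clang: implicit conversion from enumeration
                                 * type 'enum color' to different enumeration
                                 * type 'enum fruit' [-Wenum-conversion] */
        }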
@@ -79,36 +79,6 @@ static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
 	return v;
 }
 
-static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
-				      unsigned field, u64 v)
-{
-	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
-
-	if (!v)
-		return;
-
-	a->v.fields |= 1 << field;
-
-	switch (bytes) {
-	case 1:
-		*((u8 *) *p) = v;
-		break;
-	case 2:
-		*((__le16 *) *p) = cpu_to_le16(v);
-		break;
-	case 4:
-		*((__le32 *) *p) = cpu_to_le32(v);
-		break;
-	case 8:
-		*((__le64 *) *p) = cpu_to_le64(v);
-		break;
-	default:
-		BUG();
-	}
-
-	*p += bytes;
-}
-
 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
 				 struct bkey_s_c k)
 {
@@ -1334,7 +1304,7 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
 				     struct btree_iter *iter,
 				     struct bpos end)
 {
-	if (!btree_node_type_is_extents(iter->btree_id)) {
+	if (!btree_id_is_extents(iter->btree_id)) {
 		return __bch2_check_discard_freespace_key(trans, iter);
 	} else {
 		int ret = 0;
...
@@ -1042,8 +1042,12 @@ static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
 	unsigned i;
 
 	if (!drop && ob->ec) {
+		unsigned nr_blocks;
+
 		mutex_lock(&ob->ec->lock);
-		for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) {
+		nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+		for (i = 0; i < nr_blocks; i++) {
 			if (!ob->ec->blocks[i])
 				continue;
...
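The loop bound changes because ec->new_stripe.key is no longer declared as a struct bkey_i_stripe (see the ec.h hunk further down); it is now a plain padded bkey_i, so stripe-specific fields are reached through the per-type accessor. A hedged sketch of the accessor pattern bcachefs generates for each key type:

        static inline struct bkey_i_stripe *bkey_i_to_stripe(struct bkey_i *k)
        {
                /* container_of-style downcast from the generic key header to
                 * the stripe-typed view; sketch of the usual x-macro helper */
                return container_of(&k->k, struct bkey_i_stripe, k);
        }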
@@ -32,7 +32,7 @@ enum bch_watermark {
 };
 
 #define BCH_WATERMARK_BITS	3
-#define BCH_WATERMARK_MASK	~(~0 << BCH_WATERMARK_BITS)
+#define BCH_WATERMARK_MASK	~(~0U << BCH_WATERMARK_BITS)
 
 #define OPEN_BUCKETS_COUNT	1024
...
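The U suffix matters because ~0 has type int and value -1, and left-shifting a negative value is undefined behavior in C99 and later; clang reports it under -Wshift-negative-value. Illustrative sketch of both spellings:

        #define BITS 3
        static const int      bad  = ~(~0  << BITS);  /* -1 << 3: undefined,
                                                       * clang warns
                                                       * [-Wshift-negative-value] */
        static const unsigned good = ~(~0U << BITS);  /* well defined: 0x7 */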
@@ -2138,7 +2138,7 @@ struct jset_entry_dev_usage {
 	__le64			_buckets_unavailable; /* No longer used */
 
 	struct jset_entry_dev_usage_type d[];
-} __packed;
+};
 
 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
 {
...
@@ -35,18 +35,6 @@ static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
 
 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
 
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline int bch2_trans_cond_resched(struct btree_trans *trans)
-{
-	if (need_resched() || race_fault())
-		return drop_locks_do(trans, (schedule(), 0));
-	else
-		return 0;
-}
-
 static inline int __btree_path_cmp(const struct btree_path *l,
 				   enum btree_id	r_btree_id,
 				   bool			r_cached,
@@ -2732,16 +2720,6 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
 	iter->key_cache_path = NULL;
 }
 
-static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
-					struct btree_iter *iter,
-					unsigned btree_id, struct bpos pos,
-					unsigned flags)
-{
-	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
-			bch2_btree_iter_flags(trans, btree_id, flags),
-			_RET_IP_);
-}
-
 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
 			  struct btree_iter *iter,
 			  enum btree_id btree_id, struct bpos pos,
...
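bch2_trans_cond_resched() and bch2_trans_iter_init_inlined() were simply dead code with no remaining callers. gcc suppresses unused-function warnings for static inline functions, but clang's -Wunused-function also fires for static inline functions defined in a .c file (it only exempts headers), so the dead helpers are deleted rather than annotated. Illustration:

        /* in a .c file, clang warns where gcc stays silent: */
        static inline int never_called(void)    /* clang: unused function
                                                 * 'never_called'
                                                 * [-Wunused-function] */
        {
                return 0;
        }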
@@ -388,7 +388,7 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
 		six_lock_readers_add(&b->lock, readers);
 
 	if (ret)
-		mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);
+		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
 
 	return ret;
 }
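mark_btree_node_locked_noreset() takes an enum btree_node_locked_type, so passing the SIX_LOCK_* constants (enum six_lock_type) here and in the hunks below was an implicit cross-enum conversion. The numeric values line up, which is why the substitution is purely mechanical; the lock-state enum is defined (roughly, per btree_locking.h) in terms of the six-lock values:

        enum btree_node_locked_type {
                BTREE_NODE_UNLOCKED             = -1,
                BTREE_NODE_READ_LOCKED          = SIX_LOCK_read,
                BTREE_NODE_INTENT_LOCKED        = SIX_LOCK_intent,
                BTREE_NODE_WRITE_LOCKED         = SIX_LOCK_write,
        };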
@@ -552,7 +552,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
 	return false;
 success:
-	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 	return true;
 }
@@ -667,7 +667,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 		} else {
 			if (btree_node_intent_locked(path, l)) {
 				six_lock_downgrade(&path->l[l].b->c.lock);
-				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
+				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
 			}
 			break;
 		}
...
@@ -180,7 +180,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
 	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
 
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 
 	trans_for_each_path_with_node(trans, b, linked)
 		linked->l[b->c.level].lock_seq++;
@@ -293,7 +293,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
 	 * write lock: thus, we need to tell the cycle detector we have a write
 	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
+	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
 
 	return likely(six_trylock_write(&b->lock))
 		? 0
...
@@ -188,7 +188,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
 	bch2_btree_node_hash_remove(&c->btree_cache, b);
 	__btree_node_free(c, b);
 	six_unlock_write(&b->c.lock);
-	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 
 	trans_for_each_path(trans, path)
 		if (path->l[level].b == b) {
@@ -720,7 +720,7 @@ static void btree_update_nodes_written(struct btree_update *as)
 		mutex_unlock(&c->btree_interior_update_lock);
 
-		mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+		mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 		six_unlock_write(&b->c.lock);
 
 		btree_node_write_if_need(c, b, SIX_LOCK_intent);
...
@@ -413,7 +413,7 @@ static int run_one_mem_trigger(struct btree_trans *trans,
 	if (unlikely(flags & BTREE_TRIGGER_NORUN))
 		return 0;
 
-	if (!btree_node_type_needs_gc(i->btree_id))
+	if (!btree_node_type_needs_gc((enum btree_node_type) i->btree_id))
 		return 0;
 
 	if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
...
@@ -59,13 +59,13 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
 #define darray_first(_d)	((_d).data[0])
 #define darray_last(_d)		((_d).data[(_d).nr - 1])
 
-#define darray_insert_item(_d, _pos, _item)				\
+#define darray_insert_item(_d, pos, _item)				\
 ({									\
-	size_t pos = (_pos);						\
+	size_t _pos = (pos);						\
 	int _ret = darray_make_room((_d), 1);				\
 									\
 	if (!_ret)							\
-		array_insert_item((_d)->data, (_d)->nr, pos, (_item));	\
+		array_insert_item((_d)->data, (_d)->nr, _pos, (_item));	\
 	_ret;								\
 })
...
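The parameter/local rename fixes a macro-hygiene bug rather than a style issue: with the old spelling, any caller whose argument expression itself used the identifier pos had it captured by the macro's local. Sketch of the failure mode (d and item are stand-ins for a caller's darray and element):

        size_t pos = 3;
        darray_insert_item(&d, pos, item);
        /* old expansion began:  size_t pos = (pos);
         * the local shadows the caller's variable and is initialized from
         * itself; clang: variable 'pos' is uninitialized when used within
         * its own initialization [-Wuninitialized]
         */

Giving the macro-internal local the underscore prefix follows the usual kernel macro convention.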
[diff for one file collapsed in the web view; not shown]
@@ -138,10 +138,7 @@ struct ec_stripe_buf {
 	void			*data[BCH_BKEY_PTRS_MAX];
 
-	union {
-		struct bkey_i_stripe	key;
-		u64			pad[255];
-	};
+	__BKEY_PADDED(key, 255);
 };
 
 struct ec_stripe_head;
...
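struct bkey_i_stripe ends in a flexible array member (the stripe's block pointers), and placing a variable-sized type inside a union is a GNU extension that clang presumably objected to here (-Wgnu-variable-sized-type-not-at-end / -Wflexible-array-extensions territory). __BKEY_PADDED reserves the same space without a union; a hedged sketch of its expansion, per bcachefs_format.h conventions:

        #define __BKEY_PADDED(key, pad)				\
                struct bkey_i key; __u64 key ## _pad[pad]

Since the field is now a plain struct bkey_i, users downcast explicitly via bkey_i_to_stripe(), which is exactly what the should_drop_bucket() hunk above does.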
@@ -121,10 +121,10 @@ static void bch2_opt_fix_errors_to_text(struct printbuf *out,
 	prt_str(out, bch2_fsck_fix_opts[v]);
 }
 
-static const struct bch_opt_fn bch2_opt_fix_errors = {
-	.parse	= bch2_opt_fix_errors_parse,
-	.to_text = bch2_opt_fix_errors_to_text,
-};
+#define bch2_opt_fix_errors (struct bch_opt_fn) {	\
+	.parse	= bch2_opt_fix_errors_parse,		\
+	.to_text = bch2_opt_fix_errors_to_text,		\
+}
 
 const char * const bch2_d_types[BCH_DT_MAX] = {
 	[DT_UNKNOWN] = "unknown",
...
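This is the "opts.c initializer" from the commit message: bch2_opt_fix_errors is used inside the static initializer of the options table, and in ISO C the value of a const object is not a constant expression, so clang rejects it ("initializer element is not a compile-time constant"). A compound literal whose members are address constants is accepted, and wrapping it in a macro keeps every use site spelled the same. A condensed sketch with hypothetical names:

        #include <stddef.h>

        struct bch_opt_fn { int (*parse)(void); void (*to_text)(void); };
        struct option     { struct bch_opt_fn fn; };

        /* before: a named const object; clang rejects using its value in a
         * static initializer, as in the commented-out line below */
        static const struct bch_opt_fn fns = { .parse = NULL, .to_text = NULL };
        /* static struct option opt = { .fn = fns }; */

        /* after: a compound literal built from constants is accepted */
        #define FNS (struct bch_opt_fn) { .parse = NULL, .to_text = NULL }
        static struct option opt = { .fn = FNS };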
@@ -265,16 +265,13 @@ struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
 
 /* Superblock validate: */
 
-static inline void __bch2_sb_layout_size_assert(void)
-{
-	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
-}
-
 static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out)
 {
 	u64 offset, prev_offset, max_sectors;
 	unsigned i;
 
+	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
+
 	if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
 	    !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
 		prt_printf(out, "Not a bcachefs superblock layout");
...
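Same unused-function story as the btree_iter.c deletions above: __bch2_sb_layout_size_assert() existed only to host the BUILD_BUG_ON and had no callers. Because BUILD_BUG_ON is evaluated entirely at compile time, it can live in any function that actually gets compiled, so the assertion moves into validate_sb_layout() at no runtime cost. The classic sizeof-array sketch of the idea (the kernel's real definition goes through compiletime_assert()):

        /* compiles to nothing when cond is false; a negative array size
         * forces a compile error when cond is true */
        #define BUILD_BUG_ON(cond) ((void) sizeof(char[1 - 2 * !!(cond)]))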