Commit fb8e5b4c authored by Kent Overstreet

bcachefs: sb-members.c

Split out a new file for bch_sb_field_members - we'll likely want to
move more code here in the future.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 8079aab0
......@@ -64,6 +64,7 @@ bcachefs-y := \
recovery.o \
reflink.o \
replicas.o \
sb-members.o \
siphash.o \
six.o \
subvolume.o \
......
......@@ -5,7 +5,7 @@
#include "bcachefs.h"
#include "alloc_types.h"
#include "extents.h"
#include "super.h"
#include "sb-members.h"
#include <linux/hash.h>
......
......@@ -10,7 +10,31 @@
#include "buckets_types.h"
#include "extents.h"
#include "super.h"
#include "sb-members.h"
/* Convert a device sector offset to the index of the bucket containing it. */
static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}
/* Convert a bucket index to the sector offset of the bucket's start. */
static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	/* cast before multiply so the product is computed in sector_t width */
	return ((sector_t) b) * ca->mi.bucket_size;
}
/* Return the offset, in sectors, of @s within its containing bucket. */
static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}
/*
 * Convert sector @s to a bucket index (returned) and the sector offset
 * within that bucket (stored in *@offset).
 */
static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
						 u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}
#define for_each_bucket(_b, _buckets) \
for (_b = (_buckets)->b + (_buckets)->first_bucket; \
......@@ -292,6 +316,27 @@ int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
/*
 * Returns true if bucket @b on @ca overlaps any on-disk superblock copy.
 * Bucket 0 is always reported as a superblock bucket.
 */
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset = bucket_to_sector(ca, b);
	u64 b_end = bucket_to_sector(ca, b + 1);
	unsigned i;

	/* bucket 0 is unconditionally reserved */
	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		/*
		 * NOTE(review): 1 << sb_max_size_bits is int arithmetic;
		 * presumably sb_max_size_bits is validated small elsewhere —
		 * confirm, else this should be 1ULL << to avoid UB.
		 */
		u64 end = offset + (1 << layout->sb_max_size_bits);

		/* true when [offset, end) intersects [b_offset, b_end) */
		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}
/* disk reservations: */
static inline void bch2_disk_reservation_put(struct bch_fs *c,
......
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "disk_groups.h"
#include "sb-members.h"
#include "super-io.h"
#include <linux/sort.h>
......
......@@ -3,13 +3,14 @@
#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"
#include "sb-members.h"
#include "trace.h"
#include <linux/kthread.h>
......
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "disk_groups.h"
#include "replicas.h"
#include "sb-members.h"
#include "super-io.h"
/* Code for bch_sb_field_members: */
static int bch2_sb_members_validate(struct bch_sb *sb,
struct bch_sb_field *f,
struct printbuf *err)
{
struct bch_sb_field_members *mi = field_to_type(f, members);
unsigned i;
if ((void *) (mi->members + sb->nr_devices) >
vstruct_end(&mi->field)) {
prt_printf(err, "too many devices for section size");
return -BCH_ERR_invalid_sb_members;
}
for (i = 0; i < sb->nr_devices; i++) {
struct bch_member *m = mi->members + i;
if (!bch2_member_exists(m))
continue;
if (le64_to_cpu(m->nbuckets) > LONG_MAX) {
prt_printf(err, "device %u: too many buckets (got %llu, max %lu)",
i, le64_to_cpu(m->nbuckets), LONG_MAX);
return -BCH_ERR_invalid_sb_members;
}
if (le64_to_cpu(m->nbuckets) -
le16_to_cpu(m->first_bucket) < BCH_MIN_NR_NBUCKETS) {
prt_printf(err, "device %u: not enough buckets (got %llu, max %u)",
i, le64_to_cpu(m->nbuckets), BCH_MIN_NR_NBUCKETS);
return -BCH_ERR_invalid_sb_members;
}
if (le16_to_cpu(m->bucket_size) <
le16_to_cpu(sb->block_size)) {
prt_printf(err, "device %u: bucket size %u smaller than block size %u",
i, le16_to_cpu(m->bucket_size), le16_to_cpu(sb->block_size));
return -BCH_ERR_invalid_sb_members;
}
if (le16_to_cpu(m->bucket_size) <
BCH_SB_BTREE_NODE_SIZE(sb)) {
prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
i, le16_to_cpu(m->bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
return -BCH_ERR_invalid_sb_members;
}
}
return 0;
}
/*
 * Print one indented "Key:<tab>value" stanza per existing member to @out:
 * UUID, size, bucket geometry, last mount time, state, disk-group label,
 * allowed/present data types, and the discard/freespace-initialized flags.
 */
static void bch2_sb_members_to_text(struct printbuf *out, struct bch_sb *sb,
				    struct bch_sb_field *f)
{
	struct bch_sb_field_members *mi = field_to_type(f, members);
	struct bch_sb_field_disk_groups *gi = bch2_sb_get_disk_groups(sb);
	unsigned i;

	for (i = 0; i < sb->nr_devices; i++) {
		struct bch_member *m = mi->members + i;
		unsigned data_have = bch2_sb_dev_has_data(sb, i);
		u64 bucket_size = le16_to_cpu(m->bucket_size);
		u64 device_size = le64_to_cpu(m->nbuckets) * bucket_size;

		if (!bch2_member_exists(m))
			continue;

		prt_printf(out, "Device:");
		prt_tab(out);
		prt_printf(out, "%u", i);
		prt_newline(out);

		printbuf_indent_add(out, 2);

		prt_printf(out, "UUID:");
		prt_tab(out);
		pr_uuid(out, m->uuid.b);
		prt_newline(out);

		prt_printf(out, "Size:");
		prt_tab(out);
		/* << 9: sizes are stored in 512-byte sectors, print bytes */
		prt_units_u64(out, device_size << 9);
		prt_newline(out);

		prt_printf(out, "Bucket size:");
		prt_tab(out);
		prt_units_u64(out, bucket_size << 9);
		prt_newline(out);

		prt_printf(out, "First bucket:");
		prt_tab(out);
		prt_printf(out, "%u", le16_to_cpu(m->first_bucket));
		prt_newline(out);

		prt_printf(out, "Buckets:");
		prt_tab(out);
		prt_printf(out, "%llu", le64_to_cpu(m->nbuckets));
		prt_newline(out);

		prt_printf(out, "Last mount:");
		prt_tab(out);
		/* last_mount == 0 means the device has never been mounted */
		if (m->last_mount)
			pr_time(out, le64_to_cpu(m->last_mount));
		else
			prt_printf(out, "(never)");
		prt_newline(out);

		prt_printf(out, "State:");
		prt_tab(out);
		/* guard against state values newer than this code knows */
		prt_printf(out, "%s",
			   BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
			   ? bch2_member_states[BCH_MEMBER_STATE(m)]
			   : "unknown");
		prt_newline(out);

		prt_printf(out, "Label:");
		prt_tab(out);
		/* group field is 1-based; 0 means no label assigned */
		if (BCH_MEMBER_GROUP(m)) {
			unsigned idx = BCH_MEMBER_GROUP(m) - 1;

			if (idx < disk_groups_nr(gi))
				prt_printf(out, "%s (%u)",
					   gi->entries[idx].label, idx);
			else
				prt_printf(out, "(bad disk labels section)");
		} else {
			prt_printf(out, "(none)");
		}
		prt_newline(out);

		prt_printf(out, "Data allowed:");
		prt_tab(out);
		if (BCH_MEMBER_DATA_ALLOWED(m))
			prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(m));
		else
			prt_printf(out, "(none)");
		prt_newline(out);

		prt_printf(out, "Has data:");
		prt_tab(out);
		if (data_have)
			prt_bitflags(out, bch2_data_types, data_have);
		else
			prt_printf(out, "(none)");
		prt_newline(out);

		prt_printf(out, "Discard:");
		prt_tab(out);
		prt_printf(out, "%llu", BCH_MEMBER_DISCARD(m));
		prt_newline(out);

		prt_printf(out, "Freespace initialized:");
		prt_tab(out);
		prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(m));
		prt_newline(out);

		printbuf_indent_sub(out, 2);
	}
}
/* Superblock field ops for BCH_SB_FIELD_members; declared in sb-members.h. */
const struct bch_sb_field_ops bch_sb_field_ops_members = {
	.validate	= bch2_sb_members_validate,
	.to_text	= bch2_sb_members_to_text,
};
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H
/* A device is online while its io_ref percpu refcount is still live. */
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}
/* Readable = online and not in the failed member state. */
static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}
/*
 * Try to take an io_ref on @ca for an I/O of direction @rw (READ/WRITE).
 * The ref is only kept when the device state allows the operation: rw
 * devices accept anything, ro devices accept reads only. Returns true
 * with the ref held, or false with no ref taken.
 */
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
	bool allowed;

	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	allowed = ca->mi.state == BCH_MEMBER_STATE_rw ||
		  (rw == READ && ca->mi.state == BCH_MEMBER_STATE_ro);
	if (!allowed)
		percpu_ref_put(&ca->io_ref);

	return allowed;
}
/* Number of devices set in a device bitmask. */
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}
/* Returns true if device index @dev appears in @devs. */
static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	unsigned idx = 0;

	while (idx < devs.nr)
		if (devs.devs[idx++] == dev)
			return true;

	return false;
}
/*
 * Remove the first occurrence of device index @dev from @devs, if present.
 * Later entries are shifted down by array_remove_item().
 */
static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs->nr; i++)
		if (devs->devs[i] == dev) {
			array_remove_item(devs->devs, devs->nr, i);
			return;
		}
}
/*
 * Append device index @dev to @devs unless it is already present.
 * BUGs if the fixed-size list is full.
 */
static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (bch2_dev_list_has_dev(*devs, dev))
		return;

	BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
	devs->devs[devs->nr++] = dev;
}
/* Construct a one-element device list containing @dev. */
static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}
/*
 * Advance *@iter to the next present device in @c, optionally restricted
 * to indices set in @mask (NULL = all indices). Returns the device, or
 * NULL when the iteration is exhausted. Caller must hold rcu_read_lock()
 * or c->state_lock (see the rcu_dereference_check condition).
 */
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
					      const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	/* skip indices not in the mask and slots with no device attached */
	while ((*iter = mask
		? find_next_bit(mask->d, c->sb.nr_devices, *iter)
		: *iter) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[*iter],
					    lockdep_is_held(&c->state_lock))))
		(*iter)++;

	return ca;
}
/*
 * Iterate devices under RCU (or state_lock); takes no refs, so the caller
 * must not block while holding a returned @ca.
 */
#define for_each_member_device_rcu(ca, c, iter, mask)			\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)
/*
 * Like __bch2_next_dev() but takes a ref on the returned device, so it
 * remains valid after rcu_read_unlock(). Caller drops the ref.
 */
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca;

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, iter, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}
/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)
/*
 * Advance to the next device whose state is in @state_mask (a bitmask of
 * 1 << BCH_MEMBER_STATE_*) and whose io_ref can still be taken. Returns
 * the device with an io_ref held, or NULL. Caller drops the io_ref.
 */
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       unsigned *iter,
						       int state_mask)
{
	struct bch_dev *ca;

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, iter, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		(*iter)++;
	rcu_read_unlock();

	return ca;
}
/* Iterate online devices matching @state_mask; io_ref held per iteration. */
#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)

/* All online devices, regardless of member state. */
#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)

/* Online devices in the rw state only. */
#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)

/* Online devices in the rw or ro states. */
#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c,	iter,				\
		(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	/* debug-build sanity check; the "1" below asserts no locking needed */
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}
/*
 * Look up device @idx; caller must hold c->sb_lock or c->state_lock
 * (enforced via lockdep on debug builds).
 */
static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}
/* XXX kill, move to struct bch_fs */
/* Build (by value) a bitmask of the indices of all currently online devices. */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;
	struct bch_dev *ca;
	unsigned i;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(ca, c, i)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}
extern const struct bch_sb_field_ops bch_sb_field_ops_members;
#endif /* _BCACHEFS_SB_MEMBERS_H */
......@@ -16,6 +16,7 @@
#include "recovery.h"
#include "replicas.h"
#include "quota.h"
#include "sb-members.h"
#include "super-io.h"
#include "super.h"
#include "trace.h"
......@@ -1015,172 +1016,6 @@ void __bch2_check_set_feature(struct bch_fs *c, unsigned feat)
mutex_unlock(&c->sb_lock);
}
/* BCH_SB_FIELD_members: */
static int bch2_sb_members_validate(struct bch_sb *sb,
struct bch_sb_field *f,
struct printbuf *err)
{
struct bch_sb_field_members *mi = field_to_type(f, members);
unsigned i;
if ((void *) (mi->members + sb->nr_devices) >
vstruct_end(&mi->field)) {
prt_printf(err, "too many devices for section size");
return -BCH_ERR_invalid_sb_members;
}
for (i = 0; i < sb->nr_devices; i++) {
struct bch_member *m = mi->members + i;
if (!bch2_member_exists(m))
continue;
if (le64_to_cpu(m->nbuckets) > LONG_MAX) {
prt_printf(err, "device %u: too many buckets (got %llu, max %lu)",
i, le64_to_cpu(m->nbuckets), LONG_MAX);
return -BCH_ERR_invalid_sb_members;
}
if (le64_to_cpu(m->nbuckets) -
le16_to_cpu(m->first_bucket) < BCH_MIN_NR_NBUCKETS) {
prt_printf(err, "device %u: not enough buckets (got %llu, max %u)",
i, le64_to_cpu(m->nbuckets), BCH_MIN_NR_NBUCKETS);
return -BCH_ERR_invalid_sb_members;
}
if (le16_to_cpu(m->bucket_size) <
le16_to_cpu(sb->block_size)) {
prt_printf(err, "device %u: bucket size %u smaller than block size %u",
i, le16_to_cpu(m->bucket_size), le16_to_cpu(sb->block_size));
return -BCH_ERR_invalid_sb_members;
}
if (le16_to_cpu(m->bucket_size) <
BCH_SB_BTREE_NODE_SIZE(sb)) {
prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
i, le16_to_cpu(m->bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
return -BCH_ERR_invalid_sb_members;
}
}
return 0;
}
static void bch2_sb_members_to_text(struct printbuf *out, struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_members *mi = field_to_type(f, members);
struct bch_sb_field_disk_groups *gi = bch2_sb_get_disk_groups(sb);
unsigned i;
for (i = 0; i < sb->nr_devices; i++) {
struct bch_member *m = mi->members + i;
unsigned data_have = bch2_sb_dev_has_data(sb, i);
u64 bucket_size = le16_to_cpu(m->bucket_size);
u64 device_size = le64_to_cpu(m->nbuckets) * bucket_size;
if (!bch2_member_exists(m))
continue;
prt_printf(out, "Device:");
prt_tab(out);
prt_printf(out, "%u", i);
prt_newline(out);
printbuf_indent_add(out, 2);
prt_printf(out, "UUID:");
prt_tab(out);
pr_uuid(out, m->uuid.b);
prt_newline(out);
prt_printf(out, "Size:");
prt_tab(out);
prt_units_u64(out, device_size << 9);
prt_newline(out);
prt_printf(out, "Bucket size:");
prt_tab(out);
prt_units_u64(out, bucket_size << 9);
prt_newline(out);
prt_printf(out, "First bucket:");
prt_tab(out);
prt_printf(out, "%u", le16_to_cpu(m->first_bucket));
prt_newline(out);
prt_printf(out, "Buckets:");
prt_tab(out);
prt_printf(out, "%llu", le64_to_cpu(m->nbuckets));
prt_newline(out);
prt_printf(out, "Last mount:");
prt_tab(out);
if (m->last_mount)
pr_time(out, le64_to_cpu(m->last_mount));
else
prt_printf(out, "(never)");
prt_newline(out);
prt_printf(out, "State:");
prt_tab(out);
prt_printf(out, "%s",
BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
? bch2_member_states[BCH_MEMBER_STATE(m)]
: "unknown");
prt_newline(out);
prt_printf(out, "Label:");
prt_tab(out);
if (BCH_MEMBER_GROUP(m)) {
unsigned idx = BCH_MEMBER_GROUP(m) - 1;
if (idx < disk_groups_nr(gi))
prt_printf(out, "%s (%u)",
gi->entries[idx].label, idx);
else
prt_printf(out, "(bad disk labels section)");
} else {
prt_printf(out, "(none)");
}
prt_newline(out);
prt_printf(out, "Data allowed:");
prt_tab(out);
if (BCH_MEMBER_DATA_ALLOWED(m))
prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(m));
else
prt_printf(out, "(none)");
prt_newline(out);
prt_printf(out, "Has data:");
prt_tab(out);
if (data_have)
prt_bitflags(out, bch2_data_types, data_have);
else
prt_printf(out, "(none)");
prt_newline(out);
prt_printf(out, "Discard:");
prt_tab(out);
prt_printf(out, "%llu", BCH_MEMBER_DISCARD(m));
prt_newline(out);
prt_printf(out, "Freespace initialized:");
prt_tab(out);
prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(m));
prt_newline(out);
printbuf_indent_sub(out, 2);
}
}
static const struct bch_sb_field_ops bch_sb_field_ops_members = {
.validate = bch2_sb_members_validate,
.to_text = bch2_sb_members_to_text,
};
/* BCH_SB_FIELD_crypt: */
static int bch2_sb_crypt_validate(struct bch_sb *sb,
......
......@@ -8,220 +8,6 @@
#include <linux/math64.h>
static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
return div_u64(s, ca->mi.bucket_size);
}
static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
return ((sector_t) b) * ca->mi.bucket_size;
}
static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
u32 remainder;
div_u64_rem(s, ca->mi.bucket_size, &remainder);
return remainder;
}
static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
u32 *offset)
{
return div_u64_rem(s, ca->mi.bucket_size, offset);
}
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
return !percpu_ref_is_zero(&ca->io_ref);
}
static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
return bch2_dev_is_online(ca) &&
ca->mi.state != BCH_MEMBER_STATE_failed;
}
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
if (!percpu_ref_tryget(&ca->io_ref))
return false;
if (ca->mi.state == BCH_MEMBER_STATE_rw ||
(ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
return true;
percpu_ref_put(&ca->io_ref);
return false;
}
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}
static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
unsigned dev)
{
unsigned i;
for (i = 0; i < devs.nr; i++)
if (devs.devs[i] == dev)
return true;
return false;
}
static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
unsigned dev)
{
unsigned i;
for (i = 0; i < devs->nr; i++)
if (devs->devs[i] == dev) {
array_remove_item(devs->devs, devs->nr, i);
return;
}
}
static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
unsigned dev)
{
if (!bch2_dev_list_has_dev(*devs, dev)) {
BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
devs->devs[devs->nr++] = dev;
}
}
static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
const struct bch_devs_mask *mask)
{
struct bch_dev *ca = NULL;
while ((*iter = mask
? find_next_bit(mask->d, c->sb.nr_devices, *iter)
: *iter) < c->sb.nr_devices &&
!(ca = rcu_dereference_check(c->devs[*iter],
lockdep_is_held(&c->state_lock))))
(*iter)++;
return ca;
}
#define for_each_member_device_rcu(ca, c, iter, mask) \
for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)
static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
struct bch_dev *ca;
rcu_read_lock();
if ((ca = __bch2_next_dev(c, iter, NULL)))
percpu_ref_get(&ca->ref);
rcu_read_unlock();
return ca;
}
/*
* If you break early, you must drop your ref on the current device
*/
#define for_each_member_device(ca, c, iter) \
for ((iter) = 0; \
(ca = bch2_get_next_dev(c, &(iter))); \
percpu_ref_put(&ca->ref), (iter)++)
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
unsigned *iter,
int state_mask)
{
struct bch_dev *ca;
rcu_read_lock();
while ((ca = __bch2_next_dev(c, iter, NULL)) &&
(!((1 << ca->mi.state) & state_mask) ||
!percpu_ref_tryget(&ca->io_ref)))
(*iter)++;
rcu_read_unlock();
return ca;
}
#define __for_each_online_member(ca, c, iter, state_mask) \
for ((iter) = 0; \
(ca = bch2_get_next_online_dev(c, &(iter), state_mask)); \
percpu_ref_put(&ca->io_ref), (iter)++)
#define for_each_online_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, ~0)
#define for_each_rw_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
#define for_each_readable_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, \
(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
/*
* If a key exists that references a device, the device won't be going away and
* we can omit rcu_read_lock():
*/
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
return rcu_dereference_check(c->devs[idx], 1);
}
static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
return rcu_dereference_protected(c->devs[idx],
lockdep_is_held(&c->sb_lock) ||
lockdep_is_held(&c->state_lock));
}
/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
struct bch_devs_mask devs;
struct bch_dev *ca;
unsigned i;
memset(&devs, 0, sizeof(devs));
for_each_online_member(ca, c, i)
__set_bit(ca->dev_idx, devs.d);
return devs;
}
static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
u64 b_offset = bucket_to_sector(ca, b);
u64 b_end = bucket_to_sector(ca, b + 1);
unsigned i;
if (!b)
return true;
for (i = 0; i < layout->nr_superblocks; i++) {
u64 offset = le64_to_cpu(layout->sb_offset[i]);
u64 end = offset + (1 << layout->sb_max_size_bits);
if (!(offset >= b_end || end <= b_offset))
return true;
}
return false;
}
struct bch_fs *bch2_dev_to_fs(dev_t);
struct bch_fs *bch2_uuid_to_fs(__uuid_t);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment