Commit fd0c7679 authored by Kent Overstreet

bcachefs: Convert to __packed and __aligned

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent e5baf3da
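
The change is mechanical: the open-coded GCC attribute syntax is replaced by the kernel's shorthand macros from include/linux/compiler_attributes.h, which expand to the same attributes. A minimal, compilable before/after sketch follows; the struct names and fields are hypothetical and used only to illustrate that layout and alignment are unchanged:

/*
 * Minimal sketch; "example_old"/"example_new" are hypothetical structs,
 * not part of bcachefs.  The shorthand macros come from
 * include/linux/compiler_attributes.h and expand to the same attributes
 * that were previously written out by hand.
 */
#include <linux/types.h>

#ifndef __packed
#define __packed	__attribute__((__packed__))
#endif
#ifndef __aligned
#define __aligned(x)	__attribute__((__aligned__(x)))
#endif

/* Before: open-coded attribute syntax */
struct example_old {
	__u32	a;
	__u64	b;
} __attribute__((packed, aligned(4)));

/* After: same layout and alignment via the kernel shorthand */
struct example_new {
	__u32	a;
	__u64	b;
} __packed __aligned(4);

Both spellings produce the same result under GCC and clang: the members are laid out without padding (sizeof == 12 here) and the struct's minimum alignment is raised back to 4 bytes, so the on-disk format is unaffected.
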
@@ -151,7 +151,7 @@ struct bpos {
 #else
 #error edit for your odd byteorder.
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 #define KEY_INODE_MAX ((__u64)~0ULL)
 #define KEY_OFFSET_MAX ((__u64)~0ULL)
@@ -185,7 +185,7 @@ struct bversion {
 	__u32 hi;
 	__u64 lo;
 #endif
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 struct bkey {
 	/* Size of combined key and value, in u64s */
@@ -218,7 +218,7 @@ struct bkey {
 	__u8 pad[1];
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bkey_packed {
 	__u64 _data[0];
@@ -252,7 +252,7 @@ struct bkey_packed {
 	 * to the same size as struct bkey should hopefully be safest.
 	 */
 	__u8 pad[sizeof(struct bkey) - 3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
 #define BKEY_U64s_MAX U8_MAX
@@ -480,7 +480,7 @@ struct bch_set {
 struct bch_csum {
 	__le64 lo;
 	__le64 hi;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_EXTENT_ENTRY_TYPES() \
 	x(ptr, 0) \
@@ -517,7 +517,7 @@ struct bch_extent_crc32 {
 		_compressed_size:7,
 		type:2;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define CRC32_SIZE_MAX (1U << 7)
 #define CRC32_NONCE_MAX 0
@@ -543,7 +543,7 @@ struct bch_extent_crc64 {
 		type:3;
 #endif
 	__u64 csum_lo;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define CRC64_SIZE_MAX (1U << 9)
 #define CRC64_NONCE_MAX ((1U << 10) - 1)
@@ -567,7 +567,7 @@ struct bch_extent_crc128 {
 		type:4;
 #endif
 	struct bch_csum csum;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define CRC128_SIZE_MAX (1U << 13)
 #define CRC128_NONCE_MAX ((1U << 13) - 1)
@@ -593,7 +593,7 @@ struct bch_extent_ptr {
 		cached:1,
 		type:1;
 #endif
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_extent_stripe_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
@@ -645,7 +645,7 @@ struct bch_btree_ptr {
 	__u64 _data[0];
 	struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_btree_ptr_v2 {
 	struct bch_val v;
@@ -657,7 +657,7 @@ struct bch_btree_ptr_v2 {
 	struct bpos min_key;
 	__u64 _data[0];
 	struct bch_extent_ptr start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
@@ -666,7 +666,7 @@ struct bch_extent {
 	__u64 _data[0];
 	union bch_extent_entry start[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_reservation {
 	struct bch_val v;
@@ -674,7 +674,7 @@ struct bch_reservation {
 	__le32 generation;
 	__u8 nr_replicas;
 	__u8 pad[3];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Maximum size (in u64s) a single pointer could be: */
 #define BKEY_EXTENT_PTR_U64s_MAX\
@@ -708,7 +708,7 @@ struct bch_inode {
 	__le32 bi_flags;
 	__le16 bi_mode;
 	__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_inode_v2 {
 	struct bch_val v;
@@ -718,14 +718,14 @@ struct bch_inode_v2 {
 	__le64 bi_flags;
 	__le16 bi_mode;
 	__u8 fields[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_inode_generation {
 	struct bch_val v;
 	__le32 bi_generation;
 	__le32 pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /*
  * bi_subvol and bi_parent_subvol are only set for subvolume roots:
@@ -846,7 +846,7 @@ struct bch_dirent {
 	__u8 d_type;
 	__u8 d_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define DT_SUBVOL 16
 #define BCH_DT_MAX 17
@@ -869,7 +869,7 @@ struct bch_xattr {
 	__u8 x_name_len;
 	__le16 x_val_len;
 	__u8 x_name[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Bucket/allocation information: */
@@ -878,7 +878,7 @@ struct bch_alloc {
 	__u8 fields;
 	__u8 gen;
 	__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_ALLOC_FIELDS_V1() \
 	x(read_time, 16) \
@@ -897,7 +897,7 @@ struct bch_alloc_v2 {
 	__u8 oldest_gen;
 	__u8 data_type;
 	__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_ALLOC_FIELDS_V2() \
 	x(read_time, 64) \
@@ -916,7 +916,7 @@ struct bch_alloc_v3 {
 	__u8 oldest_gen;
 	__u8 data_type;
 	__u8 data[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_alloc_v4 {
 	struct bch_val v;
@@ -932,7 +932,7 @@ struct bch_alloc_v4 {
 	__u32 stripe;
 	__u32 nr_external_backpointers;
 	struct bpos backpointers[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
 LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
@@ -971,7 +971,7 @@ struct bch_quota_counter {
 struct bch_quota {
 	struct bch_val v;
 	struct bch_quota_counter c[Q_COUNTERS];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Erasure coding */
@@ -987,7 +987,7 @@ struct bch_stripe {
 	__u8 pad;
 	struct bch_extent_ptr ptrs[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* Reflink: */
@@ -1004,14 +1004,14 @@ struct bch_reflink_p {
 	 */
 	__le32 front_pad;
 	__le32 back_pad;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_reflink_v {
 	struct bch_val v;
 	__le64 refcount;
 	union bch_extent_entry start[0];
 	__u64 _data[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_indirect_inline_data {
 	struct bch_val v;
@@ -1068,7 +1068,7 @@ LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
 struct bch_lru {
 	struct bch_val v;
 	__le64 idx;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define LRU_ID_STRIPES (1U << 16)
@@ -1267,19 +1267,19 @@ struct bch_replicas_entry_v0 {
 	__u8 data_type;
 	__u8 nr_devs;
 	__u8 devs[];
-} __attribute__((packed));
+} __packed;
 struct bch_sb_field_replicas_v0 {
 	struct bch_sb_field field;
 	struct bch_replicas_entry_v0 entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_replicas_entry {
 	__u8 data_type;
 	__u8 nr_devs;
 	__u8 nr_required;
 	__u8 devs[];
-} __attribute__((packed));
+} __packed;
 #define replicas_entry_bytes(_i) \
 	(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
@@ -1287,7 +1287,7 @@ struct bch_replicas_entry {
 struct bch_sb_field_replicas {
 	struct bch_sb_field field;
 	struct bch_replicas_entry entries[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* BCH_SB_FIELD_quota: */
@@ -1304,7 +1304,7 @@ struct bch_sb_quota_type {
 struct bch_sb_field_quota {
 	struct bch_sb_field field;
 	struct bch_sb_quota_type q[QTYP_NR];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* BCH_SB_FIELD_disk_groups: */
@@ -1313,7 +1313,7 @@ struct bch_sb_field_quota {
 struct bch_disk_group {
 	__u8 label[BCH_SB_LABEL_SIZE];
 	__le64 flags[2];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
 LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
@@ -1322,7 +1322,7 @@ LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
 struct bch_sb_field_disk_groups {
 	struct bch_sb_field field;
 	struct bch_disk_group entries[0];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /* BCH_SB_FIELD_counters */
@@ -1504,7 +1504,7 @@ struct bch_sb_layout {
 	__u8 nr_superblocks;
 	__u8 pad[5];
 	__le64 sb_offset[61];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #define BCH_SB_LAYOUT_SECTOR 7
@@ -1555,7 +1555,7 @@ struct bch_sb {
 		struct bch_sb_field start[0];
 		__le64 _data[0];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 /*
  * Flags:
@@ -1914,26 +1914,26 @@ enum {
 struct jset_entry_usage {
 	struct jset_entry entry;
 	__le64 v;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_data_usage {
 	struct jset_entry entry;
 	__le64 v;
 	struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_clock {
 	struct jset_entry entry;
 	__u8 rw;
 	__u8 pad[7];
 	__le64 time;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_dev_usage_type {
 	__le64 buckets;
 	__le64 sectors;
 	__le64 fragmented;
-} __attribute__((packed));
+} __packed;
 struct jset_entry_dev_usage {
 	struct jset_entry entry;
@@ -1944,7 +1944,7 @@ struct jset_entry_dev_usage {
 	__le64 _buckets_unavailable; /* No longer used */
 	struct jset_entry_dev_usage_type d[];
-} __attribute__((packed));
+} __packed;
 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
 {
@@ -1955,7 +1955,7 @@ static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage
 struct jset_entry_log {
 	struct jset_entry entry;
 	u8 d[];
-} __attribute__((packed));
+} __packed;
 /*
  * On disk format for a journal entry:
@@ -1990,7 +1990,7 @@ struct jset {
 		struct jset_entry start[0];
 		__u64 _data[0];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
 LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
@@ -2052,7 +2052,7 @@ struct bset {
 		struct bkey_packed start[0];
 		__u64 _data[0];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
@@ -2085,7 +2085,7 @@ struct btree_node {
 		};
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
 LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
@@ -2106,6 +2106,6 @@ struct btree_node_entry {
 		};
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 #endif /* _BCACHEFS_FORMAT_H */

@@ -208,7 +208,7 @@ struct bch_ioctl_data {
 		__u64 pad[8];
 	};
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 enum bch_data_event {
 	BCH_DATA_EVENT_PROGRESS = 0,
@@ -224,7 +224,7 @@ struct bch_ioctl_data_progress {
 	__u64 sectors_done;
 	__u64 sectors_total;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_ioctl_data_event {
 	__u8 type;
@@ -233,12 +233,12 @@ struct bch_ioctl_data_event {
 		struct bch_ioctl_data_progress p;
 		__u64 pad2[15];
 	};
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 struct bch_replicas_usage {
 	__u64 sectors;
 	struct bch_replicas_entry r;
-} __attribute__((packed));
+} __packed;
 static inline struct bch_replicas_usage *
 replicas_usage_next(struct bch_replicas_usage *u)

@@ -313,7 +313,7 @@ struct btree_key_cache {
 struct bkey_cached_key {
 	u32 btree_id;
 	struct bpos pos;
-} __attribute__((packed, aligned(4)));
+} __packed __aligned(4);
 #define BKEY_CACHED_ACCESSED 0
 #define BKEY_CACHED_DIRTY 1

@@ -66,7 +66,7 @@ struct bkey_inode_buf {
 #define x(_name, _bits) + 8 + _bits / 8
 	u8 _pad[0 + BCH_INODE_FIELDS()];
 #undef x
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
 void bch2_inode_pack(struct bch_fs *, struct bkey_inode_buf *,
 		const struct bch_inode_unpacked *);