Commit 1e81f89b authored by Kent Overstreet

bcachefs: Fix assorted checkpatch nits

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 6fe893ea
@@ -105,7 +105,7 @@ struct write_point {
 		struct dev_stripe_state	stripe;
 		u64			sectors_allocated;
-	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+	} __aligned(SMP_CACHE_BYTES);
 	struct {
 		struct work_struct	index_update_work;
@@ -116,7 +116,7 @@ struct write_point {
 		enum write_point_state	state;
 		u64			last_state_change;
 		u64			time[WRITE_POINT_STATE_NR];
-	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+	} __aligned(SMP_CACHE_BYTES);
 };
 struct write_point_specifier {
...
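A note on the hunk above: `__aligned()` is the kernel's shorthand wrapper for the compiler alignment attribute (it lives in include/linux/compiler_attributes.h), so the conversion is purely cosmetic. A minimal sketch of the equivalence:

```c
/* Sketch of the wrapper: the two structs below have identical
 * layout and alignment. */
#define __aligned(x) __attribute__((__aligned__(x)))

struct padded_a {
	unsigned long v;
} __attribute__((__aligned__(64)));

struct padded_b {
	unsigned long v;
} __aligned(64);
```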
@@ -535,7 +535,7 @@ int bch2_check_topology(struct bch_fs *c)
 	bch2_trans_init(&trans, c, 0, 0);
-	for (i = 0; i < btree_id_nr_alive(c)&& !ret; i++) {
+	for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
 		if (!r->alive)
...
@@ -143,8 +143,8 @@ enum btree_write_flags {
 	__BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
 	__BTREE_WRITE_ALREADY_STARTED,
 };
-#define BTREE_WRITE_ONLY_IF_NEED	(1U << __BTREE_WRITE_ONLY_IF_NEED )
-#define BTREE_WRITE_ALREADY_STARTED	(1U << __BTREE_WRITE_ALREADY_STARTED)
+#define BTREE_WRITE_ONLY_IF_NEED	BIT(__BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED	BIT(__BTREE_WRITE_ALREADY_STARTED)
 void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
...
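`BIT()` is the include/linux/bits.h helper checkpatch suggests for single-bit masks; the same commit uses `BIT_ULL()` where a 64-bit mask is needed. One subtlety when reviewing conversions like the one above: `BIT(n)` is an unsigned long shift, not the `1U <<` (unsigned int) the old code used. A simplified sketch of the helpers (the real definitions build on the UL()/ULL() macros):

```c
/* Simplified versions of the bits.h helpers. */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

/* Usage matching the hunk above: */
#define BTREE_WRITE_ONLY_IF_NEED	BIT(__BTREE_WRITE_ONLY_IF_NEED)
```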
@@ -1008,7 +1008,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	/*
 	 * We used to assert that all paths had been traversed here
 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
-	 * path->Should_be_locked is not set yet, we we might have unlocked and
+	 * path->should_be_locked is not set yet, we might have unlocked and
 	 * then failed to relock a path - that's fine.
 	 */
 err:
@@ -2738,9 +2738,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
 			       unsigned depth,
 			       unsigned flags)
 {
 	flags |= BTREE_ITER_NOT_EXTENTS;
 	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
 	flags |= BTREE_ITER_ALL_SNAPSHOTS;
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
 				    __bch2_btree_iter_flags(trans, btree_id, flags),
...
@@ -268,10 +268,10 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
 {
 	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
 			btree_id, pos, flags|BTREE_ITER_INTENT, type);
-	struct bkey_i *ret = unlikely(IS_ERR(k.k))
+	struct bkey_i *ret = IS_ERR(k.k)
 		? ERR_CAST(k.k)
 		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
-	if (unlikely(IS_ERR(ret)))
+	if (IS_ERR(ret))
 		bch2_trans_iter_exit(trans, iter);
 	return ret;
 }
...
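The dropped `unlikely()` wrappers here (and around IS_ERR_OR_NULL() in __bch2_truncate_folio below) fix the checkpatch warning that IS_ERR() already contains the branch hint, so wrapping it again is redundant. A rough sketch of why, following the include/linux/err.h definitions (annotations like __must_check omitted):

```c
/* Rough sketch of the err.h machinery: unlikely() is already baked
 * into IS_ERR_VALUE(), so unlikely(IS_ERR(p)) double-wraps it. */
#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

static inline bool IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
```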
@@ -1924,6 +1924,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
 {
 	int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
+
 	if (ret)
 		bch_err_fn(c, ret);
 	return ret;
...
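The added blank line answers checkpatch's "Missing a blank line after declarations"; many hunks below make the same change. The pattern, shown on a hypothetical function (names illustrative only):

```c
/* Hypothetical example: checkpatch wants the declaration block
 * visually separated from the first statement. */
static int example(struct bch_fs *c)
{
	int ret = do_work(c);	/* declaration block ends here */

	if (ret)		/* first statement follows a blank line */
		return ret;
	return 0;
}
```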
@@ -17,7 +17,7 @@ int __init bch2_chardev_init(void);
 static inline long bch2_fs_ioctl(struct bch_fs *c,
 				 unsigned cmd, void __user * arg)
 {
-	return -ENOSYS;
+	return -ENOTTY;
 }
 static inline void bch2_fs_chardev_exit(struct bch_fs *c) {}
...
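The errno swap follows kernel convention: -ENOSYS means a missing system call, while an ioctl handler that does not recognize a command returns -ENOTTY ("Inappropriate ioctl for device"). A sketch of the convention in a hypothetical dispatcher (command and helper names are illustrative, not from this commit):

```c
/* Hypothetical ioctl dispatcher showing the errno convention. */
static long example_fs_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_IOC_QUERY:			/* hypothetical command */
		return example_do_query(filp, arg);
	default:
		return -ENOTTY;	/* not -ENOSYS: the syscall itself exists */
	}
}
```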
@@ -265,9 +265,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 #ifdef CONFIG_HIGHMEM
 		__bio_for_each_segment(bv, bio, *iter, *iter) {
-			void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
+
 			bch2_checksum_update(&state, p, bv.bv_len);
-			kunmap_atomic(p);
+			kunmap_local(p);
 		}
 #else
 		__bio_for_each_bvec(bv, bio, *iter, *iter)
@@ -287,10 +288,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 #ifdef CONFIG_HIGHMEM
 		__bio_for_each_segment(bv, bio, *iter, *iter) {
-			void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+			void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
 			crypto_shash_update(desc, p, bv.bv_len);
-			kunmap_atomic(p);
+			kunmap_local(p);
 		}
 #else
 		__bio_for_each_bvec(bv, bio, *iter, *iter)
@@ -427,8 +428,9 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
 			extent_nonce(version, crc_old), bio);
 	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
-		bch_err(c, "checksum error in bch2_rechecksum_bio() (memory corruption or bug?)\n"
+		bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
 			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
+			__func__,
 			crc_old.csum.hi,
 			crc_old.csum.lo,
 			merged.hi,
...
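kmap_atomic() is deprecated in favor of kmap_local_page(): both create a short-lived mapping of a (possibly highmem) page, but the local variant does not disable preemption or pagefaults, and kunmap_local() accepts any address inside the mapped page, so the `+ bv.bv_offset` pointer can be passed back directly. The converted pattern, distilled into a hypothetical helper:

```c
/* Hypothetical helper distilling the converted pattern above. */
static void checksum_one_bvec(struct bch2_checksum_state *state,
			      struct bio_vec bv)
{
	void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;

	bch2_checksum_update(state, p, bv.bv_len);
	kunmap_local(p);	/* an offset into the mapped page is fine */
}
```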
@@ -643,7 +643,8 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 static u64 compression_opt_to_feature(unsigned v)
 {
 	unsigned type = bch2_compression_decode(v).type;
-	return 1ULL << bch2_compression_opt_to_feature[type];
+
+	return BIT_ULL(bch2_compression_opt_to_feature[type]);
 }
 int bch2_fs_compress_init(struct bch_fs *c)
...
@@ -517,7 +517,7 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
 	switch (type) {
 	case BCH_EXTENT_ENTRY_crc32:
 		set_common_fields(dst->crc32, src);
 		dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
 		break;
 	case BCH_EXTENT_ENTRY_crc64:
 		set_common_fields(dst->crc64, src);
@@ -915,11 +915,11 @@ bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
 		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
 			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
 				if (p1.ptr.dev == p2.ptr.dev &&
 				    p1.ptr.gen == p2.ptr.gen &&
 				    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
 				    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
 					return true;
 		return false;
 	} else {
...
@@ -2867,7 +2867,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 		folio = __filemap_get_folio(mapping, index,
 					    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
-		if (unlikely(IS_ERR_OR_NULL(folio))) {
+		if (IS_ERR_OR_NULL(folio)) {
 			ret = -ENOMEM;
 			goto out;
 		}
...
@@ -2435,6 +2435,7 @@ static void __bch2_read_endio(struct work_struct *work)
 	if (rbio->bounce) {
 		struct bvec_iter src_iter = src->bi_iter;
+
 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
 	}
 }
...
@@ -52,7 +52,7 @@ enum __bch_write_flags {
 };
 enum bch_write_flags {
-#define x(f)	BCH_WRITE_##f = 1U << __BCH_WRITE_##f,
+#define x(f)	BCH_WRITE_##f = BIT(__BCH_WRITE_##f),
 	BCH_WRITE_FLAGS()
 #undef x
 };
...
@@ -63,6 +63,7 @@ journal_seq_to_buf(struct journal *j, u64 seq)
 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
 {
 	unsigned i;
+
 	for (i = 0; i < ARRAY_SIZE(p->list); i++)
 		INIT_LIST_HEAD(&p->list[i]);
 	INIT_LIST_HEAD(&p->flushed);
@@ -514,8 +515,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
 	int ret;
 	closure_wait_event(&j->async_wait,
-			   (ret = __journal_res_get(j, res, flags)) !=
-			   -BCH_ERR_journal_res_get_blocked||
+			   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
 			   (flags & JOURNAL_RES_GET_NONBLOCK));
 	return ret;
 }
...
@@ -1053,6 +1053,7 @@ static void bch2_journal_read_device(struct closure *cl)
 		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
 		for (i = 0; i < 3; i++) {
 			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
+
 			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
 		}
 		ja->sectors_free = 0;
@@ -1629,7 +1630,6 @@ static void do_journal_write(struct closure *cl)
 	}
 	continue_at(cl, journal_write_done, c->io_complete_wq);
-	return;
 }
 static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
...
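The deleted `return;` addresses checkpatch's "void function return statements are not generally useful": a bare return as the last statement of a void function is dead weight. A hypothetical illustration of the flagged form (helper names illustrative):

```c
/* Hypothetical function showing the pattern checkpatch flags. */
static void finish_write(struct closure *cl)
{
	continue_at(cl, write_done_fn, system_wq);
	return;		/* redundant final return; deleting it silences checkpatch */
}
```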
@@ -345,7 +345,7 @@ static inline bool __journal_pin_drop(struct journal *j,
 	list_del_init(&pin->list);
 	/*
-	 * Unpinning a journal entry make make journal_next_bucket() succeed, if
+	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
 	 * writing a new last_seq will now make another bucket available:
 	 */
 	return atomic_dec_and_test(&pin_list->count) &&
...
@@ -648,7 +648,7 @@ static int bch2_journal_replay(struct bch_fs *c)
 	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
 	keys->gap = keys->nr;
-	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
+	keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
 	if (!keys_sorted)
 		return -BCH_ERR_ENOMEM_journal_replay;
@@ -1403,7 +1403,7 @@ int bch2_fs_recovery(struct bch_fs *c)
 	}
 	c->journal_replay_seq_start	= last_seq;
-	c->journal_replay_seq_end	= blacklist_seq - 1;;
+	c->journal_replay_seq_end	= blacklist_seq - 1;
 	if (c->opts.reconstruct_alloc) {
 		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
...
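The kvmalloc_array() hunk restores the documented argument order: like kcalloc(), the signature is kvmalloc_array(count, element_size, gfp_flags), element count first. Since the internal overflow check multiplies the two values either way, the swapped form happened to allocate the same number of bytes; the fix is about matching the API's documented convention. A sketch of correct usage (helper and element type are hypothetical):

```c
/* Hypothetical helper showing the documented argument order:
 * element count first, element size second, GFP flags last. */
static struct journal_key **alloc_keys_sorted(size_t nr)
{
	return kvmalloc_array(nr, sizeof(struct journal_key *), GFP_KERNEL);
}
```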
@@ -553,7 +553,9 @@ static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
 		d = (src_f ? le32_to_cpu(src_f->u64s) : 0) -
 		    (dst_f ? le32_to_cpu(dst_f->u64s) : 0);
 		if (d > 0) {
-			int ret = bch2_sb_realloc(dst_handle, le32_to_cpu(dst_handle->sb->u64s) + d);
+			int ret = bch2_sb_realloc(dst_handle,
+					le32_to_cpu(dst_handle->sb->u64s) + d);
+
 			if (ret)
 				return ret;
...
@@ -58,6 +58,7 @@ struct bch_sb_field_ops {
 static inline __le64 bch2_sb_magic(struct bch_fs *c)
 {
 	__le64 ret;
+
 	memcpy(&ret, &c->sb.uuid, sizeof(ret));
 	return ret;
 }
...
@@ -216,6 +216,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[])
 	while ((p = strsep(&s, ","))) {
 		int flag = match_string(list, -1, p);
+
 		if (flag < 0) {
 			ret = -1;
 			break;
@@ -797,9 +798,10 @@ void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
 	struct bvec_iter iter;
 	__bio_for_each_segment(bv, dst, iter, dst_iter) {
-		void *dstp = kmap_atomic(bv.bv_page);
+		void *dstp = kmap_local_page(bv.bv_page);
+
 		memcpy(dstp + bv.bv_offset, src, bv.bv_len);
-		kunmap_atomic(dstp);
+		kunmap_local(dstp);
 		src += bv.bv_len;
 	}
@@ -811,9 +813,10 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
 	struct bvec_iter iter;
 	__bio_for_each_segment(bv, src, iter, src_iter) {
-		void *srcp = kmap_atomic(bv.bv_page);
+		void *srcp = kmap_local_page(bv.bv_page);
+
 		memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
-		kunmap_atomic(srcp);
+		kunmap_local(srcp);
 		dst += bv.bv_len;
 	}
...
@@ -467,8 +467,10 @@ struct bch_pd_controller {
 	s64			last_change;
 	s64			last_target;
-	/* If true, the rate will not increase if bch2_ratelimit_delay()
-	 * is not being called often enough. */
+	/*
+	 * If true, the rate will not increase if bch2_ratelimit_delay()
+	 * is not being called often enough.
+	 */
 	bool			backpressure;
 };
@@ -604,6 +606,7 @@ static inline void __memcpy_u64s(void *dst, const void *src,
 {
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("rep ; movsq"
 		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
 		     : "0" (u64s), "1" (dst), "2" (src)
@@ -680,6 +683,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("std ;\n"
 		     "rep ; movsq\n"
 		     "cld ;\n"
...
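The bch_pd_controller hunk also shows the preferred multi-line comment style: the opening `/*` stands alone, each line leads with ` * `, and the closing ` */` gets its own line:

```c
/*
 * Preferred kernel block-comment style: nothing after the opening
 * delimiter, one leading asterisk per line, closing on its own line.
 */
```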
@@ -59,6 +59,7 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
 	if (likely(bytes < 9)) {
 		__le64 v_le = 0;
+
 		memcpy(&v_le, in, bytes);
 		v = le64_to_cpu(v_le);
 		v >>= bytes;
...