Commit 6b473de8 authored by Ryusuke Konishi's avatar Ryusuke Konishi Committed by Andrew Morton

nilfs2: convert sufile to use kmap_local

Concerning the code of the metadata file sufile for segment management,
convert all parts that use the deprecated kmap_atomic() to use
kmap_local_page()/kunmap_local(). All transformations are directly possible here.

Link: https://lkml.kernel.org/r/20240122140202.6950-6-konishi.ryusuke@gmail.com
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a3baca58
...@@ -107,11 +107,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, ...@@ -107,11 +107,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
struct nilfs_sufile_header *header; struct nilfs_sufile_header *header;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(header_bh->b_page); kaddr = kmap_local_page(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, ncleanadd); le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
kunmap_atomic(kaddr); kunmap_local(kaddr);
mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
} }
...@@ -315,10 +315,10 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -315,10 +315,10 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
ret = nilfs_sufile_get_header_block(sufile, &header_bh); ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (ret < 0) if (ret < 0)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(header_bh->b_page); kaddr = kmap_local_page(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
last_alloc = le64_to_cpu(header->sh_last_alloc); last_alloc = le64_to_cpu(header->sh_last_alloc);
kunmap_atomic(kaddr); kunmap_local(kaddr);
nsegments = nilfs_sufile_get_nsegments(sufile); nsegments = nilfs_sufile_get_nsegments(sufile);
maxsegnum = sui->allocmax; maxsegnum = sui->allocmax;
...@@ -352,7 +352,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -352,7 +352,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
&su_bh); &su_bh);
if (ret < 0) if (ret < 0)
goto out_header; goto out_header;
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
...@@ -363,14 +363,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -363,14 +363,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
continue; continue;
/* found a clean segment */ /* found a clean segment */
nilfs_segment_usage_set_dirty(su); nilfs_segment_usage_set_dirty(su);
kunmap_atomic(kaddr); kunmap_local(kaddr);
kaddr = kmap_atomic(header_bh->b_page); kaddr = kmap_local_page(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, -1); le64_add_cpu(&header->sh_ncleansegs, -1);
le64_add_cpu(&header->sh_ndirtysegs, 1); le64_add_cpu(&header->sh_ndirtysegs, 1);
header->sh_last_alloc = cpu_to_le64(segnum); header->sh_last_alloc = cpu_to_le64(segnum);
kunmap_atomic(kaddr); kunmap_local(kaddr);
sui->ncleansegs--; sui->ncleansegs--;
mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
...@@ -384,7 +384,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -384,7 +384,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
goto out_header; goto out_header;
} }
kunmap_atomic(kaddr); kunmap_local(kaddr);
brelse(su_bh); brelse(su_bh);
} }
...@@ -406,16 +406,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, ...@@ -406,16 +406,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
struct nilfs_segment_usage *su; struct nilfs_segment_usage *su;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (unlikely(!nilfs_segment_usage_clean(su))) { if (unlikely(!nilfs_segment_usage_clean(su))) {
nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean", nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
__func__, (unsigned long long)segnum); __func__, (unsigned long long)segnum);
kunmap_atomic(kaddr); kunmap_local(kaddr);
return; return;
} }
nilfs_segment_usage_set_dirty(su); nilfs_segment_usage_set_dirty(su);
kunmap_atomic(kaddr); kunmap_local(kaddr);
nilfs_sufile_mod_counter(header_bh, -1, 1); nilfs_sufile_mod_counter(header_bh, -1, 1);
NILFS_SUI(sufile)->ncleansegs--; NILFS_SUI(sufile)->ncleansegs--;
...@@ -432,11 +432,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, ...@@ -432,11 +432,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
void *kaddr; void *kaddr;
int clean, dirty; int clean, dirty;
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) && if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
su->su_nblocks == cpu_to_le32(0)) { su->su_nblocks == cpu_to_le32(0)) {
kunmap_atomic(kaddr); kunmap_local(kaddr);
return; return;
} }
clean = nilfs_segment_usage_clean(su); clean = nilfs_segment_usage_clean(su);
...@@ -446,7 +446,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, ...@@ -446,7 +446,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
su->su_lastmod = cpu_to_le64(0); su->su_lastmod = cpu_to_le64(0);
su->su_nblocks = cpu_to_le32(0); su->su_nblocks = cpu_to_le32(0);
su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)); su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
kunmap_atomic(kaddr); kunmap_local(kaddr);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
NILFS_SUI(sufile)->ncleansegs -= clean; NILFS_SUI(sufile)->ncleansegs -= clean;
...@@ -463,12 +463,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, ...@@ -463,12 +463,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
void *kaddr; void *kaddr;
int sudirty; int sudirty;
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_clean(su)) { if (nilfs_segment_usage_clean(su)) {
nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean", nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
__func__, (unsigned long long)segnum); __func__, (unsigned long long)segnum);
kunmap_atomic(kaddr); kunmap_local(kaddr);
return; return;
} }
if (unlikely(nilfs_segment_usage_error(su))) if (unlikely(nilfs_segment_usage_error(su)))
...@@ -481,7 +481,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, ...@@ -481,7 +481,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
(unsigned long long)segnum); (unsigned long long)segnum);
nilfs_segment_usage_set_clean(su); nilfs_segment_usage_set_clean(su);
kunmap_atomic(kaddr); kunmap_local(kaddr);
mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0); nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
...@@ -509,12 +509,12 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) ...@@ -509,12 +509,12 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
if (ret) if (ret)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(bh->b_page); kaddr = kmap_local_page(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
if (unlikely(nilfs_segment_usage_error(su))) { if (unlikely(nilfs_segment_usage_error(su))) {
struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
kunmap_atomic(kaddr); kunmap_local(kaddr);
brelse(bh); brelse(bh);
if (nilfs_segment_is_active(nilfs, segnum)) { if (nilfs_segment_is_active(nilfs, segnum)) {
nilfs_error(sufile->i_sb, nilfs_error(sufile->i_sb,
...@@ -532,7 +532,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) ...@@ -532,7 +532,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
ret = -EIO; ret = -EIO;
} else { } else {
nilfs_segment_usage_set_dirty(su); nilfs_segment_usage_set_dirty(su);
kunmap_atomic(kaddr); kunmap_local(kaddr);
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
brelse(bh); brelse(bh);
...@@ -562,7 +562,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, ...@@ -562,7 +562,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
if (ret < 0) if (ret < 0)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(bh->b_page); kaddr = kmap_local_page(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
if (modtime) { if (modtime) {
/* /*
...@@ -573,7 +573,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, ...@@ -573,7 +573,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
su->su_lastmod = cpu_to_le64(modtime); su->su_lastmod = cpu_to_le64(modtime);
} }
su->su_nblocks = cpu_to_le32(nblocks); su->su_nblocks = cpu_to_le32(nblocks);
kunmap_atomic(kaddr); kunmap_local(kaddr);
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
...@@ -614,7 +614,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) ...@@ -614,7 +614,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
if (ret < 0) if (ret < 0)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(header_bh->b_page); kaddr = kmap_local_page(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile); sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs); sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
...@@ -624,7 +624,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) ...@@ -624,7 +624,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
spin_lock(&nilfs->ns_last_segment_lock); spin_lock(&nilfs->ns_last_segment_lock);
sustat->ss_prot_seq = nilfs->ns_prot_seq; sustat->ss_prot_seq = nilfs->ns_prot_seq;
spin_unlock(&nilfs->ns_last_segment_lock); spin_unlock(&nilfs->ns_last_segment_lock);
kunmap_atomic(kaddr); kunmap_local(kaddr);
brelse(header_bh); brelse(header_bh);
out_sem: out_sem:
...@@ -640,15 +640,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, ...@@ -640,15 +640,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
void *kaddr; void *kaddr;
int suclean; int suclean;
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_error(su)) { if (nilfs_segment_usage_error(su)) {
kunmap_atomic(kaddr); kunmap_local(kaddr);
return; return;
} }
suclean = nilfs_segment_usage_clean(su); suclean = nilfs_segment_usage_clean(su);
nilfs_segment_usage_set_error(su); nilfs_segment_usage_set_error(su);
kunmap_atomic(kaddr); kunmap_local(kaddr);
if (suclean) { if (suclean) {
nilfs_sufile_mod_counter(header_bh, -1, 0); nilfs_sufile_mod_counter(header_bh, -1, 0);
...@@ -717,7 +717,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -717,7 +717,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
/* hole */ /* hole */
continue; continue;
} }
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
su2 = su; su2 = su;
...@@ -726,7 +726,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -726,7 +726,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
~BIT(NILFS_SEGMENT_USAGE_ERROR)) || ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) { nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY; ret = -EBUSY;
kunmap_atomic(kaddr); kunmap_local(kaddr);
brelse(su_bh); brelse(su_bh);
goto out_header; goto out_header;
} }
...@@ -738,7 +738,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -738,7 +738,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
nc++; nc++;
} }
} }
kunmap_atomic(kaddr); kunmap_local(kaddr);
if (nc > 0) { if (nc > 0) {
mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
ncleaned += nc; ncleaned += nc;
...@@ -823,10 +823,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) ...@@ -823,10 +823,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
sui->allocmin = 0; sui->allocmin = 0;
} }
kaddr = kmap_atomic(header_bh->b_page); kaddr = kmap_local_page(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs); header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
kunmap_atomic(kaddr); kunmap_local(kaddr);
mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
...@@ -891,7 +891,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, ...@@ -891,7 +891,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
continue; continue;
} }
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
for (j = 0; j < n; for (j = 0; j < n;
...@@ -904,7 +904,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, ...@@ -904,7 +904,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
si->sui_flags |= si->sui_flags |=
BIT(NILFS_SEGMENT_USAGE_ACTIVE); BIT(NILFS_SEGMENT_USAGE_ACTIVE);
} }
kunmap_atomic(kaddr); kunmap_local(kaddr);
brelse(su_bh); brelse(su_bh);
} }
ret = nsegs; ret = nsegs;
...@@ -973,7 +973,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, ...@@ -973,7 +973,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
goto out_header; goto out_header;
for (;;) { for (;;) {
kaddr = kmap_atomic(bh->b_page); kaddr = kmap_local_page(bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, sup->sup_segnum, bh, kaddr); sufile, sup->sup_segnum, bh, kaddr);
...@@ -1010,7 +1010,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, ...@@ -1010,7 +1010,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags); su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
} }
kunmap_atomic(kaddr); kunmap_local(kaddr);
sup = (void *)sup + supsz; sup = (void *)sup + supsz;
if (sup >= supend) if (sup >= supend)
...@@ -1115,7 +1115,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) ...@@ -1115,7 +1115,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
continue; continue;
} }
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
su_bh, kaddr); su_bh, kaddr);
for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) { for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
...@@ -1145,7 +1145,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) ...@@ -1145,7 +1145,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
} }
if (nblocks >= minlen) { if (nblocks >= minlen) {
kunmap_atomic(kaddr); kunmap_local(kaddr);
ret = blkdev_issue_discard(nilfs->ns_bdev, ret = blkdev_issue_discard(nilfs->ns_bdev,
start * sects_per_block, start * sects_per_block,
...@@ -1157,7 +1157,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) ...@@ -1157,7 +1157,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
} }
ndiscarded += nblocks; ndiscarded += nblocks;
kaddr = kmap_atomic(su_bh->b_page); kaddr = kmap_local_page(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
} }
...@@ -1166,7 +1166,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) ...@@ -1166,7 +1166,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
start = seg_start; start = seg_start;
nblocks = seg_end - seg_start + 1; nblocks = seg_end - seg_start + 1;
} }
kunmap_atomic(kaddr); kunmap_local(kaddr);
put_bh(su_bh); put_bh(su_bh);
} }
...@@ -1246,10 +1246,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize, ...@@ -1246,10 +1246,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
goto failed; goto failed;
sui = NILFS_SUI(sufile); sui = NILFS_SUI(sufile);
kaddr = kmap_atomic(header_bh->b_page); kaddr = kmap_local_page(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs); sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
kunmap_atomic(kaddr); kunmap_local(kaddr);
brelse(header_bh); brelse(header_bh);
sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1; sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment