Commit 7b9c0976 authored by Cong Wang, committed by Cong Wang

nilfs2: remove the second argument of k[un]map_atomic()

Acked-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Signed-off-by: Cong Wang <amwang@redhat.com>
parent 2b86ce2d
This diff is collapsed.
...@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req) ...@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
struct nilfs_dat_entry *entry; struct nilfs_dat_entry *entry;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr); req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MAX); entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
entry->de_blocknr = cpu_to_le64(0); entry->de_blocknr = cpu_to_le64(0);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nilfs_palloc_commit_alloc_entry(dat, req); nilfs_palloc_commit_alloc_entry(dat, req);
nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_entry(dat, req);
...@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat, ...@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
struct nilfs_dat_entry *entry; struct nilfs_dat_entry *entry;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr); req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(NILFS_CNO_MIN); entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
entry->de_end = cpu_to_le64(NILFS_CNO_MIN); entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
entry->de_blocknr = cpu_to_le64(0); entry->de_blocknr = cpu_to_le64(0);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_entry(dat, req);
nilfs_palloc_commit_free_entry(dat, req); nilfs_palloc_commit_free_entry(dat, req);
...@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, ...@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
struct nilfs_dat_entry *entry; struct nilfs_dat_entry *entry;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr); req->pr_entry_bh, kaddr);
entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
entry->de_blocknr = cpu_to_le64(blocknr); entry->de_blocknr = cpu_to_le64(blocknr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_entry(dat, req);
} }
...@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) ...@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
return ret; return ret;
} }
kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr); req->pr_entry_bh, kaddr);
start = le64_to_cpu(entry->de_start); start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr); blocknr = le64_to_cpu(entry->de_blocknr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
if (blocknr == 0) { if (blocknr == 0) {
ret = nilfs_palloc_prepare_free_entry(dat, req); ret = nilfs_palloc_prepare_free_entry(dat, req);
...@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, ...@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
sector_t blocknr; sector_t blocknr;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr); req->pr_entry_bh, kaddr);
end = start = le64_to_cpu(entry->de_start); end = start = le64_to_cpu(entry->de_start);
...@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, ...@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
} }
entry->de_end = cpu_to_le64(end); entry->de_end = cpu_to_le64(end);
blocknr = le64_to_cpu(entry->de_blocknr); blocknr = le64_to_cpu(entry->de_blocknr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
if (blocknr == 0) if (blocknr == 0)
nilfs_dat_commit_free(dat, req); nilfs_dat_commit_free(dat, req);
...@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req) ...@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
sector_t blocknr; sector_t blocknr;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req->pr_entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
req->pr_entry_bh, kaddr); req->pr_entry_bh, kaddr);
start = le64_to_cpu(entry->de_start); start = le64_to_cpu(entry->de_start);
blocknr = le64_to_cpu(entry->de_blocknr); blocknr = le64_to_cpu(entry->de_blocknr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
if (start == nilfs_mdt_cno(dat) && blocknr == 0) if (start == nilfs_mdt_cno(dat) && blocknr == 0)
nilfs_palloc_abort_free_entry(dat, req); nilfs_palloc_abort_free_entry(dat, req);
...@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) ...@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
} }
} }
kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__, printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
(unsigned long long)vblocknr, (unsigned long long)vblocknr,
(unsigned long long)le64_to_cpu(entry->de_start), (unsigned long long)le64_to_cpu(entry->de_start),
(unsigned long long)le64_to_cpu(entry->de_end)); (unsigned long long)le64_to_cpu(entry->de_end));
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(entry_bh); brelse(entry_bh);
return -EINVAL; return -EINVAL;
} }
WARN_ON(blocknr == 0); WARN_ON(blocknr == 0);
entry->de_blocknr = cpu_to_le64(blocknr); entry->de_blocknr = cpu_to_le64(blocknr);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
mark_buffer_dirty(entry_bh); mark_buffer_dirty(entry_bh);
nilfs_mdt_mark_dirty(dat); nilfs_mdt_mark_dirty(dat);
...@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) ...@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
} }
} }
kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
blocknr = le64_to_cpu(entry->de_blocknr); blocknr = le64_to_cpu(entry->de_blocknr);
if (blocknr == 0) { if (blocknr == 0) {
...@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) ...@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
*blocknrp = blocknr; *blocknrp = blocknr;
out: out:
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(entry_bh); brelse(entry_bh);
return ret; return ret;
} }
...@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, ...@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
0, &entry_bh); 0, &entry_bh);
if (ret < 0) if (ret < 0)
return ret; return ret;
kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(entry_bh->b_page);
/* last virtual block number in this block */ /* last virtual block number in this block */
first = vinfo->vi_vblocknr; first = vinfo->vi_vblocknr;
do_div(first, entries_per_block); do_div(first, entries_per_block);
...@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, ...@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
vinfo->vi_end = le64_to_cpu(entry->de_end); vinfo->vi_end = le64_to_cpu(entry->de_end);
vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
} }
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(entry_bh); brelse(entry_bh);
} }
......
...@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) ...@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
unlock_page(page); unlock_page(page);
goto fail; goto fail;
} }
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page);
memset(kaddr, 0, chunk_size); memset(kaddr, 0, chunk_size);
de = (struct nilfs_dir_entry *)kaddr; de = (struct nilfs_dir_entry *)kaddr;
de->name_len = 1; de->name_len = 1;
...@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) ...@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
de->inode = cpu_to_le64(parent->i_ino); de->inode = cpu_to_le64(parent->i_ino);
memcpy(de->name, "..\0", 4); memcpy(de->name, "..\0", 4);
nilfs_set_de_type(de, inode); nilfs_set_de_type(de, inode);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nilfs_commit_chunk(page, mapping, 0, chunk_size); nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail: fail:
page_cache_release(page); page_cache_release(page);
......
...@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) ...@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
return ret; return ret;
} }
kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0); kaddr = kmap_atomic(req.pr_entry_bh->b_page);
raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr, raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
req.pr_entry_bh, kaddr); req.pr_entry_bh, kaddr);
raw_inode->i_flags = 0; raw_inode->i_flags = 0;
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
mark_buffer_dirty(req.pr_entry_bh); mark_buffer_dirty(req.pr_entry_bh);
brelse(req.pr_entry_bh); brelse(req.pr_entry_bh);
......
...@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block, ...@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
set_buffer_mapped(bh); set_buffer_mapped(bh);
kaddr = kmap_atomic(bh->b_page, KM_USER0); kaddr = kmap_atomic(bh->b_page);
memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits); memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
if (init_block) if (init_block)
init_block(inode, bh, kaddr); init_block(inode, bh, kaddr);
flush_dcache_page(bh->b_page); flush_dcache_page(bh->b_page);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
set_buffer_uptodate(bh); set_buffer_uptodate(bh);
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
......
...@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) ...@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
struct page *spage = sbh->b_page, *dpage = dbh->b_page; struct page *spage = sbh->b_page, *dpage = dbh->b_page;
struct buffer_head *bh; struct buffer_head *bh;
kaddr0 = kmap_atomic(spage, KM_USER0); kaddr0 = kmap_atomic(spage);
kaddr1 = kmap_atomic(dpage, KM_USER1); kaddr1 = kmap_atomic(dpage);
memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size); memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
kunmap_atomic(kaddr1, KM_USER1); kunmap_atomic(kaddr1);
kunmap_atomic(kaddr0, KM_USER0); kunmap_atomic(kaddr0);
dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS; dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
dbh->b_blocknr = sbh->b_blocknr; dbh->b_blocknr = sbh->b_blocknr;
......
...@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, ...@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
if (unlikely(!bh_org)) if (unlikely(!bh_org))
return -EIO; return -EIO;
kaddr = kmap_atomic(page, KM_USER0); kaddr = kmap_atomic(page);
memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size); memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(bh_org); brelse(bh_org);
return 0; return 0;
} }
......
...@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf, ...@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
crc = crc32_le(crc, bh->b_data, bh->b_size); crc = crc32_le(crc, bh->b_data, bh->b_size);
} }
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
kaddr = kmap_atomic(bh->b_page, KM_USER0); kaddr = kmap_atomic(bh->b_page);
crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size); crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
} }
raw_sum->ss_datasum = cpu_to_le32(crc); raw_sum->ss_datasum = cpu_to_le32(crc);
} }
......
...@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, ...@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
struct nilfs_sufile_header *header; struct nilfs_sufile_header *header;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, ncleanadd); le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
} }
...@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
ret = nilfs_sufile_get_header_block(sufile, &header_bh); ret = nilfs_sufile_get_header_block(sufile, &header_bh);
if (ret < 0) if (ret < 0)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
ncleansegs = le64_to_cpu(header->sh_ncleansegs); ncleansegs = le64_to_cpu(header->sh_ncleansegs);
last_alloc = le64_to_cpu(header->sh_last_alloc); last_alloc = le64_to_cpu(header->sh_last_alloc);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nsegments = nilfs_sufile_get_nsegments(sufile); nsegments = nilfs_sufile_get_nsegments(sufile);
maxsegnum = sui->allocmax; maxsegnum = sui->allocmax;
...@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
&su_bh); &su_bh);
if (ret < 0) if (ret < 0)
goto out_header; goto out_header;
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
...@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
continue; continue;
/* found a clean segment */ /* found a clean segment */
nilfs_segment_usage_set_dirty(su); nilfs_segment_usage_set_dirty(su);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
le64_add_cpu(&header->sh_ncleansegs, -1); le64_add_cpu(&header->sh_ncleansegs, -1);
le64_add_cpu(&header->sh_ndirtysegs, 1); le64_add_cpu(&header->sh_ndirtysegs, 1);
header->sh_last_alloc = cpu_to_le64(segnum); header->sh_last_alloc = cpu_to_le64(segnum);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
sui->ncleansegs--; sui->ncleansegs--;
mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
...@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) ...@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
goto out_header; goto out_header;
} }
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(su_bh); brelse(su_bh);
} }
...@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, ...@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
struct nilfs_segment_usage *su; struct nilfs_segment_usage *su;
void *kaddr; void *kaddr;
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (unlikely(!nilfs_segment_usage_clean(su))) { if (unlikely(!nilfs_segment_usage_clean(su))) {
printk(KERN_WARNING "%s: segment %llu must be clean\n", printk(KERN_WARNING "%s: segment %llu must be clean\n",
__func__, (unsigned long long)segnum); __func__, (unsigned long long)segnum);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
return; return;
} }
nilfs_segment_usage_set_dirty(su); nilfs_segment_usage_set_dirty(su);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nilfs_sufile_mod_counter(header_bh, -1, 1); nilfs_sufile_mod_counter(header_bh, -1, 1);
NILFS_SUI(sufile)->ncleansegs--; NILFS_SUI(sufile)->ncleansegs--;
...@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, ...@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
void *kaddr; void *kaddr;
int clean, dirty; int clean, dirty;
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) && if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
su->su_nblocks == cpu_to_le32(0)) { su->su_nblocks == cpu_to_le32(0)) {
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
return; return;
} }
clean = nilfs_segment_usage_clean(su); clean = nilfs_segment_usage_clean(su);
...@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum, ...@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
su->su_lastmod = cpu_to_le64(0); su->su_lastmod = cpu_to_le64(0);
su->su_nblocks = cpu_to_le32(0); su->su_nblocks = cpu_to_le32(0);
su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY); su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
NILFS_SUI(sufile)->ncleansegs -= clean; NILFS_SUI(sufile)->ncleansegs -= clean;
...@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, ...@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
void *kaddr; void *kaddr;
int sudirty; int sudirty;
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_clean(su)) { if (nilfs_segment_usage_clean(su)) {
printk(KERN_WARNING "%s: segment %llu is already clean\n", printk(KERN_WARNING "%s: segment %llu is already clean\n",
__func__, (unsigned long long)segnum); __func__, (unsigned long long)segnum);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
return; return;
} }
WARN_ON(nilfs_segment_usage_error(su)); WARN_ON(nilfs_segment_usage_error(su));
...@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, ...@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
sudirty = nilfs_segment_usage_dirty(su); sudirty = nilfs_segment_usage_dirty(su);
nilfs_segment_usage_set_clean(su); nilfs_segment_usage_set_clean(su);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0); nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
...@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, ...@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
if (ret < 0) if (ret < 0)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(bh->b_page, KM_USER0); kaddr = kmap_atomic(bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
WARN_ON(nilfs_segment_usage_error(su)); WARN_ON(nilfs_segment_usage_error(su));
if (modtime) if (modtime)
su->su_lastmod = cpu_to_le64(modtime); su->su_lastmod = cpu_to_le64(modtime);
su->su_nblocks = cpu_to_le32(nblocks); su->su_nblocks = cpu_to_le32(nblocks);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
...@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) ...@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
if (ret < 0) if (ret < 0)
goto out_sem; goto out_sem;
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile); sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs); sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
...@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) ...@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
spin_lock(&nilfs->ns_last_segment_lock); spin_lock(&nilfs->ns_last_segment_lock);
sustat->ss_prot_seq = nilfs->ns_prot_seq; sustat->ss_prot_seq = nilfs->ns_prot_seq;
spin_unlock(&nilfs->ns_last_segment_lock); spin_unlock(&nilfs->ns_last_segment_lock);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(header_bh); brelse(header_bh);
out_sem: out_sem:
...@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, ...@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
void *kaddr; void *kaddr;
int suclean; int suclean;
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_error(su)) { if (nilfs_segment_usage_error(su)) {
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
return; return;
} }
suclean = nilfs_segment_usage_clean(su); suclean = nilfs_segment_usage_clean(su);
nilfs_segment_usage_set_error(su); nilfs_segment_usage_set_error(su);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
if (suclean) { if (suclean) {
nilfs_sufile_mod_counter(header_bh, -1, 0); nilfs_sufile_mod_counter(header_bh, -1, 0);
...@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
/* hole */ /* hole */
continue; continue;
} }
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
su2 = su; su2 = su;
...@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
~(1UL << NILFS_SEGMENT_USAGE_ERROR)) || ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) { nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY; ret = -EBUSY;
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(su_bh); brelse(su_bh);
goto out_header; goto out_header;
} }
...@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile, ...@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
nc++; nc++;
} }
} }
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
if (nc > 0) { if (nc > 0) {
mark_buffer_dirty(su_bh); mark_buffer_dirty(su_bh);
ncleaned += nc; ncleaned += nc;
...@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) ...@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
sui->ncleansegs -= nsegs - newnsegs; sui->ncleansegs -= nsegs - newnsegs;
} }
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs); header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
mark_buffer_dirty(header_bh); mark_buffer_dirty(header_bh);
nilfs_mdt_mark_dirty(sufile); nilfs_mdt_mark_dirty(sufile);
...@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, ...@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
continue; continue;
} }
kaddr = kmap_atomic(su_bh->b_page, KM_USER0); kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage( su = nilfs_sufile_block_get_segment_usage(
sufile, segnum, su_bh, kaddr); sufile, segnum, su_bh, kaddr);
for (j = 0; j < n; for (j = 0; j < n;
...@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, ...@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
si->sui_flags |= si->sui_flags |=
(1UL << NILFS_SEGMENT_USAGE_ACTIVE); (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
} }
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(su_bh); brelse(su_bh);
} }
ret = nsegs; ret = nsegs;
...@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize, ...@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
goto failed; goto failed;
sui = NILFS_SUI(sufile); sui = NILFS_SUI(sufile);
kaddr = kmap_atomic(header_bh->b_page, KM_USER0); kaddr = kmap_atomic(header_bh->b_page);
header = kaddr + bh_offset(header_bh); header = kaddr + bh_offset(header_bh);
sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs); sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
kunmap_atomic(kaddr, KM_USER0); kunmap_atomic(kaddr);
brelse(header_bh); brelse(header_bh);
sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1; sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment