Commit b012b323 authored by Linus Torvalds

Merge branch 'akpm' (patches from Andrew)

Merge still more updates from Andrew Morton:
 "16 patches.

  Subsystems affected by this patch series: ocfs2, nilfs2, mailmap, and
  mm (madvise, mlock, kfence, memory-failure, kasan, debug, kmemleak,
  and damon)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/damon: prevent activated scheme from sleeping by deactivated schemes
  mm/kmemleak: reset tag when compare object pointer
  doc/vm/page_owner.rst: remove content related to -c option
  tools/vm/page_owner_sort.c: remove -c option
  mm, kasan: fix __GFP_BITS_SHIFT definition breaking LOCKDEP
  mm,hwpoison: unmap poisoned page before invalidation
  mailmap: update Kirill's email
  mm: kfence: fix objcgs vector allocation
  mm/munlock: protect the per-CPU pagevec by a local_lock_t
  mm/munlock: update Documentation/vm/unevictable-lru.rst
  mm/munlock: add lru_add_drain() to fix memcg_stat_test
  nilfs2: get rid of nilfs_mapping_init()
  nilfs2: fix lockdep warnings during disk space reclamation
  nilfs2: fix lockdep warnings in page operations for btree nodes
  ocfs2: fix crash when mount with quota enabled
  Revert "mm: madvise: skip unmapped vma holes passed to process_madvise"
parents d0d642a5 78049e94
@@ -213,6 +213,7 @@ Kees Cook <keescook@chromium.org> <kees@ubuntu.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@intel.com>
 Keith Busch <kbusch@kernel.org> <keith.busch@linux.intel.com>
 Kenneth W Chen <kenneth.w.chen@intel.com>
+Kirill Tkhai <kirill.tkhai@openvz.org> <ktkhai@virtuozzo.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
......
@@ -125,7 +125,6 @@ Usage
 additional function:
 	Cull:
-		-c		Cull by comparing stacktrace instead of total block.
 		--cull <rules>
 			Specify culling rules.Culling syntax is key[,key[,...]].Choose a
 			multi-letter key from the **STANDARD FORMAT SPECIFIERS** section.
......
This diff is collapsed.
@@ -20,6 +20,23 @@
 #include "page.h"
 #include "btnode.h"
 
+
+/**
+ * nilfs_init_btnc_inode - initialize B-tree node cache inode
+ * @btnc_inode: inode to be initialized
+ *
+ * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
+ */
+void nilfs_init_btnc_inode(struct inode *btnc_inode)
+{
+        struct nilfs_inode_info *ii = NILFS_I(btnc_inode);
+
+        btnc_inode->i_mode = S_IFREG;
+        ii->i_flags = 0;
+        memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
+        mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
+}
+
 void nilfs_btnode_cache_clear(struct address_space *btnc)
 {
         invalidate_mapping_pages(btnc, 0, -1);
@@ -29,7 +46,7 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
 struct buffer_head *
 nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 {
-        struct inode *inode = NILFS_BTNC_I(btnc);
+        struct inode *inode = btnc->host;
         struct buffer_head *bh;
 
         bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
@@ -57,7 +74,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                               struct buffer_head **pbh, sector_t *submit_ptr)
 {
         struct buffer_head *bh;
-        struct inode *inode = NILFS_BTNC_I(btnc);
+        struct inode *inode = btnc->host;
         struct page *page;
         int err;
@@ -157,7 +174,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                     struct nilfs_btnode_chkey_ctxt *ctxt)
 {
         struct buffer_head *obh, *nbh;
-        struct inode *inode = NILFS_BTNC_I(btnc);
+        struct inode *inode = btnc->host;
         __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
         int err;
......
@@ -30,6 +30,7 @@ struct nilfs_btnode_chkey_ctxt {
         struct buffer_head *newbh;
 };
 
+void nilfs_init_btnc_inode(struct inode *btnc_inode);
 void nilfs_btnode_cache_clear(struct address_space *);
 struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
                                               __u64 blocknr);
......
@@ -58,7 +58,8 @@ static void nilfs_btree_free_path(struct nilfs_btree_path *path)
 static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
                                      __u64 ptr, struct buffer_head **bhp)
 {
-        struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+        struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+        struct address_space *btnc = btnc_inode->i_mapping;
         struct buffer_head *bh;
 
         bh = nilfs_btnode_create_block(btnc, ptr);
@@ -470,7 +471,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
                                    struct buffer_head **bhp,
                                    const struct nilfs_btree_readahead_info *ra)
 {
-        struct address_space *btnc = &NILFS_BMAP_I(btree)->i_btnode_cache;
+        struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+        struct address_space *btnc = btnc_inode->i_mapping;
         struct buffer_head *bh, *ra_bh;
         sector_t submit_ptr = 0;
         int ret;
@@ -1741,6 +1743,10 @@ nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key,
                 dat = nilfs_bmap_get_dat(btree);
         }
 
+        ret = nilfs_attach_btree_node_cache(&NILFS_BMAP_I(btree)->vfs_inode);
+        if (ret < 0)
+                return ret;
+
         ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
         if (ret < 0)
                 return ret;
@@ -1913,7 +1919,7 @@ static int nilfs_btree_prepare_update_v(struct nilfs_bmap *btree,
                 path[level].bp_ctxt.newkey = path[level].bp_newreq.bpr_ptr;
                 path[level].bp_ctxt.bh = path[level].bp_bh;
                 ret = nilfs_btnode_prepare_change_key(
-                        &NILFS_BMAP_I(btree)->i_btnode_cache,
+                        NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                         &path[level].bp_ctxt);
                 if (ret < 0) {
                         nilfs_dat_abort_update(dat,
@@ -1939,7 +1945,7 @@ static void nilfs_btree_commit_update_v(struct nilfs_bmap *btree,
         if (buffer_nilfs_node(path[level].bp_bh)) {
                 nilfs_btnode_commit_change_key(
-                        &NILFS_BMAP_I(btree)->i_btnode_cache,
+                        NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                         &path[level].bp_ctxt);
                 path[level].bp_bh = path[level].bp_ctxt.bh;
         }
@@ -1958,7 +1964,7 @@ static void nilfs_btree_abort_update_v(struct nilfs_bmap *btree,
                                    &path[level].bp_newreq.bpr_req);
         if (buffer_nilfs_node(path[level].bp_bh))
                 nilfs_btnode_abort_change_key(
-                        &NILFS_BMAP_I(btree)->i_btnode_cache,
+                        NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                         &path[level].bp_ctxt);
 }
@@ -2134,7 +2140,8 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
 static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *btree,
                                              struct list_head *listp)
 {
-        struct address_space *btcache = &NILFS_BMAP_I(btree)->i_btnode_cache;
+        struct inode *btnc_inode = NILFS_BMAP_I(btree)->i_assoc_inode;
+        struct address_space *btcache = btnc_inode->i_mapping;
         struct list_head lists[NILFS_BTREE_LEVEL_MAX];
         struct pagevec pvec;
         struct buffer_head *bh, *head;
@@ -2188,12 +2195,12 @@ static int nilfs_btree_assign_p(struct nilfs_bmap *btree,
                 path[level].bp_ctxt.newkey = blocknr;
                 path[level].bp_ctxt.bh = *bh;
                 ret = nilfs_btnode_prepare_change_key(
-                        &NILFS_BMAP_I(btree)->i_btnode_cache,
+                        NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                         &path[level].bp_ctxt);
                 if (ret < 0)
                         return ret;
                 nilfs_btnode_commit_change_key(
-                        &NILFS_BMAP_I(btree)->i_btnode_cache,
+                        NILFS_BMAP_I(btree)->i_assoc_inode->i_mapping,
                         &path[level].bp_ctxt);
                 *bh = path[level].bp_ctxt.bh;
         }
@@ -2398,6 +2405,10 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
         if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
                 ret = -EIO;
+        else
+                ret = nilfs_attach_btree_node_cache(
+                        &NILFS_BMAP_I(bmap)->vfs_inode);
+
         return ret;
 }
......
@@ -497,7 +497,9 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
         di = NILFS_DAT_I(dat);
         lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
         nilfs_palloc_setup_cache(dat, &di->palloc_cache);
-        nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+        err = nilfs_mdt_setup_shadow_map(dat, &di->shadow);
+        if (err)
+                goto failed;
 
         err = nilfs_read_inode_common(dat, raw_inode);
         if (err)
......
@@ -126,9 +126,10 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
                                    __u64 vbn, struct buffer_head **out_bh)
 {
+        struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
         int ret;
 
-        ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
+        ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
                                         vbn ? : pbn, pbn, REQ_OP_READ, 0,
                                         out_bh, &pbn);
         if (ret == -EEXIST) /* internal code (cache hit) */
@@ -170,7 +171,7 @@ int nilfs_init_gcinode(struct inode *inode)
         ii->i_flags = 0;
         nilfs_bmap_init_gc(ii->i_bmap);
 
-        return 0;
+        return nilfs_attach_btree_node_cache(inode);
 }
 
 /**
@@ -185,7 +186,7 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
                 ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
                 list_del_init(&ii->i_dirty);
                 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-                nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+                nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                 iput(&ii->vfs_inode);
         }
 }
@@ -29,12 +29,16 @@
  * @cno: checkpoint number
  * @root: pointer on NILFS root object (mounted checkpoint)
  * @for_gc: inode for GC flag
+ * @for_btnc: inode for B-tree node cache flag
+ * @for_shadow: inode for shadowed page cache flag
  */
 struct nilfs_iget_args {
         u64 ino;
         __u64 cno;
         struct nilfs_root *root;
-        int for_gc;
+        bool for_gc;
+        bool for_btnc;
+        bool for_shadow;
 };
 
 static int nilfs_iget_test(struct inode *inode, void *opaque);
@@ -312,7 +316,8 @@ static int nilfs_insert_inode_locked(struct inode *inode,
                                      unsigned long ino)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = root, .cno = 0, .for_gc = 0
+                .ino = ino, .root = root, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = false
         };
 
         return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
@@ -525,6 +530,19 @@ static int nilfs_iget_test(struct inode *inode, void *opaque)
                 return 0;
 
         ii = NILFS_I(inode);
+        if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
+                if (!args->for_btnc)
+                        return 0;
+        } else if (args->for_btnc) {
+                return 0;
+        }
+        if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
+                if (!args->for_shadow)
+                        return 0;
+        } else if (args->for_shadow) {
+                return 0;
+        }
+
         if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                 return !args->for_gc;
@@ -536,15 +554,17 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
         struct nilfs_iget_args *args = opaque;
 
         inode->i_ino = args->ino;
-        if (args->for_gc) {
-                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
-                NILFS_I(inode)->i_cno = args->cno;
-                NILFS_I(inode)->i_root = NULL;
-        } else {
-                if (args->root && args->ino == NILFS_ROOT_INO)
-                        nilfs_get_root(args->root);
-                NILFS_I(inode)->i_root = args->root;
-        }
+        NILFS_I(inode)->i_cno = args->cno;
+        NILFS_I(inode)->i_root = args->root;
+        if (args->root && args->ino == NILFS_ROOT_INO)
+                nilfs_get_root(args->root);
+
+        if (args->for_gc)
+                NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
+        if (args->for_btnc)
+                NILFS_I(inode)->i_state |= BIT(NILFS_I_BTNC);
+        if (args->for_shadow)
+                NILFS_I(inode)->i_state |= BIT(NILFS_I_SHADOW);
 
         return 0;
 }
@@ -552,7 +572,8 @@ struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                             unsigned long ino)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = root, .cno = 0, .for_gc = 0
+                .ino = ino, .root = root, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = false
         };
 
         return ilookup5(sb, ino, nilfs_iget_test, &args);
@@ -562,7 +583,8 @@ struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                 unsigned long ino)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = root, .cno = 0, .for_gc = 0
+                .ino = ino, .root = root, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = false
         };
 
         return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
@@ -593,7 +615,8 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                 __u64 cno)
 {
         struct nilfs_iget_args args = {
-                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
+                .ino = ino, .root = NULL, .cno = cno, .for_gc = true,
+                .for_btnc = false, .for_shadow = false
         };
         struct inode *inode;
         int err;
@@ -613,6 +636,113 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
         return inode;
 }
 
+/**
+ * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode
+ * @inode: inode object
+ *
+ * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode,
+ * or does nothing if the inode already has it.  This function allocates
+ * an additional inode to maintain page cache of B-tree nodes one-on-one.
+ *
+ * Return Value: On success, 0 is returned. On errors, one of the following
+ * negative error code is returned.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+int nilfs_attach_btree_node_cache(struct inode *inode)
+{
+        struct nilfs_inode_info *ii = NILFS_I(inode);
+        struct inode *btnc_inode;
+        struct nilfs_iget_args args;
+
+        if (ii->i_assoc_inode)
+                return 0;
+
+        args.ino = inode->i_ino;
+        args.root = ii->i_root;
+        args.cno = ii->i_cno;
+        args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
+        args.for_btnc = true;
+        args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
+
+        btnc_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                                  nilfs_iget_set, &args);
+        if (unlikely(!btnc_inode))
+                return -ENOMEM;
+        if (btnc_inode->i_state & I_NEW) {
+                nilfs_init_btnc_inode(btnc_inode);
+                unlock_new_inode(btnc_inode);
+        }
+        NILFS_I(btnc_inode)->i_assoc_inode = inode;
+        NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
+        ii->i_assoc_inode = btnc_inode;
+        return 0;
+}
+
+/**
+ * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode
+ * @inode: inode object
+ *
+ * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its
+ * holder inode bound to @inode, or does nothing if @inode doesn't have it.
+ */
+void nilfs_detach_btree_node_cache(struct inode *inode)
+{
+        struct nilfs_inode_info *ii = NILFS_I(inode);
+        struct inode *btnc_inode = ii->i_assoc_inode;
+
+        if (btnc_inode) {
+                NILFS_I(btnc_inode)->i_assoc_inode = NULL;
+                ii->i_assoc_inode = NULL;
+                iput(btnc_inode);
+        }
+}
+
+/**
+ * nilfs_iget_for_shadow - obtain inode for shadow mapping
+ * @inode: inode object that uses shadow mapping
+ *
+ * nilfs_iget_for_shadow() allocates a pair of inodes that holds page
+ * caches for shadow mapping.  The page cache for data pages is set up
+ * in one inode and the one for b-tree node pages is set up in the
+ * other inode, which is attached to the former inode.
+ *
+ * Return Value: On success, a pointer to the inode for data pages is
+ * returned. On errors, one of the following negative error code is returned
+ * in a pointer type.
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
+struct inode *nilfs_iget_for_shadow(struct inode *inode)
+{
+        struct nilfs_iget_args args = {
+                .ino = inode->i_ino, .root = NULL, .cno = 0, .for_gc = false,
+                .for_btnc = false, .for_shadow = true
+        };
+        struct inode *s_inode;
+        int err;
+
+        s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
+                               nilfs_iget_set, &args);
+        if (unlikely(!s_inode))
+                return ERR_PTR(-ENOMEM);
+        if (!(s_inode->i_state & I_NEW))
+                return inode;
+
+        NILFS_I(s_inode)->i_flags = 0;
+        memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap));
+        mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS);
+
+        err = nilfs_attach_btree_node_cache(s_inode);
+        if (unlikely(err)) {
+                iget_failed(s_inode);
+                return ERR_PTR(err);
+        }
+        unlock_new_inode(s_inode);
+
+        return s_inode;
+}
+
 void nilfs_write_inode_common(struct inode *inode,
                               struct nilfs_inode *raw_inode, int has_bmap)
 {
@@ -760,7 +890,8 @@ static void nilfs_clear_inode(struct inode *inode)
         if (test_bit(NILFS_I_BMAP, &ii->i_state))
                 nilfs_bmap_clear(ii->i_bmap);
 
-        nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+        if (!test_bit(NILFS_I_BTNC, &ii->i_state))
+                nilfs_detach_btree_node_cache(inode);
 
         if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                 nilfs_put_root(ii->i_root);
......
@@ -471,9 +471,18 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
 void nilfs_mdt_clear(struct inode *inode)
 {
         struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
+        struct nilfs_shadow_map *shadow = mdi->mi_shadow;
 
         if (mdi->mi_palloc_cache)
                 nilfs_palloc_destroy_cache(inode);
+
+        if (shadow) {
+                struct inode *s_inode = shadow->inode;
+
+                shadow->inode = NULL;
+                iput(s_inode);
+                mdi->mi_shadow = NULL;
+        }
 }
 
 /**
@@ -507,12 +516,15 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
                                struct nilfs_shadow_map *shadow)
 {
         struct nilfs_mdt_info *mi = NILFS_MDT(inode);
+        struct inode *s_inode;
 
         INIT_LIST_HEAD(&shadow->frozen_buffers);
-        address_space_init_once(&shadow->frozen_data);
-        nilfs_mapping_init(&shadow->frozen_data, inode);
-        address_space_init_once(&shadow->frozen_btnodes);
-        nilfs_mapping_init(&shadow->frozen_btnodes, inode);
+
+        s_inode = nilfs_iget_for_shadow(inode);
+        if (IS_ERR(s_inode))
+                return PTR_ERR(s_inode);
+
+        shadow->inode = s_inode;
         mi->mi_shadow = shadow;
         return 0;
 }
@@ -526,14 +538,15 @@ int nilfs_mdt_save_to_shadow_map(struct inode *inode)
         struct nilfs_mdt_info *mi = NILFS_MDT(inode);
         struct nilfs_inode_info *ii = NILFS_I(inode);
         struct nilfs_shadow_map *shadow = mi->mi_shadow;
+        struct inode *s_inode = shadow->inode;
         int ret;
 
-        ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
+        ret = nilfs_copy_dirty_pages(s_inode->i_mapping, inode->i_mapping);
         if (ret)
                 goto out;
 
-        ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
-                                     &ii->i_btnode_cache);
+        ret = nilfs_copy_dirty_pages(NILFS_I(s_inode)->i_assoc_inode->i_mapping,
+                                     ii->i_assoc_inode->i_mapping);
         if (ret)
                 goto out;
@@ -549,7 +562,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
         struct page *page;
         int blkbits = inode->i_blkbits;
 
-        page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
+        page = grab_cache_page(shadow->inode->i_mapping, bh->b_page->index);
         if (!page)
                 return -ENOMEM;
@@ -581,7 +594,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
         struct page *page;
         int n;
 
-        page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
+        page = find_lock_page(shadow->inode->i_mapping, bh->b_page->index);
         if (page) {
                 if (page_has_buffers(page)) {
                         n = bh_offset(bh) >> inode->i_blkbits;
@@ -622,10 +635,11 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
                 nilfs_palloc_clear_cache(inode);
 
         nilfs_clear_dirty_pages(inode->i_mapping, true);
-        nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);
+        nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping);
 
-        nilfs_clear_dirty_pages(&ii->i_btnode_cache, true);
-        nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);
+        nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true);
+        nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping,
+                              NILFS_I(shadow->inode)->i_assoc_inode->i_mapping);
 
         nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);
@@ -640,10 +654,11 @@ void nilfs_mdt_clear_shadow_map(struct inode *inode)
 {
         struct nilfs_mdt_info *mi = NILFS_MDT(inode);
         struct nilfs_shadow_map *shadow = mi->mi_shadow;
+        struct inode *shadow_btnc_inode = NILFS_I(shadow->inode)->i_assoc_inode;
 
         down_write(&mi->mi_sem);
         nilfs_release_frozen_buffers(shadow);
-        truncate_inode_pages(&shadow->frozen_data, 0);
-        truncate_inode_pages(&shadow->frozen_btnodes, 0);
+        truncate_inode_pages(shadow->inode->i_mapping, 0);
+        truncate_inode_pages(shadow_btnc_inode->i_mapping, 0);
         up_write(&mi->mi_sem);
 }
@@ -18,14 +18,12 @@
 /**
  * struct nilfs_shadow_map - shadow mapping of meta data file
  * @bmap_store: shadow copy of bmap state
- * @frozen_data: shadowed dirty data pages
- * @frozen_btnodes: shadowed dirty b-tree nodes' pages
+ * @inode: holder of page caches used in shadow mapping
  * @frozen_buffers: list of frozen buffers
  */
 struct nilfs_shadow_map {
         struct nilfs_bmap_store bmap_store;
-        struct address_space frozen_data;
-        struct address_space frozen_btnodes;
+        struct inode *inode;
         struct list_head frozen_buffers;
 };
......
@@ -28,7 +28,7 @@
  * @i_xattr: <TODO>
  * @i_dir_start_lookup: page index of last successful search
  * @i_cno: checkpoint number for GC inode
- * @i_btnode_cache: cached pages of b-tree nodes
+ * @i_assoc_inode: associated inode (B-tree node cache holder or back pointer)
  * @i_dirty: list for connecting dirty files
  * @xattr_sem: semaphore for extended attributes processing
  * @i_bh: buffer contains disk inode
@@ -43,7 +43,7 @@ struct nilfs_inode_info {
         __u64 i_xattr;  /* sector_t ??? */
         __u32 i_dir_start_lookup;
         __u64 i_cno;            /* check point number for GC inode */
-        struct address_space i_btnode_cache;
+        struct inode *i_assoc_inode;
         struct list_head i_dirty;       /* List for connecting dirty files */
 
 #ifdef CONFIG_NILFS_XATTR
@@ -75,13 +75,6 @@ NILFS_BMAP_I(const struct nilfs_bmap *bmap)
         return container_of(bmap, struct nilfs_inode_info, i_bmap_data);
 }
 
-static inline struct inode *NILFS_BTNC_I(struct address_space *btnc)
-{
-        struct nilfs_inode_info *ii =
-                container_of(btnc, struct nilfs_inode_info, i_btnode_cache);
-        return &ii->vfs_inode;
-}
-
 /*
  * Dynamic state flags of NILFS on-memory inode (i_state)
  */
@@ -98,6 +91,8 @@ enum {
         NILFS_I_INODE_SYNC,             /* dsync is not allowed for inode */
         NILFS_I_BMAP,                   /* has bmap and btnode_cache */
         NILFS_I_GCINODE,                /* inode for GC, on memory only */
+        NILFS_I_BTNC,                   /* inode for btree node cache */
+        NILFS_I_SHADOW,                 /* inode for shadowed page cache */
 };
 
 /*
@@ -267,6 +262,9 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                          unsigned long ino);
 extern struct inode *nilfs_iget_for_gc(struct super_block *sb,
                                        unsigned long ino, __u64 cno);
+int nilfs_attach_btree_node_cache(struct inode *inode);
+void nilfs_detach_btree_node_cache(struct inode *inode);
+struct inode *nilfs_iget_for_shadow(struct inode *inode);
 extern void nilfs_update_inode(struct inode *, struct buffer_head *, int);
 extern void nilfs_truncate(struct inode *);
 extern void nilfs_evict_inode(struct inode *);
......
@@ -436,22 +436,12 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
         return nc;
 }
 
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode)
-{
-        mapping->host = inode;
-        mapping->flags = 0;
-        mapping_set_gfp_mask(mapping, GFP_NOFS);
-        mapping->private_data = NULL;
-        mapping->a_ops = &empty_aops;
-}
-
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- *    page dirty flags when it copies back pages from the shadow cache
- *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- *    (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ *    flag of pages when it copies back pages from shadow cache to the
+ *    original cache.
  *
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
......
@@ -43,7 +43,6 @@ int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
 void nilfs_copy_back_pages(struct address_space *, struct address_space *);
 void nilfs_clear_dirty_page(struct page *, bool);
 void nilfs_clear_dirty_pages(struct address_space *, bool);
-void nilfs_mapping_init(struct address_space *mapping, struct inode *inode);
 unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int,
                                             unsigned int);
 unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
......
@@ -733,15 +733,18 @@ static void nilfs_lookup_dirty_node_buffers(struct inode *inode,
                                             struct list_head *listp)
 {
         struct nilfs_inode_info *ii = NILFS_I(inode);
-        struct address_space *mapping = &ii->i_btnode_cache;
+        struct inode *btnc_inode = ii->i_assoc_inode;
         struct pagevec pvec;
         struct buffer_head *bh, *head;
         unsigned int i;
         pgoff_t index = 0;
 
+        if (!btnc_inode)
+                return;
+
         pagevec_init(&pvec);
-        while (pagevec_lookup_tag(&pvec, mapping, &index,
+        while (pagevec_lookup_tag(&pvec, btnc_inode->i_mapping, &index,
                                   PAGECACHE_TAG_DIRTY)) {
                 for (i = 0; i < pagevec_count(&pvec); i++) {
                         bh = head = page_buffers(pvec.pages[i]);
@@ -2410,7 +2413,7 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
                         continue;
                 list_del_init(&ii->i_dirty);
                 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
-                nilfs_btnode_cache_clear(&ii->i_btnode_cache);
+                nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
                 iput(&ii->vfs_inode);
         }
 }
......
@@ -157,7 +157,8 @@ struct inode *nilfs_alloc_inode(struct super_block *sb)
         ii->i_bh = NULL;
         ii->i_state = 0;
         ii->i_cno = 0;
-        nilfs_mapping_init(&ii->i_btnode_cache, &ii->vfs_inode);
+        ii->i_assoc_inode = NULL;
+        ii->i_bmap = &ii->i_bmap_data;
         return &ii->vfs_inode;
 }
 
@@ -1377,8 +1378,6 @@ static void nilfs_inode_init_once(void *obj)
 #ifdef CONFIG_NILFS_XATTR
         init_rwsem(&ii->xattr_sem);
 #endif
-        address_space_init_once(&ii->i_btnode_cache);
-        ii->i_bmap = &ii->i_bmap_data;
         inode_init_once(&ii->vfs_inode);
 }
......
@@ -337,7 +337,6 @@ void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
 /* Read information header from global quota file */
 int ocfs2_global_read_info(struct super_block *sb, int type)
 {
-        struct inode *gqinode = NULL;
         unsigned int ino[OCFS2_MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                               GROUP_QUOTA_SYSTEM_INODE };
         struct ocfs2_global_disk_dqinfo dinfo;
@@ -346,29 +345,31 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
         u64 pcount;
         int status;
 
+        oinfo->dqi_gi.dqi_sb = sb;
+        oinfo->dqi_gi.dqi_type = type;
+        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
+        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
+        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
+        oinfo->dqi_gqi_bh = NULL;
+        oinfo->dqi_gqi_count = 0;
+
         /* Read global header */
-        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
+        oinfo->dqi_gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                         OCFS2_INVALID_SLOT);
-        if (!gqinode) {
+        if (!oinfo->dqi_gqinode) {
                 mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                         type);
                 status = -EINVAL;
                 goto out_err;
         }
-        oinfo->dqi_gi.dqi_sb = sb;
-        oinfo->dqi_gi.dqi_type = type;
-        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
-        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
-        oinfo->dqi_gqi_bh = NULL;
-        oinfo->dqi_gqi_count = 0;
-        oinfo->dqi_gqinode = gqinode;
+
         status = ocfs2_lock_global_qf(oinfo, 0);
         if (status < 0) {
                 mlog_errno(status);
                 goto out_err;
         }
 
-        status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
+        status = ocfs2_extent_map_get_blocks(oinfo->dqi_gqinode, 0, &oinfo->dqi_giblk,
                                              &pcount, NULL);
         if (status < 0)
                 goto out_unlock;
......
@@ -702,8 +702,6 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
         info->dqi_priv = oinfo;
         oinfo->dqi_type = type;
         INIT_LIST_HEAD(&oinfo->dqi_chunk);
-        oinfo->dqi_gqinode = NULL;
-        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
         oinfo->dqi_rec = NULL;
         oinfo->dqi_lqi_bh = NULL;
         oinfo->dqi_libh = NULL;
......
@@ -264,9 +264,7 @@ struct vm_area_struct;
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT (24 +                                  \
-                          3 * IS_ENABLED(CONFIG_KASAN_HW_TAGS) +       \
-                          IS_ENABLED(CONFIG_LOCKDEP))
+#define __GFP_BITS_SHIFT (27 + IS_ENABLED(CONFIG_LOCKDEP))
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
......
@@ -1019,13 +1019,16 @@ static int kdamond_wait_activation(struct damon_ctx *ctx)
         struct damos *s;
         unsigned long wait_time;
         unsigned long min_wait_time = 0;
+        bool init_wait_time = false;
 
         while (!kdamond_need_stop(ctx)) {
                 damon_for_each_scheme(s, ctx) {
                         wait_time = damos_wmark_wait_us(s);
-                        if (!min_wait_time || wait_time < min_wait_time)
+                        if (!init_wait_time || wait_time < min_wait_time) {
+                                init_wait_time = true;
                                 min_wait_time = wait_time;
+                        }
                 }
                 if (!min_wait_time)
                         return 0;
......
@@ -1404,6 +1404,7 @@ long populate_vma_page_range(struct vm_area_struct *vma,
         struct mm_struct *mm = vma->vm_mm;
         unsigned long nr_pages = (end - start) / PAGE_SIZE;
         int gup_flags;
+        long ret;
 
         VM_BUG_ON(!PAGE_ALIGNED(start));
         VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1438,8 +1439,10 @@ long populate_vma_page_range(struct vm_area_struct *vma,
          * We made sure addr is within a VMA, so the following will
          * not result in a stack expansion that recurses back here.
          */
-        return __get_user_pages(mm, start, nr_pages, gup_flags,
-                                NULL, NULL, locked);
+        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
+                               NULL, NULL, locked);
+        lru_add_drain();
+        return ret;
 }
 
 /*
@@ -1471,6 +1474,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
         struct mm_struct *mm = vma->vm_mm;
         unsigned long nr_pages = (end - start) / PAGE_SIZE;
         int gup_flags;
+        long ret;
 
         VM_BUG_ON(!PAGE_ALIGNED(start));
         VM_BUG_ON(!PAGE_ALIGNED(end));
@@ -1498,8 +1502,10 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start,
         if (check_vma_flags(vma, gup_flags))
                 return -EINVAL;
 
-        return __get_user_pages(mm, start, nr_pages, gup_flags,
-                                NULL, NULL, locked);
+        ret = __get_user_pages(mm, start, nr_pages, gup_flags,
+                               NULL, NULL, locked);
+        lru_add_drain();
+        return ret;
 }
 
 /*
......
@@ -456,7 +456,8 @@ static inline void munlock_vma_page(struct page *page,
 }
 void mlock_new_page(struct page *page);
 bool need_mlock_page_drain(int cpu);
-void mlock_page_drain(int cpu);
+void mlock_page_drain_local(void);
+void mlock_page_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
@@ -539,7 +540,8 @@ static inline void munlock_vma_page(struct page *page,
                         struct vm_area_struct *vma, bool compound) { }
 static inline void mlock_new_page(struct page *page) { }
 static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain(int cpu) { }
+static inline void mlock_page_drain_local(void) { }
+static inline void mlock_page_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }
......
@@ -566,6 +566,8 @@ static unsigned long kfence_init_pool(void)
          * enters __slab_free() slow-path.
          */
         for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
+                struct slab *slab = page_slab(&pages[i]);
+
                 if (!i || (i % 2))
                         continue;
 
@@ -573,7 +575,11 @@ static unsigned long kfence_init_pool(void)
                 if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
                         return addr;
 
-                __SetPageSlab(&pages[i]);
+                __folio_set_slab(slab_folio(slab));
+#ifdef CONFIG_MEMCG
+                slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
+                                   MEMCG_DATA_OBJCGS;
+#endif
         }
 
         /*
@@ -1033,6 +1039,9 @@ void __kfence_free(void *addr)
 {
         struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
 
+#ifdef CONFIG_MEMCG
+        KFENCE_WARN_ON(meta->objcg);
+#endif
         /*
          * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
          * the object, as the object page may be recycled for other-typed
......
@@ -89,6 +89,9 @@ struct kfence_metadata {
         struct kfence_track free_track;
         /* For updating alloc_covered on frees. */
         u32 alloc_stack_hash;
+#ifdef CONFIG_MEMCG
+        struct obj_cgroup *objcg;
+#endif
 };
 
 extern struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
......
@@ -796,6 +796,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
         unsigned long flags;
         struct kmemleak_object *object;
         struct kmemleak_scan_area *area = NULL;
+        unsigned long untagged_ptr;
+        unsigned long untagged_objp;
 
         object = find_and_get_object(ptr, 1);
         if (!object) {
@@ -804,6 +806,9 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                 return;
         }
 
+        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
+        untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
+
         if (scan_area_cache)
                 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
@@ -815,8 +820,8 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
                 goto out_unlock;
         }
         if (size == SIZE_MAX) {
-                size = object->pointer + object->size - ptr;
-        } else if (ptr + size > object->pointer + object->size) {
+                size = untagged_objp + object->size - untagged_ptr;
+        } else if (untagged_ptr + size > untagged_objp + object->size) {
                 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                 dump_object_info(object);
                 kmem_cache_free(scan_area_cache, area);
......
@@ -1464,16 +1464,9 @@ SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
         while (iov_iter_count(&iter)) {
                 iovec = iov_iter_iovec(&iter);
-                /*
-                 * do_madvise returns ENOMEM if unmapped holes are present
-                 * in the passed VMA. process_madvise() is expected to skip
-                 * unmapped holes passed to it in the 'struct iovec' list
-                 * and not fail because of them. Thus treat -ENOMEM return
-                 * from do_madvise as valid and continue processing.
-                 */
                 ret = do_madvise(mm, (unsigned long)iovec.iov_base,
                                         iovec.iov_len, behavior);
-                if (ret < 0 && ret != -ENOMEM)
+                if (ret < 0)
                         break;
                 iov_iter_advance(&iter, iovec.iov_len);
         }
......
@@ -3918,14 +3918,18 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                 return ret;
 
         if (unlikely(PageHWPoison(vmf->page))) {
+                struct page *page = vmf->page;
                 vm_fault_t poisonret = VM_FAULT_HWPOISON;
                 if (ret & VM_FAULT_LOCKED) {
+                        if (page_mapped(page))
+                                unmap_mapping_pages(page_mapping(page),
+                                                    page->index, 1, false);
                         /* Retry if a clean page was removed from the cache. */
-                        if (invalidate_inode_page(vmf->page))
-                                poisonret = 0;
-                        unlock_page(vmf->page);
+                        if (invalidate_inode_page(page))
+                                poisonret = VM_FAULT_NOPAGE;
+                        unlock_page(page);
                 }
-                put_page(vmf->page);
+                put_page(page);
                 vmf->page = NULL;
                 return poisonret;
         }
......
@@ -246,7 +246,7 @@ static bool remove_migration_pte(struct folio *folio,
                         set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
         }
         if (vma->vm_flags & VM_LOCKED)
-                mlock_page_drain(smp_processor_id());
+                mlock_page_drain_local();
 
         trace_remove_migration_pte(pvmw.address, pte_val(pte),
                                    compound_order(new));
......
@@ -28,7 +28,14 @@
 
 #include "internal.h"
 
-static DEFINE_PER_CPU(struct pagevec, mlock_pvec);
+struct mlock_pvec {
+        local_lock_t lock;
+        struct pagevec vec;
+};
+
+static DEFINE_PER_CPU(struct mlock_pvec, mlock_pvec) = {
+        .lock = INIT_LOCAL_LOCK(lock),
+};
 
 bool can_do_mlock(void)
 {
@@ -203,18 +210,30 @@ static void mlock_pagevec(struct pagevec *pvec)
         pagevec_reinit(pvec);
 }
 
-void mlock_page_drain(int cpu)
+void mlock_page_drain_local(void)
+{
+        struct pagevec *pvec;
+
+        local_lock(&mlock_pvec.lock);
+        pvec = this_cpu_ptr(&mlock_pvec.vec);
+        if (pagevec_count(pvec))
+                mlock_pagevec(pvec);
+        local_unlock(&mlock_pvec.lock);
+}
+
+void mlock_page_drain_remote(int cpu)
 {
         struct pagevec *pvec;
 
-        pvec = &per_cpu(mlock_pvec, cpu);
+        WARN_ON_ONCE(cpu_online(cpu));
+        pvec = &per_cpu(mlock_pvec.vec, cpu);
         if (pagevec_count(pvec))
                 mlock_pagevec(pvec);
 }
 
 bool need_mlock_page_drain(int cpu)
 {
-        return pagevec_count(&per_cpu(mlock_pvec, cpu));
+        return pagevec_count(&per_cpu(mlock_pvec.vec, cpu));
 }
 
 /**
@@ -223,7 +242,10 @@ bool need_mlock_page_drain(int cpu)
  */
 void mlock_folio(struct folio *folio)
 {
-        struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+        struct pagevec *pvec;
+
+        local_lock(&mlock_pvec.lock);
+        pvec = this_cpu_ptr(&mlock_pvec.vec);
 
         if (!folio_test_set_mlocked(folio)) {
                 int nr_pages = folio_nr_pages(folio);
@@ -236,7 +258,7 @@ void mlock_folio(struct folio *folio)
         if (!pagevec_add(pvec, mlock_lru(&folio->page)) ||
             folio_test_large(folio) || lru_cache_disabled())
                 mlock_pagevec(pvec);
-        put_cpu_var(mlock_pvec);
+        local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -245,9 +267,11 @@ void mlock_folio(struct folio *folio)
  */
 void mlock_new_page(struct page *page)
 {
-        struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+        struct pagevec *pvec;
         int nr_pages = thp_nr_pages(page);
 
+        local_lock(&mlock_pvec.lock);
+        pvec = this_cpu_ptr(&mlock_pvec.vec);
         SetPageMlocked(page);
         mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
         __count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
@@ -256,7 +280,7 @@ void mlock_new_page(struct page *page)
         if (!pagevec_add(pvec, mlock_new(page)) ||
             PageHead(page) || lru_cache_disabled())
                 mlock_pagevec(pvec);
-        put_cpu_var(mlock_pvec);
+        local_unlock(&mlock_pvec.lock);
 }
 
 /**
@@ -265,8 +289,10 @@ void mlock_new_page(struct page *page)
  */
 void munlock_page(struct page *page)
 {
-        struct pagevec *pvec = &get_cpu_var(mlock_pvec);
+        struct pagevec *pvec;
 
+        local_lock(&mlock_pvec.lock);
+        pvec = this_cpu_ptr(&mlock_pvec.vec);
         /*
          * TestClearPageMlocked(page) must be left to __munlock_page(),
          * which will check whether the page is multiply mlocked.
@@ -276,7 +302,7 @@ void munlock_page(struct page *page)
         if (!pagevec_add(pvec, page) ||
             PageHead(page) || lru_cache_disabled())
                 mlock_pagevec(pvec);
-        put_cpu_var(mlock_pvec);
+        local_unlock(&mlock_pvec.lock);
 }
 
 static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
......
@@ -8367,6 +8367,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
         struct zone *zone;
 
         lru_add_drain_cpu(cpu);
+        mlock_page_drain_remote(cpu);
         drain_pages(cpu);
 
         /*
......
@@ -1683,7 +1683,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
                  */
                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_page_drain(smp_processor_id());
+                        mlock_page_drain_local();
                 folio_put(folio);
         }
 
@@ -1961,7 +1961,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
                  */
                 page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
                 if (vma->vm_flags & VM_LOCKED)
-                        mlock_page_drain(smp_processor_id());
+                        mlock_page_drain_local();
                 folio_put(folio);
         }
......
@@ -624,7 +624,6 @@ void lru_add_drain_cpu(int cpu)
                 pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
         activate_page_drain(cpu);
-        mlock_page_drain(cpu);
 }
 
 /**
@@ -706,6 +705,7 @@ void lru_add_drain(void)
         local_lock(&lru_pvecs.lock);
         lru_add_drain_cpu(smp_processor_id());
         local_unlock(&lru_pvecs.lock);
+        mlock_page_drain_local();
 }
 
 /*
@@ -720,6 +720,7 @@ static void lru_add_and_bh_lrus_drain(void)
         lru_add_drain_cpu(smp_processor_id());
         local_unlock(&lru_pvecs.lock);
         invalidate_bh_lrus_cpu();
+        mlock_page_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -728,6 +729,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
         lru_add_drain_cpu(smp_processor_id());
         drain_local_pages(zone);
         local_unlock(&lru_pvecs.lock);
+        mlock_page_drain_local();
 }
 
 #ifdef CONFIG_SMP
......
@@ -441,7 +441,6 @@ static void usage(void)
                 "-n\t\tSort by task command name.\n"
                 "-a\t\tSort by memory allocate time.\n"
                 "-r\t\tSort by memory release time.\n"
-                "-c\t\tCull by comparing stacktrace instead of total block.\n"
                 "-f\t\tFilter out the information of blocks whose memory has been released.\n"
                 "--pid <PID>\tSelect by pid. This selects the information of blocks whose process ID number equals to <PID>.\n"
                 "--tgid <TGID>\tSelect by tgid. This selects the information of blocks whose Thread Group ID number equals to <TGID>.\n"
@@ -466,14 +465,11 @@ int main(int argc, char **argv)
                 { 0, 0, 0, 0},
         };
 
-        while ((opt = getopt_long(argc, argv, "acfmnprstP", longopts, NULL)) != -1)
+        while ((opt = getopt_long(argc, argv, "afmnprstP", longopts, NULL)) != -1)
                 switch (opt) {
                 case 'a':
                         cmp = compare_ts;
                         break;
-                case 'c':
-                        cull = cull | CULL_STACKTRACE;
-                        break;
                 case 'f':
                         filter = filter | FILTER_UNRELEASE;
                         break;
......