Commit cb152a1a authored by Shijie Luo, committed by Linus Torvalds

mm: fix some typos and code style problems

Fix some typos and code style problems in mm.

gfp.h: s/MAXNODES/MAX_NUMNODES
mmzone.h: s/then/than
rmap.c: s/__vma_split()/__vma_adjust()
swap.c: s/__mod_zone_page_stat/__mod_zone_page_state, s/is is/is
swap_state.c: s/whoes/whose
z3fold.c: code style problem fix in z3fold_unregister_migration
zsmalloc.c: s/of/or, s/give/given

Link: https://lkml.kernel.org/r/20210419083057.64820-1-luoshijie1@huawei.com
Signed-off-by: Shijie Luo <luoshijie1@huawei.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b1989a3d
include/linux/gfp.h
@@ -490,7 +490,7 @@ static inline int gfp_zonelist(gfp_t flags)
 /*
  * We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
  * There are two zonelists per node, one for all zones with memory and
  * one containing just zones from the node the zonelist belongs to.
  *

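For context, the comment above describes how large a zonelist can get: every node can contribute at most MAX_NR_ZONES zones, so a zonelist that may reference zones from all nodes needs MAX_NUMNODES*MAX_NR_ZONES slots. A minimal stand-alone sketch of that sizing, using made-up example values and a simplified struct rather than the kernel's actual types:

/* Simplified model of the zonelist sizing described in the comment above. */
#define MAX_NUMNODES	4	/* example value, not the kernel's configuration */
#define MAX_NR_ZONES	3	/* example value, not the kernel's configuration */

struct zone_ref {
	int node;		/* node that owns the zone */
	int zone_idx;		/* zone index within that node */
};

struct zone_list {
	/* one slot per possible (node, zone) pair, plus a terminating sentinel */
	struct zone_ref refs[MAX_NUMNODES * MAX_NR_ZONES + 1];
};
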
include/linux/mmzone.h
@@ -55,7 +55,7 @@ enum migratetype {
 	 * pageblocks to MIGRATE_CMA which can be done by
 	 * __free_pageblock_cma() function. What is important though
 	 * is that a range of pageblocks must be aligned to
-	 * MAX_ORDER_NR_PAGES should biggest page be bigger then
+	 * MAX_ORDER_NR_PAGES should biggest page be bigger than
 	 * a single pageblock.
 	 */
 	MIGRATE_CMA,

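The alignment rule mentioned in that comment can be illustrated with a small sketch: a range handed over to CMA should be aligned to the larger of the pageblock size and MAX_ORDER_NR_PAGES, so that no maximum-order buddy page straddles CMA and non-CMA pageblocks. The constants and helper below are illustrative assumptions, not the kernel's code:

#include <stdbool.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* example value */
#define MAX_ORDER_NR_PAGES	1024UL	/* example value */

/* Returns true if [start_pfn, start_pfn + nr_pages) respects the alignment rule. */
static bool cma_range_aligned(unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long align = MAX_ORDER_NR_PAGES > PAGEBLOCK_NR_PAGES ?
			      MAX_ORDER_NR_PAGES : PAGEBLOCK_NR_PAGES;

	return (start_pfn % align == 0) && (nr_pages % align == 0);
}
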
mm/rmap.c
@@ -257,7 +257,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
  *
- * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and
+ * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
  * anon_vma_fork(). The first three want an exact copy of src, while the last
  * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
  * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,

mm/swap.c
@@ -496,7 +496,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
 		int nr_pages = thp_nr_pages(page);
 		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * We use the irq-unsafe __mod_zone_page_state because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
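
The comment being corrected here reasons about why the cheaper, irq-unsafe counter update is acceptable: the counter is never modified from interrupt context, and the pte spinlock already excludes concurrent updaters while implying preemption is off. A rough userspace analogy of that pattern, a sketch only and not the kernel's __mod_zone_page_state implementation, with the pte lock replaced by an ordinary spinlock:

#include <pthread.h>

static long nr_mlocked;				/* example counter */
static pthread_spinlock_t page_lock;		/* stands in for the pte lock */

static void counters_init(void)
{
	pthread_spin_init(&page_lock, PTHREAD_PROCESS_PRIVATE);
}

/* Cheap update: provides no protection of its own, relies entirely on the caller. */
static void __mod_counter(long delta)
{
	nr_mlocked += delta;
}

static void mlock_pages(long nr)
{
	pthread_spin_lock(&page_lock);		/* exclusion comes from the caller's lock */
	__mod_counter(nr);
	pthread_spin_unlock(&page_lock);
}
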
mm/swap.c
@@ -808,7 +808,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
 	 * below which drains the page vectors.
 	 *
 	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
-	 * Assume CPU #z is is in the middle of the for_each_online_cpu loop
+	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
 	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
 	 * along, adds some pages to its per-cpu vectors, then calls
 	 * lru_add_drain_all().

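The x/y/z scenario in that comment is about a drain pass that has already moved past the CPU where new pages are being queued. A minimal sketch of the generation-counter idea that makes this safe, heavily simplified from the real lru_drain_gen scheme and with assumed names: if the generation changed after the caller queued its pages, a newer full pass began after those pages existed and will cover them, so the caller can skip its own pass.

#include <pthread.h>
#include <stdatomic.h>

static atomic_uint drain_gen;			/* bumped at the start of every drain pass */
static pthread_mutex_t drain_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called by a CPU that has just queued pages and wants every CPU drained. */
static void drain_all(void)
{
	unsigned int seen = atomic_load(&drain_gen);	/* generation observed after queueing */

	pthread_mutex_lock(&drain_lock);

	/*
	 * If the generation moved while we waited for the lock, a full pass
	 * started after our pages were queued, so it will (or already did)
	 * flush them; skip the redundant work.
	 */
	if (atomic_load(&drain_gen) != seen) {
		pthread_mutex_unlock(&drain_lock);
		return;
	}

	atomic_fetch_add(&drain_gen, 1);
	/* ... walk every online CPU and flush its per-CPU page vectors ... */

	pthread_mutex_unlock(&drain_lock);
}
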
mm/swap_state.c
@@ -792,7 +792,7 @@ static void swap_ra_info(struct vm_fault *vmf,
  *
  * Returns the struct page for entry and addr, after queueing swapin.
  *
- * Primitive swap readahead code. We simply read in a few pages whoes
+ * Primitive swap readahead code. We simply read in a few pages whose
  * virtual addresses are around the fault address in the same vma.
  *
  * Caller must hold read mmap_lock if vmf->vma is not NULL.

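As the fixed comment says, the readahead here is deliberately simple: besides the faulting page, read in a few pages whose virtual addresses surround the fault address, staying inside the same VMA. A rough sketch of that idea; the window size, helper name and clamping below are assumptions for illustration, not the kernel's swap readahead code:

#define PAGE_SIZE_BYTES	4096UL
#define RA_WINDOW	8UL		/* example window, in pages */

/* Hypothetical helper that queues one asynchronous swap-in for a virtual page. */
extern void queue_swapin(unsigned long vaddr);

/* All addresses are assumed page-aligned for brevity. */
static void swap_readahead(unsigned long fault_addr,
			   unsigned long vma_start, unsigned long vma_end)
{
	unsigned long start = fault_addr - (RA_WINDOW / 2) * PAGE_SIZE_BYTES;
	unsigned long end = fault_addr + (RA_WINDOW / 2) * PAGE_SIZE_BYTES;
	unsigned long addr;

	if (start > fault_addr || start < vma_start)	/* clamp; also catches wrap-around */
		start = vma_start;
	if (end > vma_end)
		end = vma_end;

	for (addr = start; addr < end; addr += PAGE_SIZE_BYTES)
		queue_swapin(addr);
}
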
mm/z3fold.c
@@ -391,7 +391,7 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
 {
 	if (pool->inode)
 		iput(pool->inode);
- }
+}
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,

mm/zsmalloc.c
@@ -61,7 +61,7 @@
 #define ZSPAGE_MAGIC	0x58
 
 /*
- * This must be power of 2 and greater than of equal to sizeof(link_free).
+ * This must be power of 2 and greater than or equal to sizeof(link_free).
  * These two conditions ensure that any 'struct link_free' itself doesn't
  * span more than 1 page which avoids complex case of mapping 2 pages simply
  * to restore link_free pointer values.
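
The two constraints named in the corrected sentence (a power of two, and at least sizeof(link_free)) lend themselves to compile-time checks. A stand-alone illustration with an example alignment value and a stand-in struct, not the kernel's actual definitions:

#include <assert.h>

struct link_free_example {			/* stand-in for the real struct link_free */
	unsigned long next;
};

#define EXAMPLE_ALIGN	8

static_assert((EXAMPLE_ALIGN & (EXAMPLE_ALIGN - 1)) == 0,
	      "alignment must be a power of 2");
static_assert(EXAMPLE_ALIGN >= sizeof(struct link_free_example),
	      "alignment must be able to hold the free-list link");
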
mm/zsmalloc.c
@@ -530,7 +530,7 @@ static void set_zspage_mapping(struct zspage *zspage,
  * class maintains a list of zspages where each zspage is divided
  * into equal sized chunks. Each allocation falls into one of these
  * classes depending on its size. This function returns index of the
- * size class which has chunk size big enough to hold the give size.
+ * size class which has chunk size big enough to hold the given size.
  */
 static int get_size_class_index(int size)
 {

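The function touched above maps an allocation size to a size class whose chunk size is big enough to hold it. A minimal sketch of that mapping, assuming classes spaced CLASS_DELTA bytes apart starting at MIN_ALLOC; the constants are examples, not zsmalloc's:

#define MIN_ALLOC	32
#define CLASS_DELTA	16
#define NR_CLASSES	255

static int size_class_index(int size)
{
	int idx = 0;

	if (size > MIN_ALLOC)	/* round up to the next class boundary */
		idx = (size - MIN_ALLOC + CLASS_DELTA - 1) / CLASS_DELTA;

	return idx < NR_CLASSES ? idx : NR_CLASSES - 1;
}
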