Commit b8af2941 authored by Pintu Kumar, committed by Linus Torvalds

mm/page_alloc.c: fix coding style and spelling

Fix all errors reported by checkpatch and some small spelling mistakes.
Signed-off-by: Pintu Kumar <pintu.k@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ebc2a1a6
@@ -721,7 +721,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 		return false;
 
 	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+		debug_check_no_locks_freed(page_address(page),
+					   PAGE_SIZE << order);
 		debug_check_no_obj_freed(page_address(page),
 					   PAGE_SIZE << order);
 	}
@@ -885,7 +886,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 						int migratetype)
 {
 	unsigned int current_order;
-	struct free_area * area;
+	struct free_area *area;
 	struct page *page;
 
 	/* Find a page of the appropriate size in the preferred list */
@@ -1011,7 +1012,7 @@ static void change_pageblock_range(struct page *pageblock_page,
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
-	struct free_area * area;
+	struct free_area *area;
 	int current_order;
 	struct page *page;
 	int migratetype, i;
@@ -3104,7 +3105,7 @@ void show_free_areas(unsigned int filter)
 	}
 
 	for_each_populated_zone(zone) {
- 		unsigned long nr[MAX_ORDER], flags, order, total = 0;
+		unsigned long nr[MAX_ORDER], flags, order, total = 0;
 		unsigned char types[MAX_ORDER];
 
 		if (skip_free_areas_node(filter, zone_to_nid(zone)))
@@ -3416,11 +3417,11 @@ static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
 static int default_zonelist_order(void)
 {
 	int nid, zone_type;
-	unsigned long low_kmem_size,total_size;
+	unsigned long low_kmem_size, total_size;
 	struct zone *z;
 	int average_size;
 	/*
 	 * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
 	 * If they are really small and used heavily, the system can fall
 	 * into OOM very easily.
 	 * This function detect ZONE_DMA/DMA32 size and configures zone order.
@@ -3452,9 +3453,9 @@ static int default_zonelist_order(void)
 		return ZONELIST_ORDER_NODE;
 	/*
 	 * look into each node's config.
- 	 * If there is a node whose DMA/DMA32 memory is very big area on
- 	 * local memory, NODE_ORDER may be suitable.
+	 * If there is a node whose DMA/DMA32 memory is very big area on
+	 * local memory, NODE_ORDER may be suitable.
 	 */
 	average_size = total_size /
 			(nodes_weight(node_states[N_MEMORY]) + 1);
 	for_each_online_node(nid) {
@@ -4180,7 +4181,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 	if (!zone->wait_table)
 		return -ENOMEM;
 
-	for(i = 0; i < zone->wait_table_hash_nr_entries; ++i)
+	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
 		init_waitqueue_head(zone->wait_table + i);
 
 	return 0;
@@ -4930,7 +4931,7 @@ static unsigned long __init early_calculate_totalpages(void)
 		if (pages)
 			node_set_state(nid, N_MEMORY);
 	}
-  	return totalpages;
+	return totalpages;
 }
 
 /*
@@ -5047,7 +5048,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 			/*
 			 * Some kernelcore has been met, update counts and
 			 * break if the kernelcore for this node has been
-			 * satisified
+			 * satisfied
 			 */
 			required_kernelcore -= min(required_kernelcore,
 							size_pages);
@@ -5061,7 +5062,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 	 * If there is still required_kernelcore, we do another pass with one
 	 * less node in the count. This will push zone_movable_pfn[nid] further
 	 * along on the nodes that still have memory until kernelcore is
-	 * satisified
+	 * satisfied
 	 */
 	usable_nodes--;
 	if (usable_nodes && required_kernelcore > usable_nodes)
@@ -5286,8 +5287,10 @@ void __init mem_init_print_info(const char *str)
 	 * 3) .rodata.* may be embedded into .text or .data sections.
 	 */
 #define adj_init_size(start, end, size, pos, adj) \
-	if (start <= pos && pos < end && size > adj) \
-		size -= adj;
+	do { \
+		if (start <= pos && pos < end && size > adj) \
+			size -= adj; \
+	} while (0)
 
 	adj_init_size(__init_begin, __init_end, init_data_size,
 		     _sinittext, init_code_size);
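
A note on the do { } while (0) wrapper added above: checkpatch flags macros whose body is a bare if, because a caller's else can silently bind to the if hidden inside the expansion. The following is a standalone userspace sketch, not part of the patch; ADJ_BARE and ADJ_WRAPPED are made-up names that mirror the before/after shape of adj_init_size().

#include <stdio.h>

/* Hypothetical macros for illustration only. */
#define ADJ_BARE(start, end, size, pos, adj)				\
	if ((start) <= (pos) && (pos) < (end) && (size) > (adj))	\
		(size) -= (adj)

#define ADJ_WRAPPED(start, end, size, pos, adj)				\
	do {								\
		if ((start) <= (pos) && (pos) < (end) && (size) > (adj)) \
			(size) -= (adj);				\
	} while (0)

int main(void)
{
	long size = 100, fallback = 0;
	int use_adjustment = 0;

	/* With the bare macro, this `else' pairs with the `if' inside the
	 * macro expansion, not with `if (use_adjustment)', so the fallback
	 * branch is silently skipped.
	 */
	if (use_adjustment)
		ADJ_BARE(10, 20, size, 5, 1);
	else
		fallback = 1;
	printf("bare:    fallback = %ld (intended: 1)\n", fallback);

	/* The do { } while (0) form expands to a single statement, so the
	 * `else' pairs with the outer `if' as the caller intended.
	 */
	fallback = 0;
	if (use_adjustment)
		ADJ_WRAPPED(10, 20, size, 5, 1);
	else
		fallback = 1;
	printf("wrapped: fallback = %ld\n", fallback);

	return 0;
}

Compiling the bare variant with gcc -Wall typically produces an "ambiguous else" warning, which is essentially the hazard checkpatch is guarding against.
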
@@ -5570,7 +5573,7 @@ static void __meminit setup_per_zone_inactive_ratio(void)
  * we want it large (64MB max). But it is not linear, because network
  * bandwidth does not increase linearly with machine size. We use
  *
- * 	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
+ *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
  *
  * which yields
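
For a concrete feel of the formula in that comment, here is a small userspace sketch (illustrative only; the kernel computes this with its own integer square-root helper and also clamps the result to a fixed range, which is omitted here):

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* Example: 4 GB of lowmem, expressed in kilobytes. */
	unsigned long lowmem_kbytes = 4UL * 1024 * 1024;

	/* min_free_kbytes = sqrt(lowmem_kbytes * 16), per the comment above. */
	unsigned long min_free_kbytes =
		(unsigned long)sqrt((double)lowmem_kbytes * 16);

	/* Prints 8192 kB, i.e. about 8 MB kept free on a 4 GB machine. */
	printf("lowmem = %lu kB -> min_free_kbytes ~= %lu kB\n",
	       lowmem_kbytes, min_free_kbytes);
	return 0;
}

Build with: gcc demo.c -lm
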
@@ -5614,11 +5617,11 @@ int __meminit init_per_zone_wmark_min(void)
 module_init(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
- * 	that we can call two helper functions whenever min_free_kbytes
- * 	changes.
+ *	that we can call two helper functions whenever min_free_kbytes
+ *	changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec(table, write, buffer, length, ppos);
@@ -5682,8 +5685,8 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
 
 /*
  * percpu_pagelist_fraction - changes the pcp->high for each zone on each
- * cpu. It is the fraction of total pages in each zone that a hot per cpu pagelist
- * can have before it gets flushed back to buddy allocator.
+ * cpu. It is the fraction of total pages in each zone that a hot per cpu
+ * pagelist can have before it gets flushed back to buddy allocator.
  */
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
 	void __user *buffer, size_t *length, loff_t *ppos)
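
To make the fraction concrete, a rough back-of-the-envelope sketch based only on the wording of this comment (the zone size and fraction value below are invented, and the kernel's exact pcp->high/batch computation is not shown):

#include <stdio.h>

int main(void)
{
	/* Hypothetical zone: 1 GB of 4 KB pages. */
	unsigned long zone_pages = 262144;

	/* Hypothetical sysctl setting, e.g. vm.percpu_pagelist_fraction = 8. */
	unsigned long fraction = 8;

	/* Per the comment: each hot per-cpu pagelist may hold roughly this
	 * fraction of the zone's pages before being flushed back to the
	 * buddy allocator.
	 */
	unsigned long pcp_high = zone_pages / fraction;

	printf("pcp high watermark ~= %lu pages per cpu\n", pcp_high); /* 32768 */
	return 0;
}
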
@@ -5901,7 +5904,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
  * This function checks whether pageblock includes unmovable pages or not.
  * If @count is not zero, it is okay to include less @count unmovable pages
  *
- * PageLRU check wihtout isolation or lru_lock could race so that
+ * PageLRU check without isolation or lru_lock could race so that
  * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
  * expect this function should be exact.
  */