Commit 3b32123d authored by Gideon Israel Dsouza, committed by Linus Torvalds

mm: use macros from compiler.h instead of __attribute__((...))

To increase compiler portability there is <linux/compiler.h> which
provides convenience macros for various gcc constructs.  Eg: __weak for
__attribute__((weak)).  I've replaced all instances of gcc attributes with
the right macro in the memory management (/mm) subsystem.

[akpm@linux-foundation.org: while-we're-there consistency tweaks]
Signed-off-by: Gideon Israel Dsouza <gidisrael@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 615d6e87
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/nodemask.h> #include <linux/nodemask.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/mempolicy.h> #include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h> #include <linux/cpuset.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
...@@ -3521,7 +3522,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address, ...@@ -3521,7 +3522,7 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */ #else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
/* Can be overridden by architectures */ /* Can be overridden by architectures */
__attribute__((weak)) struct page * struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address, follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int write) pud_t *pud, int write)
{ {
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/blkdev.h> #include <linux/blkdev.h>
#include <linux/backing-dev.h> #include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/personality.h> #include <linux/personality.h>
#include <linux/security.h> #include <linux/security.h>
...@@ -460,7 +461,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases); ...@@ -460,7 +461,7 @@ EXPORT_SYMBOL_GPL(vm_unmap_aliases);
* Implement a stub for vmalloc_sync_all() if the architecture chose not to * Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one. * have one.
*/ */
void __attribute__((weak)) vmalloc_sync_all(void) void __weak vmalloc_sync_all(void)
{ {
} }
......
...@@ -5,10 +5,12 @@ ...@@ -5,10 +5,12 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/mmzone.h> #include <linux/mmzone.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h> #include <linux/highmem.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include "internal.h" #include "internal.h"
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
...@@ -461,7 +463,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum) ...@@ -461,7 +463,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
} }
#endif #endif
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void) void __weak __meminit vmemmap_populate_print_last(void)
{ {
} }
......
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -307,7 +308,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) ...@@ -307,7 +308,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
* If the architecture does not support this function, simply return with no * If the architecture does not support this function, simply return with no
* page pinned * page pinned
*/ */
int __attribute__((weak)) __get_user_pages_fast(unsigned long start, int __weak __get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages) int nr_pages, int write, struct page **pages)
{ {
return 0; return 0;
...@@ -338,7 +339,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast); ...@@ -338,7 +339,7 @@ EXPORT_SYMBOL_GPL(__get_user_pages_fast);
* callers need to carefully consider what to use. On many architectures, * callers need to carefully consider what to use. On many architectures,
* get_user_pages_fast simply falls back to get_user_pages. * get_user_pages_fast simply falls back to get_user_pages.
*/ */
int __attribute__((weak)) get_user_pages_fast(unsigned long start, int __weak get_user_pages_fast(unsigned long start,
int nr_pages, int write, struct page **pages) int nr_pages, int write, struct page **pages)
{ {
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
......
...@@ -27,7 +27,9 @@ ...@@ -27,7 +27,9 @@
#include <linux/pfn.h> #include <linux/pfn.h>
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h> #include <linux/llist.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/shmparam.h> #include <asm/shmparam.h>
...@@ -2181,7 +2183,7 @@ EXPORT_SYMBOL(remap_vmalloc_range); ...@@ -2181,7 +2183,7 @@ EXPORT_SYMBOL(remap_vmalloc_range);
* Implement a stub for vmalloc_sync_all() if the architecture chose not to * Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one. * have one.
*/ */
void __attribute__((weak)) vmalloc_sync_all(void) void __weak vmalloc_sync_all(void)
{ {
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment