Commit 3f50dbc1 authored by Paolo Ciarrocchi's avatar Paolo Ciarrocchi Committed by Ingo Molnar

x86: coding style fixes to arch/x86/lib/usercopy_32.c

Before:
 total: 63 errors, 2 warnings, 878 lines checked
After:
 total: 0 errors, 2 warnings, 878 lines checked

Compile tested, no change in the binary output:

text    data     bss     dec     hex filename
3231       0       0    3231     c9f usercopy_32.o.after
3231       0       0    3231     c9f usercopy_32.o.before

md5sum:
9f9a3eb43970359ae7cecfd1c9e7cf42  usercopy_32.o.after
9f9a3eb43970359ae7cecfd1c9e7cf42  usercopy_32.o.before
Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bdd3cee2
...@@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon ...@@ -22,14 +22,14 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
#endif #endif
return 1; return 1;
} }
#define movsl_is_ok(a1,a2,n) \ #define movsl_is_ok(a1, a2, n) \
__movsl_is_ok((unsigned long)(a1),(unsigned long)(a2),(n)) __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
/* /*
* Copy a null terminated string from userspace. * Copy a null terminated string from userspace.
*/ */
#define __do_strncpy_from_user(dst,src,count,res) \ #define __do_strncpy_from_user(dst, src, count, res) \
do { \ do { \
int __d0, __d1, __d2; \ int __d0, __d1, __d2; \
might_sleep(); \ might_sleep(); \
...@@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to, ...@@ -629,7 +629,7 @@ unsigned long __copy_user_zeroing_intel_nocache(void *to,
#endif /* CONFIG_X86_INTEL_USERCOPY */ #endif /* CONFIG_X86_INTEL_USERCOPY */
/* Generic arbitrary sized copy. */ /* Generic arbitrary sized copy. */
#define __copy_user(to,from,size) \ #define __copy_user(to, from, size) \
do { \ do { \
int __d0, __d1, __d2; \ int __d0, __d1, __d2; \
__asm__ __volatile__( \ __asm__ __volatile__( \
...@@ -665,7 +665,7 @@ do { \ ...@@ -665,7 +665,7 @@ do { \
: "memory"); \ : "memory"); \
} while (0) } while (0)
#define __copy_user_zeroing(to,from,size) \ #define __copy_user_zeroing(to, from, size) \
do { \ do { \
int __d0, __d1, __d2; \ int __d0, __d1, __d2; \
__asm__ __volatile__( \ __asm__ __volatile__( \
...@@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, ...@@ -712,7 +712,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
{ {
#ifndef CONFIG_X86_WP_WORKS_OK #ifndef CONFIG_X86_WP_WORKS_OK
if (unlikely(boot_cpu_data.wp_works_ok == 0) && if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
((unsigned long )to) < TASK_SIZE) { ((unsigned long)to) < TASK_SIZE) {
/* /*
* When we are in an atomic section (see * When we are in an atomic section (see
* mm/filemap.c:file_read_actor), return the full * mm/filemap.c:file_read_actor), return the full
...@@ -740,7 +740,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, ...@@ -740,7 +740,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
survive: survive:
down_read(&current->mm->mmap_sem); down_read(&current->mm->mmap_sem);
retval = get_user_pages(current, current->mm, retval = get_user_pages(current, current->mm,
(unsigned long )to, 1, 1, 0, &pg, NULL); (unsigned long)to, 1, 1, 0, &pg, NULL);
if (retval == -ENOMEM && is_global_init(current)) { if (retval == -ENOMEM && is_global_init(current)) {
up_read(&current->mm->mmap_sem); up_read(&current->mm->mmap_sem);
...@@ -802,7 +802,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, ...@@ -802,7 +802,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
unsigned long n) unsigned long n)
{ {
#ifdef CONFIG_X86_INTEL_USERCOPY #ifdef CONFIG_X86_INTEL_USERCOPY
if ( n > 64 && cpu_has_xmm2) if (n > 64 && cpu_has_xmm2)
n = __copy_user_zeroing_intel_nocache(to, from, n); n = __copy_user_zeroing_intel_nocache(to, from, n);
else else
__copy_user_zeroing(to, from, n); __copy_user_zeroing(to, from, n);
...@@ -817,7 +817,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr ...@@ -817,7 +817,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
unsigned long n) unsigned long n)
{ {
#ifdef CONFIG_X86_INTEL_USERCOPY #ifdef CONFIG_X86_INTEL_USERCOPY
if ( n > 64 && cpu_has_xmm2) if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n); n = __copy_user_intel_nocache(to, from, n);
else else
__copy_user(to, from, n); __copy_user(to, from, n);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment