Commit 4baa098a authored by Linus Torvalds

Merge tag 'x86_misc_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 updates from Borislav Petkov:

 - Remove the local symbols prefix of the get/put_user() exception
   handling symbols so that tools do not get confused by the presence of
   code belonging to the wrong symbol/not belonging to any symbol

 - Improve csum_partial()'s performance

 - Some improvements to the kcpuid tool

* tag 'x86_misc_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/lib: Make get/put_user() exception handling a visible symbol
  x86/csum: Fix clang -Wuninitialized in csum_partial()
  x86/csum: Improve performance of `csum_partial`
  tools/x86/kcpuid: Add .gitignore
  tools/x86/kcpuid: Dump the correct CPUID function in error
parents 4aacacee 5516c89d
......@@ -5,22 +5,34 @@
* This file contains network checksum routines that are better done
* in an architecture-specific manner due to speed.
*/
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>
/*
 * Fold a 32-bit value down to 16 bits with end-around carry, as the
 * Internet checksum requires: add the high and low 16-bit halves, then
 * fold the resulting carry back in with adcw.
 *
 * NOTE(review): this span is a diff rendering with old/new lines
 * interleaved — the declaration and two statements appear twice — so it
 * is not directly compilable as shown; the duplicates are kept verbatim.
 */
static inline unsigned short from32to16(unsigned a)
static inline unsigned short from32to16(unsigned a)
{
unsigned short b = a >> 16;
unsigned short b = a >> 16;
/* addw adds the low half of 'a' into b; adcw folds the carry back in. */
asm("addw %w2,%w0\n\t"
"adcw $0,%w0\n"
"adcw $0,%w0\n"
: "=r" (b)
: "0" (b), "r" (a));
return b;
}
/*
 * Finish a checksum: fold the 64-bit accumulator down to 32 bits and,
 * when the buffer started on an odd address, byte-swap the result so it
 * matches a checksum computed from the original (even) alignment.
 */
static inline __wsum csum_tail(u64 temp64, int odd)
{
	unsigned int sum32 = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);

	if (unlikely(odd)) {
		sum32 = from32to16(sum32);
		/* Undo the byte rotation introduced by the odd start. */
		sum32 = ((sum32 & 0xff) << 8) | ((sum32 >> 8) & 0xff);
	}

	return (__force __wsum)sum32;
}
/*
* Do a checksum on an arbitrary memory area.
* Returns a 32bit checksum.
......@@ -35,7 +47,7 @@ static inline unsigned short from32to16(unsigned a)
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
u64 temp64 = (__force u64)sum;
unsigned odd, result;
unsigned odd;
odd = 1 & (unsigned long) buff;
if (unlikely(odd)) {
......@@ -47,21 +59,52 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
buff++;
}
while (unlikely(len >= 64)) {
/*
* len == 40 is the hot case due to IPv6 headers, but annotating it likely()
has noticeable negative effect on codegen for all other cases with
* minimal performance benefit here.
*/
if (len == 40) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcq 4*8(%[src]),%[res]\n\t"
"adcq 5*8(%[src]),%[res]\n\t"
"adcq 6*8(%[src]),%[res]\n\t"
"adcq 7*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
buff += 64;
len -= 64;
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[40])buff));
return csum_tail(temp64, odd);
}
if (unlikely(len >= 64)) {
/*
* Extra accumulators for better ILP in the loop.
*/
u64 tmp_accum, tmp_carries;
asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
"xorl %k[tmp_carries],%k[tmp_carries]\n\t"
"subl $64, %[len]\n\t"
"1:\n\t"
"addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcl $0,%k[tmp_carries]\n\t"
"addq 4*8(%[src]),%[tmp_accum]\n\t"
"adcq 5*8(%[src]),%[tmp_accum]\n\t"
"adcq 6*8(%[src]),%[tmp_accum]\n\t"
"adcq 7*8(%[src]),%[tmp_accum]\n\t"
"adcl $0,%k[tmp_carries]\n\t"
"addq $64, %[src]\n\t"
"subl $64, %[len]\n\t"
"jge 1b\n\t"
"addq %[tmp_accum],%[res]\n\t"
"adcq %[tmp_carries],%[res]\n\t"
"adcq $0,%[res]"
: [tmp_accum] "=&r"(tmp_accum),
[tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
[len] "+r"(len), [src] "+r"(buff)
: "m"(*(const char *)buff));
}
if (len & 32) {
......@@ -70,45 +113,37 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[32])buff));
buff += 32;
}
if (len & 16) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[16])buff));
buff += 16;
}
if (len & 8) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[8])buff));
buff += 8;
}
if (len & 7) {
unsigned int shift = (8 - (len & 7)) * 8;
unsigned int shift = (-len << 3) & 63;
unsigned long trail;
trail = (load_unaligned_zeropad(buff) << shift) >> shift;
asm("addq %[trail],%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [trail] "r" (trail));
: [res] "+r"(temp64)
: [trail] "r"(trail));
}
result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
if (unlikely(odd)) {
result = from32to16(result);
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
}
return (__force __wsum)result;
return csum_tail(temp64, odd);
}
EXPORT_SYMBOL(csum_partial);
......@@ -118,6 +153,6 @@ EXPORT_SYMBOL(csum_partial);
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff,len,0));
return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);
......@@ -143,43 +143,43 @@ SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)
SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
SYM_CODE_START_LOCAL(__get_user_handle_exception)
ASM_CLAC
.Lbad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
RET
SYM_CODE_END(.Lbad_get_user_clac)
SYM_CODE_END(__get_user_handle_exception)
#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac)
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
bad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
RET
SYM_CODE_END(.Lbad_get_user_8_clac)
SYM_CODE_END(__get_user_8_handle_exception)
#endif
/* get_user */
_ASM_EXTABLE(1b, .Lbad_get_user_clac)
_ASM_EXTABLE(2b, .Lbad_get_user_clac)
_ASM_EXTABLE(3b, .Lbad_get_user_clac)
_ASM_EXTABLE(1b, __get_user_handle_exception)
_ASM_EXTABLE(2b, __get_user_handle_exception)
_ASM_EXTABLE(3b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(4b, .Lbad_get_user_clac)
_ASM_EXTABLE(4b, __get_user_handle_exception)
#else
_ASM_EXTABLE(4b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(5b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(4b, __get_user_8_handle_exception)
_ASM_EXTABLE(5b, __get_user_8_handle_exception)
#endif
/* __get_user */
_ASM_EXTABLE(6b, .Lbad_get_user_clac)
_ASM_EXTABLE(7b, .Lbad_get_user_clac)
_ASM_EXTABLE(8b, .Lbad_get_user_clac)
_ASM_EXTABLE(6b, __get_user_handle_exception)
_ASM_EXTABLE(7b, __get_user_handle_exception)
_ASM_EXTABLE(8b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(9b, .Lbad_get_user_clac)
_ASM_EXTABLE(9b, __get_user_handle_exception)
#else
_ASM_EXTABLE(9b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(10b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(9b, __get_user_8_handle_exception)
_ASM_EXTABLE(10b, __get_user_8_handle_exception)
#endif
......@@ -131,22 +131,22 @@ SYM_FUNC_START(__put_user_nocheck_8)
SYM_FUNC_END(__put_user_nocheck_8)
EXPORT_SYMBOL(__put_user_nocheck_8)
SYM_CODE_START_LOCAL(.Lbad_put_user_clac)
SYM_CODE_START_LOCAL(__put_user_handle_exception)
ASM_CLAC
.Lbad_put_user:
movl $-EFAULT,%ecx
RET
SYM_CODE_END(.Lbad_put_user_clac)
SYM_CODE_END(__put_user_handle_exception)
_ASM_EXTABLE(1b, .Lbad_put_user_clac)
_ASM_EXTABLE(2b, .Lbad_put_user_clac)
_ASM_EXTABLE(3b, .Lbad_put_user_clac)
_ASM_EXTABLE(4b, .Lbad_put_user_clac)
_ASM_EXTABLE(5b, .Lbad_put_user_clac)
_ASM_EXTABLE(6b, .Lbad_put_user_clac)
_ASM_EXTABLE(7b, .Lbad_put_user_clac)
_ASM_EXTABLE(9b, .Lbad_put_user_clac)
_ASM_EXTABLE(1b, __put_user_handle_exception)
_ASM_EXTABLE(2b, __put_user_handle_exception)
_ASM_EXTABLE(3b, __put_user_handle_exception)
_ASM_EXTABLE(4b, __put_user_handle_exception)
_ASM_EXTABLE(5b, __put_user_handle_exception)
_ASM_EXTABLE(6b, __put_user_handle_exception)
_ASM_EXTABLE(7b, __put_user_handle_exception)
_ASM_EXTABLE(9b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
_ASM_EXTABLE(8b, .Lbad_put_user_clac)
_ASM_EXTABLE(10b, .Lbad_put_user_clac)
_ASM_EXTABLE(8b, __put_user_handle_exception)
_ASM_EXTABLE(10b, __put_user_handle_exception)
#endif
......@@ -2453,6 +2453,23 @@ config BITFIELD_KUNIT
If unsure, say N.
config CHECKSUM_KUNIT
tristate "KUnit test checksum functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Enable this option to test the checksum functions at boot.
KUnit tests run during boot and output the results to the debug log
in TAP format (http://testanything.org/). Only useful for kernel devs
running the KUnit test harness, and not intended for inclusion into a
production build.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config HASH_KUNIT_TEST
tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
......
......@@ -377,6 +377,7 @@ obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
......
This diff is collapsed.
......@@ -517,15 +517,16 @@ static void show_range(struct cpuid_range *range)
static inline struct cpuid_func *index_to_func(u32 index)
{
struct cpuid_range *range;
u32 func_idx;
range = (index & 0x80000000) ? leafs_ext : leafs_basic;
index &= 0x7FFFFFFF;
func_idx = index & 0xffff;
if (((index & 0xFFFF) + 1) > (u32)range->nr) {
if ((func_idx + 1) > (u32)range->nr) {
printf("ERR: invalid input index (0x%x)\n", index);
return NULL;
}
return &range->funcs[index];
return &range->funcs[func_idx];
}
static void show_info(void)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment