Skip to content

Commit

Permalink
Merge tag 'x86_misc_for_v6.5' of git://git.kernel.org/pub/scm/linux/k…
Browse files Browse the repository at this point in the history
…ernel/git/tip/tip

Pull misc x86 updates from Borislav Petkov:

 - Remove the local symbols prefix of the get/put_user() exception
   handling symbols so that tools do not get confused by the presence of
   code belonging to the wrong symbol/not belonging to any symbol

 - Improve csum_partial()'s performance

 - Some improvements to the kcpuid tool

* tag 'x86_misc_for_v6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/lib: Make get/put_user() exception handling a visible symbol
  x86/csum: Fix clang -Wuninitialized in csum_partial()
  x86/csum: Improve performance of `csum_partial`
  tools/x86/kcpuid: Add .gitignore
  tools/x86/kcpuid: Dump the correct CPUID function in error
  • Loading branch information
torvalds committed Jun 27, 2023
2 parents 4aacace + 5516c89 commit 4baa098
Show file tree
Hide file tree
Showing 8 changed files with 453 additions and 64 deletions.
101 changes: 68 additions & 33 deletions arch/x86/lib/csum-partial_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -5,22 +5,34 @@
* This file contains network checksum routines that are better done
* in an architecture-specific manner due to speed.
*/

#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
#include <asm/word-at-a-time.h>

static inline unsigned short from32to16(unsigned a)
static inline unsigned short from32to16(unsigned a)
{
unsigned short b = a >> 16;
unsigned short b = a >> 16;
asm("addw %w2,%w0\n\t"
"adcw $0,%w0\n"
"adcw $0,%w0\n"
: "=r" (b)
: "0" (b), "r" (a));
return b;
}

static inline __wsum csum_tail(u64 temp64, int odd)
{
unsigned int result;

result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
if (unlikely(odd)) {
result = from32to16(result);
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
}
return (__force __wsum)result;
}

/*
* Do a checksum on an arbitrary memory area.
* Returns a 32bit checksum.
Expand All @@ -35,7 +47,7 @@ static inline unsigned short from32to16(unsigned a)
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
u64 temp64 = (__force u64)sum;
unsigned odd, result;
unsigned odd;

odd = 1 & (unsigned long) buff;
if (unlikely(odd)) {
Expand All @@ -47,21 +59,52 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
buff++;
}

while (unlikely(len >= 64)) {
/*
* len == 40 is the hot case due to IPv6 headers, but annotating it likely()
has a noticeable negative effect on codegen for all other cases with
* minimal performance benefit here.
*/
if (len == 40) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcq 4*8(%[src]),%[res]\n\t"
"adcq 5*8(%[src]),%[res]\n\t"
"adcq 6*8(%[src]),%[res]\n\t"
"adcq 7*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
buff += 64;
len -= 64;
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[40])buff));
return csum_tail(temp64, odd);
}
if (unlikely(len >= 64)) {
/*
* Extra accumulators for better ILP in the loop.
*/
u64 tmp_accum, tmp_carries;

asm("xorl %k[tmp_accum],%k[tmp_accum]\n\t"
"xorl %k[tmp_carries],%k[tmp_carries]\n\t"
"subl $64, %[len]\n\t"
"1:\n\t"
"addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcl $0,%k[tmp_carries]\n\t"
"addq 4*8(%[src]),%[tmp_accum]\n\t"
"adcq 5*8(%[src]),%[tmp_accum]\n\t"
"adcq 6*8(%[src]),%[tmp_accum]\n\t"
"adcq 7*8(%[src]),%[tmp_accum]\n\t"
"adcl $0,%k[tmp_carries]\n\t"
"addq $64, %[src]\n\t"
"subl $64, %[len]\n\t"
"jge 1b\n\t"
"addq %[tmp_accum],%[res]\n\t"
"adcq %[tmp_carries],%[res]\n\t"
"adcq $0,%[res]"
: [tmp_accum] "=&r"(tmp_accum),
[tmp_carries] "=&r"(tmp_carries), [res] "+r"(temp64),
[len] "+r"(len), [src] "+r"(buff)
: "m"(*(const char *)buff));
}

if (len & 32) {
Expand All @@ -70,45 +113,37 @@ __wsum csum_partial(const void *buff, int len, __wsum sum)
"adcq 2*8(%[src]),%[res]\n\t"
"adcq 3*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[32])buff));
buff += 32;
}
if (len & 16) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq 1*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[16])buff));
buff += 16;
}
if (len & 8) {
asm("addq 0*8(%[src]),%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [src] "r" (buff)
: "memory");
: [res] "+r"(temp64)
: [src] "r"(buff), "m"(*(const char(*)[8])buff));
buff += 8;
}
if (len & 7) {
unsigned int shift = (8 - (len & 7)) * 8;
unsigned int shift = (-len << 3) & 63;
unsigned long trail;

trail = (load_unaligned_zeropad(buff) << shift) >> shift;

asm("addq %[trail],%[res]\n\t"
"adcq $0,%[res]"
: [res] "+r" (temp64)
: [trail] "r" (trail));
: [res] "+r"(temp64)
: [trail] "r"(trail));
}
result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
if (unlikely(odd)) {
result = from32to16(result);
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
}
return (__force __wsum)result;
return csum_tail(temp64, odd);
}
EXPORT_SYMBOL(csum_partial);

Expand All @@ -118,6 +153,6 @@ EXPORT_SYMBOL(csum_partial);
*/
__sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff,len,0));
return csum_fold(csum_partial(buff, len, 0));
}
EXPORT_SYMBOL(ip_compute_csum);
32 changes: 16 additions & 16 deletions arch/x86/lib/getuser.S
Original file line number Diff line number Diff line change
Expand Up @@ -143,43 +143,43 @@ SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)


SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
SYM_CODE_START_LOCAL(__get_user_handle_exception)
ASM_CLAC
.Lbad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
RET
SYM_CODE_END(.Lbad_get_user_clac)
SYM_CODE_END(__get_user_handle_exception)

#ifdef CONFIG_X86_32
SYM_CODE_START_LOCAL(.Lbad_get_user_8_clac)
SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
ASM_CLAC
bad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
RET
SYM_CODE_END(.Lbad_get_user_8_clac)
SYM_CODE_END(__get_user_8_handle_exception)
#endif

/* get_user */
_ASM_EXTABLE(1b, .Lbad_get_user_clac)
_ASM_EXTABLE(2b, .Lbad_get_user_clac)
_ASM_EXTABLE(3b, .Lbad_get_user_clac)
_ASM_EXTABLE(1b, __get_user_handle_exception)
_ASM_EXTABLE(2b, __get_user_handle_exception)
_ASM_EXTABLE(3b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(4b, .Lbad_get_user_clac)
_ASM_EXTABLE(4b, __get_user_handle_exception)
#else
_ASM_EXTABLE(4b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(5b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(4b, __get_user_8_handle_exception)
_ASM_EXTABLE(5b, __get_user_8_handle_exception)
#endif

/* __get_user */
_ASM_EXTABLE(6b, .Lbad_get_user_clac)
_ASM_EXTABLE(7b, .Lbad_get_user_clac)
_ASM_EXTABLE(8b, .Lbad_get_user_clac)
_ASM_EXTABLE(6b, __get_user_handle_exception)
_ASM_EXTABLE(7b, __get_user_handle_exception)
_ASM_EXTABLE(8b, __get_user_handle_exception)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(9b, .Lbad_get_user_clac)
_ASM_EXTABLE(9b, __get_user_handle_exception)
#else
_ASM_EXTABLE(9b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(10b, .Lbad_get_user_8_clac)
_ASM_EXTABLE(9b, __get_user_8_handle_exception)
_ASM_EXTABLE(10b, __get_user_8_handle_exception)
#endif
24 changes: 12 additions & 12 deletions arch/x86/lib/putuser.S
Original file line number Diff line number Diff line change
Expand Up @@ -131,22 +131,22 @@ SYM_FUNC_START(__put_user_nocheck_8)
SYM_FUNC_END(__put_user_nocheck_8)
EXPORT_SYMBOL(__put_user_nocheck_8)

SYM_CODE_START_LOCAL(.Lbad_put_user_clac)
SYM_CODE_START_LOCAL(__put_user_handle_exception)
ASM_CLAC
.Lbad_put_user:
movl $-EFAULT,%ecx
RET
SYM_CODE_END(.Lbad_put_user_clac)
SYM_CODE_END(__put_user_handle_exception)

_ASM_EXTABLE(1b, .Lbad_put_user_clac)
_ASM_EXTABLE(2b, .Lbad_put_user_clac)
_ASM_EXTABLE(3b, .Lbad_put_user_clac)
_ASM_EXTABLE(4b, .Lbad_put_user_clac)
_ASM_EXTABLE(5b, .Lbad_put_user_clac)
_ASM_EXTABLE(6b, .Lbad_put_user_clac)
_ASM_EXTABLE(7b, .Lbad_put_user_clac)
_ASM_EXTABLE(9b, .Lbad_put_user_clac)
_ASM_EXTABLE(1b, __put_user_handle_exception)
_ASM_EXTABLE(2b, __put_user_handle_exception)
_ASM_EXTABLE(3b, __put_user_handle_exception)
_ASM_EXTABLE(4b, __put_user_handle_exception)
_ASM_EXTABLE(5b, __put_user_handle_exception)
_ASM_EXTABLE(6b, __put_user_handle_exception)
_ASM_EXTABLE(7b, __put_user_handle_exception)
_ASM_EXTABLE(9b, __put_user_handle_exception)
#ifdef CONFIG_X86_32
_ASM_EXTABLE(8b, .Lbad_put_user_clac)
_ASM_EXTABLE(10b, .Lbad_put_user_clac)
_ASM_EXTABLE(8b, __put_user_handle_exception)
_ASM_EXTABLE(10b, __put_user_handle_exception)
#endif
17 changes: 17 additions & 0 deletions lib/Kconfig.debug
Original file line number Diff line number Diff line change
Expand Up @@ -2453,6 +2453,23 @@ config BITFIELD_KUNIT

If unsure, say N.

config CHECKSUM_KUNIT
tristate "KUnit test checksum functions at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Enable this option to test the checksum functions at boot.

KUnit tests run during boot and output the results to the debug log
in TAP format (http://testanything.org/). Only useful for kernel devs
running the KUnit test harness, and not intended for inclusion into a
production build.

For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.

If unsure, say N.

config HASH_KUNIT_TEST
tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
Expand Down
1 change: 1 addition & 0 deletions lib/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -377,6 +377,7 @@ obj-$(CONFIG_PLDMFW) += pldmfw/
# KUnit tests
CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o
Expand Down
Loading

0 comments on commit 4baa098

Please sign in to comment.