Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "The bulk of the diffstat consists of changes to our uaccess routines
  so that they fall back to bytewise copying prior to reporting complete
  failure when the initial (multi-byte) access faults.

  However, the most disappointing change here is that we've had to bump
  ARCH_DMA_MINALIGN back to 128 bytes thanks to Qualcomm's "Kryo" CPU,
  which ended up in the MSM8996 mobile SoC. Still, at least we're now
  aware of this design and one of the hardware designers confirmed the
  L2 cacheline size for us.

  Summary:

   - Fix instrumentation annotations for entry code

   - Ensure kernel MTE state is restored correctly on resume from suspend

   - Fix MTE fault from new strlen() routine

   - Fallback to byte-wise accesses on initial uaccess fault

   - Bump Clang requirement for BTI

   - Revert ARCH_DMA_MINALIGN back to 128 bytes (shakes fist at Qualcomm)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: entry: fix KCOV suppression
  arm64: entry: add missing noinstr
  arm64: mte: fix restoration of GCR_EL1 from suspend
  arm64: Avoid premature usercopy failure
  arm64: Restrict ARM64_BTI_KERNEL to clang 12.0.0 and newer
  Revert "arm64: cache: Lower ARCH_DMA_MINALIGN to 64 (L1_CACHE_BYTES)"
  arm64: Add missing header <asm/smp.h> in two files
  arm64: fix strlen() with CONFIG_KASAN_HW_TAGS
torvalds committed Jul 17, 2021
2 parents 872f8ed + e6f85cb commit 5f06a79
Showing 11 changed files with 54 additions and 30 deletions.
3 changes: 2 additions & 1 deletion arch/arm64/Kconfig
@@ -1605,7 +1605,8 @@ config ARM64_BTI_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
-depends on !(CC_IS_CLANG && GCOV_KERNEL)
+# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
+depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Build the kernel with Branch Target Identification annotations
2 changes: 1 addition & 1 deletion arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_DMA_MINALIGN (128)

#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
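The failure mode this change guards against can be sketched in C as follows (a simplified illustration, not code from this commit; dma_alignment_hazard() is an invented name, and a non-coherent device with 128-byte L2 lines, as on Kryo, is assumed):

#include <linux/slab.h>
#include <linux/dma-mapping.h>

/*
 * Simplified illustration (not from this commit) of why ARCH_DMA_MINALIGN
 * must cover the largest cache line in the system, here Kryo's 128-byte L2.
 */
static void dma_alignment_hazard(struct device *dev)
{
	/* With only 64-byte minimum alignment, these two objects may end
	 * up sharing a single 128-byte L2 cache line. */
	u8 *buf   = kmalloc(64, GFP_KERNEL);	/* device DMAs into this */
	u8 *other = kmalloc(64, GFP_KERNEL);	/* unrelated CPU-owned data */
	dma_addr_t handle;

	if (!buf || !other)
		return;

	/* Mapping for DMA_FROM_DEVICE invalidates buf's cache lines so the
	 * CPU cannot read stale data once the transfer completes... */
	handle = dma_map_single(dev, buf, 64, DMA_FROM_DEVICE);

	/* ...but if "other" shares the line, touching it re-dirties the
	 * whole 128-byte line, and a later eviction writes that line back
	 * on top of the bytes the device just placed in buf. Making
	 * ARCH_DMA_MINALIGN 128 prevents the sharing in the first place. */
	other[0] = 0xff;

	dma_unmap_single(dev, handle, 64, DMA_FROM_DEVICE);
	kfree(other);
	kfree(buf);
}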
1 change: 1 addition & 0 deletions arch/arm64/include/asm/smp_plat.h
@@ -10,6 +10,7 @@

#include <linux/cpumask.h>

+#include <asm/smp.h>
#include <asm/types.h>

struct mpidr_hash {
2 changes: 1 addition & 1 deletion arch/arm64/kernel/Makefile
@@ -17,7 +17,7 @@ CFLAGS_syscall.o += -fno-stack-protector
# It's not safe to invoke KCOV when portions of the kernel environment aren't
# available or are out-of-sync with HW state. Since `noinstr` doesn't always
# inhibit KCOV instrumentation, disable it for the entire compilation unit.
-KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_entry-common.o := n
KCOV_INSTRUMENT_idle.o := n

# Object file lists.
1 change: 1 addition & 0 deletions arch/arm64/kernel/cpufeature.c
@@ -81,6 +81,7 @@
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/processor.h>
+#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>
2 changes: 1 addition & 1 deletion arch/arm64/kernel/entry-common.c
@@ -604,7 +604,7 @@ asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
__el0_fiq_handler_common(regs);
}

-static void __el0_error_handler_common(struct pt_regs *regs)
+static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);

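For context, noinstr expands to roughly the following (a simplified sketch; the real definition in include/linux/compiler_types.h carries a few more attributes):

/*
 * Simplified sketch of the noinstr annotation (see
 * include/linux/compiler_types.h for the real definition). It pulls the
 * function out of the instrumentation machinery: no ftrace hooks, no
 * KASAN/KCSAN checks, and placement in the .noinstr.text section that
 * objtool validates.
 */
#define noinstr							\
	noinline notrace __section(".noinstr.text")		\
	__no_kcsan __no_sanitize_address

The error handler runs on the exception entry path before the kernel has finished notifying RCU/lockdep of the exception, so letting instrumentation run inside it would execute hooks with inconsistent state; hence the missing annotation is a bug.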
15 changes: 2 additions & 13 deletions arch/arm64/kernel/mte.c
@@ -193,18 +193,6 @@ void mte_check_tfsr_el1(void)
}
#endif

-static void update_gcr_el1_excl(u64 excl)
-{
-
-/*
- * Note that the mask controlled by the user via prctl() is an
- * include while GCR_EL1 accepts an exclude mask.
- * No need for ISB since this only affects EL0 currently, implicit
- * with ERET.
- */
-sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
-}
-
static void set_gcr_el1_excl(u64 excl)
{
current->thread.gcr_user_excl = excl;
@@ -265,7 +253,8 @@ void mte_suspend_exit(void)
if (!system_supports_mte())
return;

-update_gcr_el1_excl(gcr_kernel_excl);
+sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, gcr_kernel_excl);
+isb();
}

long set_mte_ctrl(struct task_struct *task, unsigned long arg)
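For reference, sysreg_clear_set_s() is a read-modify-write helper along these lines (a simplified sketch of the macro in arch/arm64/include/asm/sysreg.h; read_sysreg_s()/write_sysreg_s() are the arm64 accessors for registers named by encoding):

/*
 * Simplified sketch of sysreg_clear_set_s(): clear the given bits, set
 * the given bits, and only write the register back if the value changed.
 */
#define sysreg_clear_set_s(sysreg, clear, set) do {			\
	u64 __scs_val = read_sysreg_s(sysreg);				\
	u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set);		\
	if (__scs_new != __scs_val)					\
		write_sysreg_s(__scs_new, sysreg);			\
} while (0)

The explicit isb() is the point of the fix: the removed helper's comment assumed the write only affected EL0 (synchronised by the eventual ERET), but on the suspend-exit path the kernel's own tag generation depends on GCR_EL1, so the new value must take effect immediately.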
13 changes: 10 additions & 3 deletions arch/arm64/lib/copy_from_user.S
@@ -29,32 +29,34 @@
.endm

.macro ldrh1 reg, ptr, val
-user_ldst 9998f, ldtrh, \reg, \ptr, \val
+user_ldst 9997f, ldtrh, \reg, \ptr, \val
.endm

.macro strh1 reg, ptr, val
strh \reg, [\ptr], \val
.endm

.macro ldr1 reg, ptr, val
-user_ldst 9998f, ldtr, \reg, \ptr, \val
+user_ldst 9997f, ldtr, \reg, \ptr, \val
.endm

.macro str1 reg, ptr, val
str \reg, [\ptr], \val
.endm

.macro ldp1 reg1, reg2, ptr, val
-user_ldp 9998f, \reg1, \reg2, \ptr, \val
+user_ldp 9997f, \reg1, \reg2, \ptr, \val
.endm

.macro stp1 reg1, reg2, ptr, val
stp \reg1, \reg2, [\ptr], \val
.endm

end .req x5
+srcin .req x15
SYM_FUNC_START(__arch_copy_from_user)
add end, x0, x2
+mov srcin, x1
#include "copy_template.S"
mov x0, #0 // Nothing to copy
ret
@@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)

.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+b.ne 9998f
+// Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+strb tmp1w, [dst], #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
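In C terms, the new fixup path behaves roughly like this (an illustrative model of the assembly above; load_user_byte() is an invented stand-in for the faulting-safe ldtrb, not a real kernel helper):

/* Invented helper: byte load from user memory, 0 on success. */
extern int load_user_byte(unsigned char *val, const char *src);

/*
 * Rough C model of the fixup path above (the real code is assembly).
 * Returns the number of bytes NOT copied, per the uaccess convention.
 */
static unsigned long fixup_after_fault(char *dst, char *dstin,
				       const char *srcin, char *end)
{
	unsigned char tmp;

	if (dst == dstin) {
		/*
		 * The initial multi-byte (possibly unaligned) access faulted
		 * before anything was copied. Before reporting complete
		 * failure, try harder with a single byte-sized access, so
		 * callers that fault in one page at a time still see
		 * forward progress instead of a spurious total failure.
		 */
		if (load_user_byte(&tmp, srcin) == 0)
			*dst++ = tmp;
	}
	return end - dst;	/* bytes not copied */
}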
21 changes: 14 additions & 7 deletions arch/arm64/lib/copy_in_user.S
@@ -30,33 +30,34 @@
.endm

.macro ldrh1 reg, ptr, val
-user_ldst 9998f, ldtrh, \reg, \ptr, \val
+user_ldst 9997f, ldtrh, \reg, \ptr, \val
.endm

.macro strh1 reg, ptr, val
-user_ldst 9998f, sttrh, \reg, \ptr, \val
+user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm

.macro ldr1 reg, ptr, val
-user_ldst 9998f, ldtr, \reg, \ptr, \val
+user_ldst 9997f, ldtr, \reg, \ptr, \val
.endm

.macro str1 reg, ptr, val
-user_ldst 9998f, sttr, \reg, \ptr, \val
+user_ldst 9997f, sttr, \reg, \ptr, \val
.endm

.macro ldp1 reg1, reg2, ptr, val
-user_ldp 9998f, \reg1, \reg2, \ptr, \val
+user_ldp 9997f, \reg1, \reg2, \ptr, \val
.endm

.macro stp1 reg1, reg2, ptr, val
-user_stp 9998f, \reg1, \reg2, \ptr, \val
+user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm

end .req x5
-
+srcin .req x15
SYM_FUNC_START(__arch_copy_in_user)
add end, x0, x2
+mov srcin, x1
#include "copy_template.S"
mov x0, #0
ret
@@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)

.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+b.ne 9998f
+// Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
14 changes: 11 additions & 3 deletions arch/arm64/lib/copy_to_user.S
@@ -32,28 +32,30 @@
.endm

.macro strh1 reg, ptr, val
-user_ldst 9998f, sttrh, \reg, \ptr, \val
+user_ldst 9997f, sttrh, \reg, \ptr, \val
.endm

.macro ldr1 reg, ptr, val
ldr \reg, [\ptr], \val
.endm

.macro str1 reg, ptr, val
-user_ldst 9998f, sttr, \reg, \ptr, \val
+user_ldst 9997f, sttr, \reg, \ptr, \val
.endm

.macro ldp1 reg1, reg2, ptr, val
ldp \reg1, \reg2, [\ptr], \val
.endm

.macro stp1 reg1, reg2, ptr, val
-user_stp 9998f, \reg1, \reg2, \ptr, \val
+user_stp 9997f, \reg1, \reg2, \ptr, \val
.endm

end .req x5
+srcin .req x15
SYM_FUNC_START(__arch_copy_to_user)
add end, x0, x2
+mov srcin, x1
#include "copy_template.S"
mov x0, #0
ret
@@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)

.section .fixup,"ax"
.align 2
+9997: cmp dst, dstin
+b.ne 9998f
+// Before being absolutely sure we couldn't copy anything, try harder
+ldrb tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+add dst, dst, #1
9998: sub x0, end, dst // bytes not copied
ret
.previous
10 changes: 10 additions & 0 deletions arch/arm64/lib/strlen.S
@@ -8,6 +8,7 @@

#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/mte-def.h>

/* Assumptions:
*
@@ -42,7 +43,16 @@
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080

+/*
+ * When KASAN_HW_TAGS is in use, memory is checked at MTE_GRANULE_SIZE
+ * (16-byte) granularity, and we must ensure that no access straddles this
+ * alignment boundary.
+ */
+#ifdef CONFIG_KASAN_HW_TAGS
+#define MIN_PAGE_SIZE MTE_GRANULE_SIZE
+#else
#define MIN_PAGE_SIZE 4096
+#endif

/* Since strings are short on average, we check the first 16 bytes
of the string for a NUL character. In order to do an unaligned ldp
Expand Down
