Merge branch 'x86/asm' into x86/mm, to resolve conflicts
 Conflicts:
	tools/testing/selftests/x86/Makefile

Signed-off-by: Ingo Molnar <[email protected]>
Ingo Molnar committed Jul 15, 2016
2 parents dcb32d9 + be8a18e commit 38452af
Showing 162 changed files with 3,790 additions and 817 deletions.
6 changes: 3 additions & 3 deletions Documentation/x86/intel_mpx.txt
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
MPX-instrumented.
3) The kernel detects that the CPU has MPX, allows the new prctl() to
succeed, and notes the location of the bounds directory. Userspace is
-expected to keep the bounds directory at that locationWe note it
+expected to keep the bounds directory at that location. We note it
instead of reading it each time because the 'xsave' operation needed
to access the bounds directory register is an expensive operation.
4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
We need to decode MPX instructions to get violation address and
set this address into extended struct siginfo.

-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:

87 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
88 struct {
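(The hunk above is cut off by the diff viewer. For reference, the extended _sigfault layout it describes looks roughly like this sketch, reconstructed from the era's include/uapi/asm-generic/siginfo.h with the __user annotations and unrelated fields dropped:)

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct {
            void *_addr;            /* faulting insn/memory ref. */
            struct {                /* SIGSEGV with SEGV_BNDERR */
                    void *_lower;   /* lower bound that was violated */
                    void *_upper;   /* upper bound that was violated */
            } _addr_bnd;
    } _sigfault;

The signal handler sees the violated bounds via si_lower/si_upper, alongside the usual si_addr.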
@@ -240,5 +240,5 @@ them at the same bounds table.
This is allowed architecturally. See more information "Intel(R) Architecture
Instruction Set Extensions Programming Reference" (9.3.4).

-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
in-use bounds table since it does not recognize sharing.
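For step 3 of the lifecycle above, the opt-in looks roughly like this hypothetical userspace sketch (PR_MPX_ENABLE_MANAGEMENT is the prctl() the document refers to; the constant lives in <linux/prctl.h>):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_MPX_ENABLE_MANAGEMENT
    # define PR_MPX_ENABLE_MANAGEMENT 43    /* from <linux/prctl.h> */
    #endif

    int main(void)
    {
            /*
             * Ask the kernel to note the bounds directory location and
             * manage bounds tables for this task. Fails without MPX.
             */
            if (prctl(PR_MPX_ENABLE_MANAGEMENT, 0, 0, 0, 0))
                    perror("prctl(PR_MPX_ENABLE_MANAGEMENT)");
            return 0;
    }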
4 changes: 2 additions & 2 deletions Documentation/x86/tlb.txt
@@ -5,7 +5,7 @@ memory, it has two choices:
from areas other than the one we are trying to flush will be
destroyed and must be refilled later, at some cost.
2. Use the invlpg instruction to invalidate a single page at a
-time. This could potentialy cost many more instructions, but
+time. This could potentially cost many more instructions, but
it is a much more precise operation, causing no collateral
damage to other TLB entries.

@@ -19,7 +19,7 @@ Which method to do depends on a few things:
work.
3. The size of the TLB. The larger the TLB, the more collateral
damage we do with a full flush. So, the larger the TLB, the
-more attrative an individual flush looks. Data and
+more attractive an individual flush looks. Data and
instructions have separate TLBs, as do different page sizes.
4. The microarchitecture. The TLB has become a multi-level
cache on modern CPUs, and the global flushes have become more
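To make option 2 concrete, a single-page invalidation is one instruction; the kernel wraps it in a helper along these lines (a sketch in the style of __flush_tlb_one(), not the exact kernel code):

    static inline void flush_one_page(unsigned long addr)
    {
            /* Drop only the TLB entry that covers 'addr'. */
            asm volatile("invlpg (%0)" : : "r" (addr) : "memory");
    }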
2 changes: 1 addition & 1 deletion Documentation/x86/x86_64/machinecheck
@@ -36,7 +36,7 @@ between all CPUs.

check_interval
How often to poll for corrected machine check errors, in seconds
-(Note output is hexademical). Default 5 minutes. When the poller
+(Note output is hexadecimal). Default 5 minutes. When the poller
finds MCEs it triggers an exponential speedup (poll more often) on
the polling interval. When the poller stops finding MCEs, it
triggers an exponential backoff (poll less often) on the polling
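A hypothetical userspace reader for this knob (the sysfs path is an assumption based on the usual x86 MCE layout, one machinecheck directory per CPU):

    #include <stdio.h>

    int main(void)
    {
            /* assumed path; see the x86 machine check sysfs interface */
            const char *path =
                "/sys/devices/system/machinecheck/machinecheck0/check_interval";
            char buf[32];
            FILE *f = fopen(path, "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("check_interval: %s", buf);
            if (f)
                    fclose(f);
            return 0;
    }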
2 changes: 2 additions & 0 deletions arch/arm64/include/asm/cputype.h
@@ -80,12 +80,14 @@
#define APM_CPU_PART_POTENZA 0x000

#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2

#define BRCM_CPU_PART_VULCAN 0x516

#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)

#ifndef __ASSEMBLY__

2 changes: 2 additions & 0 deletions arch/arm64/include/asm/ptrace.h
@@ -117,6 +117,8 @@ struct pt_regs {
};
u64 orig_x0;
u64 syscallno;
+u64 orig_addr_limit;
+u64 unused; // maintain 16 byte alignment
};

#define arch_has_single_step() (1)
1 change: 1 addition & 0 deletions arch/arm64/kernel/asm-offsets.c
@@ -60,6 +60,7 @@ int main(void)
DEFINE(S_PC, offsetof(struct pt_regs, pc));
DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
+DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
BLANK();
DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
6 changes: 6 additions & 0 deletions arch/arm64/kernel/cpu_errata.c
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
MIDR_RANGE(MIDR_THUNDERX, 0x00,
(1 << MIDR_VARIANT_SHIFT) | 1),
},
+{
+/* Cavium ThunderX, T81 pass 1.0 */
+.desc = "Cavium erratum 27456",
+.capability = ARM64_WORKAROUND_CAVIUM_27456,
+MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+},
#endif
{
}
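For reference, the MIDR_RANGE() initializer used by these entries expands roughly as follows (quoted from memory from the same file, so treat it as a sketch):

    #define MIDR_RANGE(model, min, max)            \
            .matches = is_affected_midr_range,     \
            .midr_model = model,                   \
            .midr_range_min = min,                 \
            .midr_range_max = max

A (min, max) of (0x00, 0x00) therefore matches only variant 0, revision 0 of the T81, i.e. pass 1.0, as the comment says.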
19 changes: 17 additions & 2 deletions arch/arm64/kernel/entry.S
@@ -28,6 +28,7 @@
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
+#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

@@ -97,7 +98,14 @@
mov x29, xzr // fp pointed to user-space
.else
add x21, sp, #S_FRAME_SIZE
-.endif
+get_thread_info tsk
+/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ldr x20, [tsk, #TI_ADDR_LIMIT]
+str x20, [sp, #S_ORIG_ADDR_LIMIT]
+mov x20, #TASK_SIZE_64
+str x20, [tsk, #TI_ADDR_LIMIT]
+ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
stp lr, x21, [sp, #S_LR]
@@ -128,6 +136,14 @@
.endm

.macro kernel_exit, el
+.if \el != 0
+/* Restore the task's original addr_limit. */
+ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+str x20, [tsk, #TI_ADDR_LIMIT]
+
+/* No need to restore UAO, it will be restored from SPSR_EL1 */
+.endif
+
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
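In C terms, the entry/exit pairing added above behaves like this hypothetical sketch (names invented for illustration; the real TASK_SIZE_64 depends on the configured VA_BITS):

    #define TASK_SIZE_64 (1UL << 48)        /* assumption: VA_BITS=48 */

    struct regs_sketch { unsigned long orig_addr_limit; };

    /* Exception entry from EL1: stash addr_limit, then force USER_DS. */
    static void entry_sketch(struct regs_sketch *regs, unsigned long *addr_limit)
    {
            regs->orig_addr_limit = *addr_limit;
            *addr_limit = TASK_SIZE_64;
    }

    /* Return to EL1: restore whatever the interrupted context had. */
    static void exit_sketch(struct regs_sketch *regs, unsigned long *addr_limit)
    {
            *addr_limit = regs->orig_addr_limit;
    }

Entries from EL0 skip the save, which is why the fault.c hunk below notes that orig_addr_limit may be 0 when the exception came from userspace.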
@@ -406,7 +422,6 @@ el1_irq:
bl trace_hardirqs_off
#endif

-get_thread_info tsk
irq_handler

#ifdef CONFIG_PREEMPT
3 changes: 2 additions & 1 deletion arch/arm64/mm/fault.c
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}

if (permission_fault(esr) && (addr < USER_DS)) {
-if (get_fs() == KERNEL_DS)
+/* regs->orig_addr_limit may be 0 if we entered from EL0 */
+if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);

if (!search_exception_tables(regs->pc))
5 changes: 0 additions & 5 deletions arch/x86/Kconfig
@@ -294,11 +294,6 @@ config X86_32_LAZY_GS
def_bool y
depends on X86_32 && !CC_STACKPROTECTOR

-config ARCH_HWEIGHT_CFLAGS
-string
-default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
-default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
-
config ARCH_SUPPORTS_UPROBES
def_bool y

8 changes: 5 additions & 3 deletions arch/x86/boot/bitops.h
@@ -16,14 +16,16 @@
#define BOOT_BITOPS_H
#define _LINUX_BITOPS_H /* Inhibit inclusion of <linux/bitops.h> */

-static inline int constant_test_bit(int nr, const void *addr)
+#include <linux/types.h>
+
+static inline bool constant_test_bit(int nr, const void *addr)
{
const u32 *p = (const u32 *)addr;
return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
}
-static inline int variable_test_bit(int nr, const void *addr)
+static inline bool variable_test_bit(int nr, const void *addr)
{
-u8 v;
+bool v;
const u32 *p = (const u32 *)addr;

asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
17 changes: 9 additions & 8 deletions arch/x86/boot/boot.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <linux/edd.h>
#include <asm/setup.h>
+#include <asm/asm.h>
#include "bitops.h"
#include "ctype.h"
#include "cpuflags.h"
@@ -176,18 +177,18 @@ static inline void wrgs32(u32 v, addr_t addr)
}

/* Note: these only return true/false, not a signed return value! */
-static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
{
-u8 diff;
-asm volatile("fs; repe; cmpsb; setnz %0"
-: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+bool diff;
+asm volatile("fs; repe; cmpsb" CC_SET(nz)
+: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
-static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
{
-u8 diff;
-asm volatile("gs; repe; cmpsb; setnz %0"
-: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+bool diff;
+asm volatile("gs; repe; cmpsb" CC_SET(nz)
+: CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}

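CC_SET()/CC_OUT() come from the x86/asm side of this merge: where the compiler supports flag outputs (__GCC_ASM_FLAG_OUTPUTS__), the condition code is consumed directly as an asm output and the setCC instruction disappears; otherwise they fall back to the old setCC-into-a-register pattern. From memory, the <asm/asm.h> definitions look like this (a sketch; verify against arch/x86/include/asm/asm.h):

    #ifdef __GCC_ASM_FLAG_OUTPUTS__
    # define CC_SET(c) "\n\t/* output condition code " #c " */\n"
    # define CC_OUT(c) "=@cc" #c
    #else
    # define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
    # define CC_OUT(c) [_cc_ ## c] "=qm"
    #endif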
2 changes: 1 addition & 1 deletion arch/x86/boot/string.c
@@ -17,7 +17,7 @@

int memcmp(const void *s1, const void *s2, size_t len)
{
-u8 diff;
+bool diff;
asm("repe; cmpsb; setnz %0"
: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
6 changes: 3 additions & 3 deletions arch/x86/entry/common.c
@@ -40,10 +40,10 @@ static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
-__visible void enter_from_user_mode(void)
+__visible inline void enter_from_user_mode(void)
{
CT_WARN_ON(ct_state() != CONTEXT_USER);
-user_exit();
+user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
@@ -274,7 +274,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
ti->status &= ~TS_COMPAT;
#endif

-user_enter();
+user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS \
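Both call sites in this file run with IRQs already disabled, which is what the _irqoff variants encode: they skip the IRQ-flags save/restore that user_enter()/user_exit() perform. Their shape is roughly the following (assumed from <linux/context_tracking.h> of this era, not verbatim):

    static inline void user_enter_irqoff(void)
    {
            /* Caller guarantees IRQs are off, so no irq_save/restore. */
            if (context_tracking_is_enabled())
                    __context_tracking_enter(CONTEXT_USER);
    }

    static inline void user_exit_irqoff(void)
    {
            if (context_tracking_is_enabled())
                    __context_tracking_exit(CONTEXT_USER);
    }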
6 changes: 3 additions & 3 deletions arch/x86/entry/thunk_64.S
@@ -33,7 +33,7 @@
.endif

call \func
-jmp restore
+jmp .L_restore
_ASM_NOKPROBE(\name)
.endm

@@ -54,7 +54,7 @@
#if defined(CONFIG_TRACE_IRQFLAGS) \
|| defined(CONFIG_DEBUG_LOCK_ALLOC) \
|| defined(CONFIG_PREEMPT)
-restore:
+.L_restore:
popq %r11
popq %r10
popq %r9
@@ -66,5 +66,5 @@ restore:
popq %rdi
popq %rbp
ret
-_ASM_NOKPROBE(restore)
+_ASM_NOKPROBE(.L_restore)
#endif
5 changes: 3 additions & 2 deletions arch/x86/entry/vdso/Makefile
@@ -134,7 +134,7 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
override obj-dirs = $(dir $(obj)) $(obj)/vdso32/

targets += vdso32/vdso32.lds
-targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
+targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
+targets += vdso32/vclock_gettime.o

KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
@@ -156,7 +156,8 @@ $(obj)/vdso32.so.dbg: FORCE \
$(obj)/vdso32/vdso32.lds \
$(obj)/vdso32/vclock_gettime.o \
$(obj)/vdso32/note.o \
-$(obj)/vdso32/system_call.o
+$(obj)/vdso32/system_call.o \
+$(obj)/vdso32/sigreturn.o
$(call if_changed,vdso)

#
8 changes: 0 additions & 8 deletions arch/x86/entry/vdso/vdso32/sigreturn.S
@@ -1,11 +1,3 @@
-/*
- * Common code for the sigreturn entry points in vDSO images.
- * So far this code is the same for both int80 and sysenter versions.
- * This file is #include'd by int80.S et al to define them first thing.
- * The kernel assumes that the addresses of these routines are constant
- * for all vDSO implementations.
- */
-
#include <linux/linkage.h>
#include <asm/unistd_32.h>
#include <asm/asm-offsets.h>
7 changes: 1 addition & 6 deletions arch/x86/entry/vdso/vdso32/system_call.S
@@ -2,16 +2,11 @@
* AT_SYSINFO entry point
*/

+#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>

-/*
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
.text
.globl __kernel_vsyscall
.type __kernel_vsyscall,@function
11 changes: 6 additions & 5 deletions arch/x86/events/core.c
@@ -2319,7 +2319,7 @@ void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
-const void __user *fp;
+const unsigned long __user *fp;

if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
@@ -2332,7 +2332,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
return;

-fp = (void __user *)regs->bp;
+fp = (unsigned long __user *)regs->bp;

perf_callchain_store(entry, regs->ip);

@@ -2345,16 +2345,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
pagefault_disable();
while (entry->nr < entry->max_stack) {
unsigned long bytes;

frame.next_frame = NULL;
frame.return_address = 0;

-if (!access_ok(VERIFY_READ, fp, 16))
+if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
break;

-bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
break;
-bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
if (bytes != 0)
break;

4 changes: 2 additions & 2 deletions arch/x86/events/intel/Makefile
@@ -1,8 +1,8 @@
obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o cqm.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl.o
-intel-rapl-objs := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL) += intel-rapl-perf.o
+intel-rapl-perf-objs := rapl.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o