Merge tag 'x86_cpu_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cpu updates from Borislav Petkov:

 - Split MTRR and PAT init code to accommodate at least Xen PV and TDX
   guests which do not get MTRRs exposed but only PAT. (TDX guests do
   not support the cache-disabling dance when setting up MTRRs, so they
   fall under the same category)

   This is cleanup work to remove all the ugly workarounds for such
   guests and init things separately (Juergen Gross)

 - Add two new Intel CPUs to the list of CPUs with "normal" Energy
   Performance Bias, leading to power savings

 - Do not do bus master arbitration in C3 (ARB_DISABLE) on modern
   Centaur CPUs

* tag 'x86_cpu_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
  x86/mtrr: Make message for disabled MTRRs more descriptive
  x86/pat: Handle TDX guest PAT initialization
  x86/cpuid: Carve out all CPUID functionality
  x86/cpu: Switch to cpu_feature_enabled() for X86_FEATURE_XENPV
  x86/cpu: Remove X86_FEATURE_XENPV usage in setup_cpu_entry_area()
  x86/cpu: Drop 32-bit Xen PV guest code in update_task_stack()
  x86/cpu: Remove unneeded 64-bit dependency in arch_enter_from_user_mode()
  x86/cpufeatures: Add X86_FEATURE_XENPV to disabled-features.h
  x86/acpi/cstate: Optimize ARB_DISABLE on Centaur CPUs
  x86/mtrr: Simplify mtrr_ops initialization
  x86/cacheinfo: Switch cache_ap_init() to hotplug callback
  x86: Decouple PAT and MTRR handling
  x86/mtrr: Add a stop_machine() handler calling only cache_cpu_init()
  x86/mtrr: Let cache_aps_delayed_init replace mtrr_aps_delayed_init
  x86/mtrr: Get rid of __mtrr_enabled bool
  x86/mtrr: Simplify mtrr_bp_init()
  x86/mtrr: Remove set_all callback from struct mtrr_ops
  x86/mtrr: Disentangle MTRR init from PAT init
  x86/mtrr: Move cache control code to cacheinfo.c
  x86/mtrr: Split MTRR-specific handling from cache dis/enabling
  ...
torvalds committed Dec 13, 2022
2 parents 4eb77fa + 7882b69 commit 3ef3ace
Showing 30 changed files with 538 additions and 662 deletions.
13 changes: 13 additions & 0 deletions arch/x86/include/asm/cacheinfo.h
@@ -2,7 +2,20 @@
#ifndef _ASM_X86_CACHEINFO_H
#define _ASM_X86_CACHEINFO_H

/* Kernel controls MTRR and/or PAT MSRs. */
extern unsigned int memory_caching_control;
#define CACHE_MTRR 0x01
#define CACHE_PAT 0x02

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);

void cache_disable(void);
void cache_enable(void);
void set_cache_aps_delayed_init(bool val);
bool get_cache_aps_delayed_init(void);
void cache_bp_init(void);
void cache_bp_restore(void);
void cache_aps_init(void);

#endif /* _ASM_X86_CACHEINFO_H */
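
For context, a hedged sketch — not part of this diff — of how the new memory_caching_control flags are meant to be consumed: a common per-CPU init path can gate MTRR and PAT programming independently, using the declarations above together with mtrr_generic_set_state() and pat_cpu_init() from the headers further down. The exact body of the real cache_cpu_init() in the tree may differ.

/* Illustrative sketch, assuming the declarations added in this series. */
void cache_cpu_init(void)
{
	unsigned long flags;

	local_irq_save(flags);
	cache_disable();			/* enter the caches-off state */

	if (memory_caching_control & CACHE_MTRR)
		mtrr_generic_set_state();	/* program MTRRs, if kernel-controlled */

	if (memory_caching_control & CACHE_PAT)
		pat_cpu_init();			/* program the PAT MSR */

	cache_enable();				/* re-enable caching */
	local_irq_restore(flags);
}
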
141 changes: 139 additions & 2 deletions arch/x86/include/asm/cpuid.h
@@ -1,13 +1,132 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * CPUID-related helpers/definitions
 *
 * Derived from arch/x86/kvm/cpuid.c
 */

#ifndef _ASM_X86_CPUID_H
#define _ASM_X86_CPUID_H

#include <asm/string.h>

struct cpuid_regs {
	u32 eax, ebx, ecx, edx;
};

enum cpuid_regs_idx {
	CPUID_EAX = 0,
	CPUID_EBX,
	CPUID_ECX,
	CPUID_EDX,
};

#ifdef CONFIG_X86_32
extern int have_cpuid_p(void);
#else
static inline int have_cpuid_p(void)
{
	return 1;
}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx)
	    : "memory");
}

#define native_cpuid_reg(reg)						\
static inline unsigned int native_cpuid_##reg(unsigned int op)		\
{									\
	unsigned int eax = op, ebx, ecx = 0, edx;			\
									\
	native_cpuid(&eax, &ebx, &ecx, &edx);				\
									\
	return reg;							\
}

/*
 * Native CPUID functions returning a single datum.
 */
native_cpuid_reg(eax)
native_cpuid_reg(ebx)
native_cpuid_reg(ecx)
native_cpuid_reg(edx)

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#define __cpuid native_cpuid
#endif

/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}

/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}

static __always_inline bool cpuid_function_is_indexed(u32 function)
{
	switch (function) {
@@ -31,4 +150,22 @@ static __always_inline bool cpuid_function_is_indexed(u32 function)
	return false;
}

#define for_each_possible_hypervisor_cpuid_base(function) \
	for (function = 0x40000000; function < 0x40010000; function += 0x100)

static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for_each_possible_hypervisor_cpuid_base(base) {
		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

#endif /* _ASM_X86_CPUID_H */
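
A usage illustration (not from this diff): a guest can probe for a hypervisor by signature with the helper above. The wrapper name below is hypothetical; "KVMKVMKVM\0\0\0" is the well-known 12-byte KVM CPUID signature.

#include <asm/cpuid.h>

/* Hypothetical helper: true if the KVM signature is found anywhere in
 * the 0x40000000-0x4000ff00 hypervisor leaf range probed above. */
static bool guest_is_kvm(void)
{
	return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}
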
8 changes: 7 additions & 1 deletion arch/x86/include/asm/disabled-features.h
@@ -81,6 +81,12 @@
# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
#endif

#ifdef CONFIG_XEN_PV
# define DISABLE_XENPV 0
#else
# define DISABLE_XENPV (1 << (X86_FEATURE_XENPV & 31))
#endif

#ifdef CONFIG_INTEL_TDX_GUEST
# define DISABLE_TDX_GUEST 0
#else
@@ -98,7 +104,7 @@
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 (DISABLE_TDX_GUEST)
#define DISABLED_MASK8 (DISABLE_XENPV|DISABLE_TDX_GUEST)
#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET)
4 changes: 2 additions & 2 deletions arch/x86/include/asm/entry-common.h
@@ -24,8 +24,8 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
	/*
	 * For !SMAP hardware we patch out CLAC on entry.
	 */
	if (boot_cpu_has(X86_FEATURE_SMAP) ||
	    (IS_ENABLED(CONFIG_64BIT) && boot_cpu_has(X86_FEATURE_XENPV)))
	if (cpu_feature_enabled(X86_FEATURE_SMAP) ||
	    cpu_feature_enabled(X86_FEATURE_XENPV))
		mask |= X86_EFLAGS_AC;

	WARN_ON_ONCE(flags & mask);
Expand Down
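
Why the IS_ENABLED(CONFIG_64BIT) guard can go away: with X86_FEATURE_XENPV now listed in disabled-features.h, cpu_feature_enabled() collapses to a compile-time false on CONFIG_XEN_PV=n builds and the compiler drops the branch entirely. Roughly, and as a hedged sketch of the mechanism in <asm/cpufeature.h>:

/* Simplified sketch; DISABLED_MASK_BIT_SET() checks the per-word
 * DISABLED_MASK* tables, static_cpu_has() is the runtime path. */
#define cpu_feature_enabled(bit)					\
	(__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit)	\
		? 0 : static_cpu_has(bit))
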
5 changes: 2 additions & 3 deletions arch/x86/include/asm/memtype.h
@@ -6,9 +6,8 @@
#include <asm/pgtable_types.h>

extern bool pat_enabled(void);
extern void pat_disable(const char *reason);
extern void pat_init(void);
extern void init_cache_modes(void);
extern void pat_bp_init(void);
extern void pat_cpu_init(void);

extern int memtype_reserve(u64 start, u64 end,
		enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
1 change: 1 addition & 0 deletions arch/x86/include/asm/msr-index.h
@@ -793,6 +793,7 @@
#define ENERGY_PERF_BIAS_PERFORMANCE 0
#define ENERGY_PERF_BIAS_BALANCE_PERFORMANCE 4
#define ENERGY_PERF_BIAS_NORMAL 6
#define ENERGY_PERF_BIAS_NORMAL_POWERSAVE 7
#define ENERGY_PERF_BIAS_BALANCE_POWERSAVE 8
#define ENERGY_PERF_BIAS_POWERSAVE 15

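
The new value 7 sits between ENERGY_PERF_BIAS_NORMAL (6) and ENERGY_PERF_BIAS_BALANCE_POWERSAVE (8). A hedged sketch of programming it — EPB lives in the low four bits of MSR_IA32_ENERGY_PERF_BIAS, and the helper name below is illustrative:

#include <asm/msr.h>

/* Illustrative helper, not from this diff: select the powersave-
 * leaning "normal" Energy Performance Bias on the current CPU. */
static void set_epb_normal_powersave(void)
{
	u64 epb;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	epb = (epb & ~0xfULL) | ENERGY_PERF_BIAS_NORMAL_POWERSAVE;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}
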
16 changes: 8 additions & 8 deletions arch/x86/include/asm/mtrr.h
@@ -25,13 +25,12 @@

#include <uapi/asm/mtrr.h>

void mtrr_bp_init(void);

/*
* The following functions are for use by other drivers that cannot use
* arch_phys_wc_add and arch_phys_wc_del.
*/
# ifdef CONFIG_MTRR
void mtrr_bp_init(void);
extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
extern void mtrr_save_fixed_ranges(void *);
extern void mtrr_save_state(void);
@@ -42,12 +41,12 @@ extern int mtrr_add_page(unsigned long base, unsigned long size,
extern int mtrr_del(int reg, unsigned long base, unsigned long size);
extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
extern void mtrr_ap_init(void);
extern void set_mtrr_aps_delayed_init(void);
extern void mtrr_aps_init(void);
extern void mtrr_bp_restore(void);
extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
extern int amd_special_default_mtrr(void);
void mtrr_disable(void);
void mtrr_enable(void);
void mtrr_generic_set_state(void);
# else
static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
{
@@ -83,10 +82,11 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
{
}
#define mtrr_ap_init() do {} while (0)
#define set_mtrr_aps_delayed_init() do {} while (0)
#define mtrr_aps_init() do {} while (0)
#define mtrr_bp_init() do {} while (0)
#define mtrr_bp_restore() do {} while (0)
#define mtrr_disable() do {} while (0)
#define mtrr_enable() do {} while (0)
#define mtrr_generic_set_state() do {} while (0)
# endif

#ifdef CONFIG_COMPAT
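
The do {} while (0) stubs above keep call sites free of #ifdef CONFIG_MTRR. A hedged illustration — the call-site name is hypothetical:

/* Hypothetical call site: compiles to nothing when CONFIG_MTRR=n,
 * because the #else branch defines these as do {} while (0) stubs. */
static void disable_mtrr_around_update(void)
{
	mtrr_disable();
	/* ... reprogram cache/memory-type state here ... */
	mtrr_enable();
}
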
... (the remaining 23 changed files are not shown)
