x86/microcode/32: Move early loading after paging enable

32-bit loads microcode before paging is enabled. The commit which
introduced that has zero justification in the changelog. The cover
letter has slightly more content, but it does not give any technical
justification either:

  "The problem in current microcode loading method is that we load a
   microcode way, way too late; ideally we should load it before turning
   paging on.  This may only be practical on 32 bits since we can't get
   to 64-bit mode without paging on, but we should still do it as early
   as at all possible."

Handwaving word salad with zero technical content.

Someone claimed in an offlist conversation that this is required for
curing the ATOM erratum AAE44/AAF40/AAG38/AAH41. That erratum requires
a microcode update in order to make the usage of PSE safe. But during
early boot, PSE is completely irrelevant and it is evaluated way later.

Neither is it relevant for the AP on single-core HT-enabled CPUs, as the
microcode loading on the AP does not do anything.

On dual-core CPUs there is a theoretical problem if an executable large
page is split between enabling paging (including PSE) and loading the
microcode. But that is only theoretical; in practice it is irrelevant
because the affected dual-core CPUs are 64-bit enabled and therefore have
paging and PSE enabled before loading the microcode on the second core.
So why would it work on 64-bit but not on 32-bit?

The erratum:

  "AAG38 Code Fetch May Occur to Incorrect Address After a Large Page is
   Split Into 4-Kbyte Pages

   Problem: If software clears the PS (page size) bit in a present PDE
   (page directory entry), that will cause linear addresses mapped through
   this PDE to use 4-KByte pages instead of using a large page after old
   TLB entries are invalidated. Due to this erratum, if a code fetch uses
   this PDE before the TLB entry for the large page is invalidated then it
   may fetch from a different physical address than specified by either the
   old large page translation or the new 4-KByte page translation. This
   erratum may also cause speculative code fetches from incorrect addresses."

The practical relevance of this is exactly zero because no large text
pages are split during early boot, i.e. between paging enable and
microcode loading, nor during CPU hotplug.

IOW, loading microcode before paging is enabled is yet another
voodoo-programming solution in search of a problem. What's worse is that
it causes at least two serious problems:

 1) When stackprotector is enabled, the microcode loader code has the
    stackprotector mechanics enabled. The read from the per CPU variable
    __stack_chk_guard always accesses a virtual address, either directly
    on UP or via %fs on SMP (see the sketch after this list). In physical
    address mode this results in an access to memory above 3GB. So this
    works by chance, as the hardware returns the same value when there is
    no RAM at that physical address. When there is RAM populated above
    3G, the read also happens to work, but only because nothing changes
    that memory during the very early boot stage. That's not necessarily
    true during runtime CPU hotplug.

 2) When function tracing is enabled, the relevant microcode loader
    functions and the functions invoked from there will call into the
    tracing code and evaluate global and per CPU variables in physical
    address mode. What could potentially go wrong?

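For illustration, here is a minimal user-space sketch of the stackprotector
mechanics referred to in 1). It is an analogy, not kernel code: in user
space the guard lives in TLS (segment-relative on x86), while the kernel
reads the per CPU variable __stack_chk_guard. In both cases the
compiler-emitted guard load is a virtual address access, which is exactly
what cannot work in physical address mode.

  /*
   * Sketch only. Build with: gcc -O2 -fstack-protector-all canary.c -o canary
   * The compiler prologue copies a canary from the guard location into the
   * stack frame; the epilogue re-reads the guard and calls
   * __stack_chk_fail() on mismatch.
   */
  #include <stdio.h>
  #include <string.h>

  static void copy_name(const char *src)
  {
          char buf[16];

          /* The instrumented prologue/epilogue wrap this function body. */
          strncpy(buf, src, sizeof(buf) - 1);
          buf[sizeof(buf) - 1] = '\0';
          printf("%s\n", buf);
  }

  int main(int argc, char **argv)
  {
          copy_name(argc > 1 ? argv[1] : "microcode");
          return 0;
  }
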
Cure this by moving the microcode loading after the early paging enable,
using the new temporary initrd mapping, and removing the gunk in the
microcode loader which was required to handle physical address mode.
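
To make the "gunk" concrete: before this change the 32-bit AMD loader had
to translate kernel symbol addresses by hand before touching them, roughly
like the now-removed fragment below (simplified from the
find_blobs_in_containers() hunk further down). With the loader running
after paging enable, ucode_cpu_info and ucode_path can be used directly.

  struct ucode_cpu_info *uci;
  const char *path;
  bool use_pa;

  if (IS_ENABLED(CONFIG_X86_32)) {
          /* Paging is off: virtual symbol addresses are not usable yet. */
          uci    = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
          path   = (const char *)__pa_nodebug(ucode_path);
          use_pa = true;
  } else {
          uci    = ucode_cpu_info;
          path   = ucode_path;
          use_pa = false;
  }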

Signed-off-by: Thomas Gleixner <[email protected]>
Signed-off-by: Borislav Petkov (AMD) <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
KAGA-KOKO authored and bp3tk0v committed Oct 18, 2023
1 parent 4c585af commit 0b62f6c
Showing 9 changed files with 71 additions and 270 deletions.
5 changes: 0 additions & 5 deletions arch/x86/include/asm/microcode.h
@@ -70,11 +70,6 @@ static inline u32 intel_get_microcode_revision(void)

return rev;
}

void show_ucode_info_early(void);

#else /* CONFIG_CPU_SUP_INTEL */
static inline void show_ucode_info_early(void) { }
#endif /* !CONFIG_CPU_SUP_INTEL */

#endif /* _ASM_X86_MICROCODE_H */
12 changes: 0 additions & 12 deletions arch/x86/kernel/cpu/common.c
@@ -2166,8 +2166,6 @@ static inline void setup_getcpu(int cpu)
}

#ifdef CONFIG_X86_64
static inline void ucode_cpu_init(int cpu) { }

static inline void tss_setup_ist(struct tss_struct *tss)
{
/* Set up the per-CPU TSS IST stacks */
@@ -2178,16 +2176,8 @@ static inline void tss_setup_ist(struct tss_struct *tss)
/* Only mapped when SEV-ES is active */
tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}

#else /* CONFIG_X86_64 */

static inline void ucode_cpu_init(int cpu)
{
show_ucode_info_early();
}

static inline void tss_setup_ist(struct tss_struct *tss) { }

#endif /* !CONFIG_X86_64 */

static inline void tss_setup_io_bitmap(struct tss_struct *tss)
@@ -2243,8 +2233,6 @@ void cpu_init(void)
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();

ucode_cpu_init(cpu);

#ifdef CONFIG_NUMA
if (this_cpu_read(numa_node) == 0 &&
early_cpu_to_node(cpu) != NUMA_NO_NODE)
103 changes: 32 additions & 71 deletions arch/x86/kernel/cpu/microcode/amd.c
@@ -121,24 +121,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)

/*
* Check whether there is a valid microcode container file at the beginning
* of @buf of size @buf_size. Set @early to use this function in the early path.
* of @buf of size @buf_size.
*/
static bool verify_container(const u8 *buf, size_t buf_size, bool early)
static bool verify_container(const u8 *buf, size_t buf_size)
{
u32 cont_magic;

if (buf_size <= CONTAINER_HDR_SZ) {
if (!early)
pr_debug("Truncated microcode container header.\n");

pr_debug("Truncated microcode container header.\n");
return false;
}

cont_magic = *(const u32 *)buf;
if (cont_magic != UCODE_MAGIC) {
if (!early)
pr_debug("Invalid magic value (0x%08x).\n", cont_magic);

pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
return false;
}

@@ -147,23 +143,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early)

/*
* Check whether there is a valid, non-truncated CPU equivalence table at the
* beginning of @buf of size @buf_size. Set @early to use this function in the
* early path.
* beginning of @buf of size @buf_size.
*/
static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
{
const u32 *hdr = (const u32 *)buf;
u32 cont_type, equiv_tbl_len;

if (!verify_container(buf, buf_size, early))
if (!verify_container(buf, buf_size))
return false;

cont_type = hdr[1];
if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
if (!early)
pr_debug("Wrong microcode container equivalence table type: %u.\n",
cont_type);

pr_debug("Wrong microcode container equivalence table type: %u.\n",
cont_type);
return false;
}

@@ -172,9 +165,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
equiv_tbl_len = hdr[2];
if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
buf_size < equiv_tbl_len) {
if (!early)
pr_debug("Truncated equivalence table.\n");

pr_debug("Truncated equivalence table.\n");
return false;
}

@@ -183,22 +174,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)

/*
* Check whether there is a valid, non-truncated microcode patch section at the
* beginning of @buf of size @buf_size. Set @early to use this function in the
* early path.
* beginning of @buf of size @buf_size.
*
* On success, @sh_psize returns the patch size according to the section header,
* to the caller.
*/
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
u32 p_type, p_size;
const u32 *hdr;

if (buf_size < SECTION_HDR_SIZE) {
if (!early)
pr_debug("Truncated patch section.\n");

pr_debug("Truncated patch section.\n");
return false;
}

@@ -207,17 +195,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early
p_size = hdr[1];

if (p_type != UCODE_UCODE_TYPE) {
if (!early)
pr_debug("Invalid type field (0x%x) in container file section header.\n",
p_type);

pr_debug("Invalid type field (0x%x) in container file section header.\n",
p_type);
return false;
}

if (p_size < sizeof(struct microcode_header_amd)) {
if (!early)
pr_debug("Patch of size %u too short.\n", p_size);

pr_debug("Patch of size %u too short.\n", p_size);
return false;
}

@@ -269,15 +253,15 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
* 0: success
*/
static int
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early)
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
{
struct microcode_header_amd *mc_hdr;
unsigned int ret;
u32 sh_psize;
u16 proc_id;
u8 patch_fam;

if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
if (!__verify_patch_section(buf, buf_size, &sh_psize))
return -1;

/*
@@ -292,25 +276,21 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea
* size sh_psize, as the section claims.
*/
if (buf_size < sh_psize) {
if (!early)
pr_debug("Patch of size %u truncated.\n", sh_psize);

pr_debug("Patch of size %u truncated.\n", sh_psize);
return -1;
}

ret = __verify_patch_size(family, sh_psize, buf_size);
if (!ret) {
if (!early)
pr_debug("Per-family patch size mismatch.\n");
pr_debug("Per-family patch size mismatch.\n");
return -1;
}

*patch_size = sh_psize;

mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE);
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
if (!early)
pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id);
return -1;
}

@@ -337,7 +317,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u16 eq_id;
u8 *buf;

if (!verify_equivalence_table(ucode, size, true))
if (!verify_equivalence_table(ucode, size))
return 0;

buf = ucode;
@@ -364,7 +344,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
u32 patch_size;
int ret;

ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true);
ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size);
if (ret < 0) {
/*
* Patch verification failed, skip to the next container, if
@@ -456,14 +436,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
{
struct cont_desc desc = { 0 };
struct microcode_amd *mc;
u32 rev, dummy, *new_rev;
bool ret = false;

#ifdef CONFIG_X86_32
new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
#else
new_rev = &ucode_new_rev;
#endif
u32 rev, dummy;

desc.cpuid_1_eax = cpuid_1_eax;

@@ -484,8 +458,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size)
return ret;

if (!__apply_microcode_amd(mc)) {
*new_rev = mc->hdr.patch_id;
ret = true;
ucode_new_rev = mc->hdr.patch_id;
ret = true;
}

return ret;
@@ -514,26 +488,13 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)

static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
{
struct ucode_cpu_info *uci;
struct cpio_data cp;
const char *path;
bool use_pa;

if (IS_ENABLED(CONFIG_X86_32)) {
uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
path = (const char *)__pa_nodebug(ucode_path);
use_pa = true;
} else {
uci = ucode_cpu_info;
path = ucode_path;
use_pa = false;
}

if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
cp = find_microcode_in_initrd(path, use_pa);
cp = find_microcode_in_initrd(ucode_path);

/* Needed in load_microcode_amd() */
uci->cpu_sig.sig = cpuid_1_eax;
ucode_cpu_info->cpu_sig.sig = cpuid_1_eax;

*ret = cp;
}
@@ -562,7 +523,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
enum ucode_state ret;
struct cpio_data cp;

cp = find_microcode_in_initrd(ucode_path, false);
cp = find_microcode_in_initrd(ucode_path);
if (!(cp.data && cp.size))
return -EINVAL;

@@ -738,7 +699,7 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
u32 equiv_tbl_len;
const u32 *hdr;

if (!verify_equivalence_table(buf, buf_size, false))
if (!verify_equivalence_table(buf, buf_size))
return 0;

hdr = (const u32 *)buf;
@@ -784,7 +745,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
u16 proc_id;
int ret;

ret = verify_patch(family, fw, leftover, patch_size, false);
ret = verify_patch(family, fw, leftover, patch_size);
if (ret)
return ret;

@@ -918,7 +879,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device)
}

ret = UCODE_ERROR;
if (!verify_container(fw->data, fw->size, false))
if (!verify_container(fw->data, fw->size))
goto fw_release;

ret = load_microcode_amd(c->x86, fw->data, fw->size);