Merge branch 'x86/mm' into efi/core, to pick up dependencies
Signed-off-by: Ingo Molnar <[email protected]>
Ingo Molnar committed Jan 10, 2020
2 parents 02df083 + 186525b commit 57ad87d
Showing 67 changed files with 797 additions and 681 deletions.
4 changes: 4 additions & 0 deletions arch/alpha/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_ALPHA_VMALLOC_H
#define _ASM_ALPHA_VMALLOC_H

#endif /* _ASM_ALPHA_VMALLOC_H */
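The same four-line stub is repeated for each architecture below. These headers exist so that generic code (in this series, <linux/vmalloc.h>) can include <asm/vmalloc.h> unconditionally, and so that an architecture can later add vmalloc-related overrides to its own copy. A minimal sketch of what a populated stub could look like; the hook is purely illustrative and is not defined by this commit:

#ifndef _ASM_FOO_VMALLOC_H
#define _ASM_FOO_VMALLOC_H

/* Hypothetical arch override; generic code could test for it with #ifdef. */
#define arch_vmalloc_example_hook arch_vmalloc_example_hook
static inline void arch_vmalloc_example_hook(void)
{
}

#endif /* _ASM_FOO_VMALLOC_H */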
4 changes: 4 additions & 0 deletions arch/arc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_ARC_VMALLOC_H
#define _ASM_ARC_VMALLOC_H

#endif /* _ASM_ARC_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/arm/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_ARM_VMALLOC_H
#define _ASM_ARM_VMALLOC_H

#endif /* _ASM_ARM_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H

#endif /* _ASM_ARM64_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/c6x/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_C6X_VMALLOC_H
#define _ASM_C6X_VMALLOC_H

#endif /* _ASM_C6X_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/csky/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_CSKY_VMALLOC_H
#define _ASM_CSKY_VMALLOC_H

#endif /* _ASM_CSKY_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/h8300/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_H8300_VMALLOC_H
#define _ASM_H8300_VMALLOC_H

#endif /* _ASM_H8300_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/hexagon/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_HEXAGON_VMALLOC_H
#define _ASM_HEXAGON_VMALLOC_H

#endif /* _ASM_HEXAGON_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/ia64/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_IA64_VMALLOC_H
#define _ASM_IA64_VMALLOC_H

#endif /* _ASM_IA64_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/m68k/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_M68K_VMALLOC_H
#define _ASM_M68K_VMALLOC_H

#endif /* _ASM_M68K_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/microblaze/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_MICROBLAZE_VMALLOC_H
#define _ASM_MICROBLAZE_VMALLOC_H

#endif /* _ASM_MICROBLAZE_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/mips/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_MIPS_VMALLOC_H
#define _ASM_MIPS_VMALLOC_H

#endif /* _ASM_MIPS_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/nds32/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_NDS32_VMALLOC_H
#define _ASM_NDS32_VMALLOC_H

#endif /* _ASM_NDS32_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/nios2/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_NIOS2_VMALLOC_H
#define _ASM_NIOS2_VMALLOC_H

#endif /* _ASM_NIOS2_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/openrisc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_OPENRISC_VMALLOC_H
#define _ASM_OPENRISC_VMALLOC_H

#endif /* _ASM_OPENRISC_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/parisc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_PARISC_VMALLOC_H
#define _ASM_PARISC_VMALLOC_H

#endif /* _ASM_PARISC_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/powerpc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_POWERPC_VMALLOC_H
#define _ASM_POWERPC_VMALLOC_H

#endif /* _ASM_POWERPC_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/riscv/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H

#endif /* _ASM_RISCV_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/s390/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_S390_VMALLOC_H
#define _ASM_S390_VMALLOC_H

#endif /* _ASM_S390_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/sh/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_SH_VMALLOC_H
#define _ASM_SH_VMALLOC_H

#endif /* _ASM_SH_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/sparc/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_SPARC_VMALLOC_H
#define _ASM_SPARC_VMALLOC_H

#endif /* _ASM_SPARC_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/um/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_UM_VMALLOC_H
#define _ASM_UM_VMALLOC_H

#endif /* _ASM_UM_VMALLOC_H */
4 changes: 4 additions & 0 deletions arch/unicore32/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
#ifndef _ASM_UNICORE32_VMALLOC_H
#define _ASM_UNICORE32_VMALLOC_H

#endif /* _ASM_UNICORE32_VMALLOC_H */
2 changes: 1 addition & 1 deletion arch/x86/Kconfig
@@ -1512,7 +1512,7 @@ config X86_CPA_STATISTICS
	bool "Enable statistic for Change Page Attribute"
	depends on DEBUG_FS
	---help---
-	  Expose statistics about the Change Page Attribute mechanims, which
+	  Expose statistics about the Change Page Attribute mechanism, which
	  helps to determine the effectiveness of preserving large and huge
	  page mappings when mapping protections are changed.
10 changes: 1 addition & 9 deletions arch/x86/include/asm/cpu_entry_area.h
@@ -6,6 +6,7 @@
#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
+#include <asm/pgtable_areas.h>

#ifdef CONFIG_X86_64

@@ -134,15 +135,6 @@ DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

-/* Single page reserved for the readonly IDT mapping: */
-#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
-#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
-
-#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
-
-#define CPU_ENTRY_AREA_MAP_SIZE				\
-	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
-
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
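The constants removed above are not dropped: they move into the new <asm/pgtable_areas.h> that this hunk starts including. A sketch of the destination header, reconstructed from the macros removed here (the file itself is added elsewhere in this 67-file diff and is not shown in this excerpt):

/* arch/x86/include/asm/pgtable_areas.h (sketch) */
#ifndef _ASM_X86_PGTABLE_AREAS_H
#define _ASM_X86_PGTABLE_AREAS_H

/* Single page reserved for the readonly IDT mapping: */
#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE				\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)

#endif /* _ASM_X86_PGTABLE_AREAS_H */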
27 changes: 27 additions & 0 deletions arch/x86/include/asm/memtype.h
@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MEMTYPE_H
#define _ASM_X86_MEMTYPE_H

#include <linux/types.h>
#include <asm/pgtable_types.h>

extern bool pat_enabled(void);
extern void pat_disable(const char *reason);
extern void pat_init(void);
extern void init_cache_modes(void);

extern int memtype_reserve(u64 start, u64 end,
			   enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
extern int memtype_free(u64 start, u64 end);

extern int memtype_kernel_map_sync(u64 base, unsigned long size,
				   enum page_cache_mode pcm);

extern int memtype_reserve_io(resource_size_t start, resource_size_t end,
			      enum page_cache_mode *pcm);

extern void memtype_free_io(resource_size_t start, resource_size_t end);

extern bool pat_pfn_immune_to_uc_mtrr(unsigned long pfn);

#endif /* _ASM_X86_MEMTYPE_H */
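This header is the renamed PAT ("memtype") API; it replaces <asm/pat.h>, which is deleted later in this diff. A usage sketch: memtype_reserve(), memtype_free(), and _PAGE_CACHE_MODE_WC are real names from the declarations above, while example_reserve_wc() and its back-out policy are illustrative only:

/* Sketch: reserve a write-combining memtype over an MMIO range, use it,
 * then release it. Error handling is deliberately minimal. */
static int example_reserve_wc(u64 base, u64 size)
{
	enum page_cache_mode pcm;
	int ret;

	ret = memtype_reserve(base, base + size, _PAGE_CACHE_MODE_WC, &pcm);
	if (ret)
		return ret;

	if (pcm != _PAGE_CACHE_MODE_WC) {
		/* PAT downgraded the request; back out rather than map UC. */
		memtype_free(base, base + size);
		return -EINVAL;
	}

	/* ... ioremap_wc() and use the range here ... */

	memtype_free(base, base + size);
	return 0;
}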
86 changes: 6 additions & 80 deletions arch/x86/include/asm/mmu_context.h
@@ -69,14 +69,6 @@ struct ldt_struct {
	int slot;
};

-/* This is a multiple of PAGE_SIZE. */
-#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
-
-static inline void *ldt_slot_va(int slot)
-{
-	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
-}

/*
* Used for LDT copy/destruction.
*/
@@ -99,87 +91,21 @@ static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif

-static inline void load_mm_ldt(struct mm_struct *mm)
-{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
-	struct ldt_struct *ldt;
-
-	/* READ_ONCE synchronizes with smp_store_release */
-	ldt = READ_ONCE(mm->context.ldt);
-
-	/*
-	 * Any change to mm->context.ldt is followed by an IPI to all
-	 * CPUs with the mm active. The LDT will not be freed until
-	 * after the IPI is handled by all such CPUs. This means that,
-	 * if the ldt_struct changes before we return, the values we see
-	 * will be safe, and the new values will be loaded before we run
-	 * any user code.
-	 *
-	 * NB: don't try to convert this to use RCU without extreme care.
-	 * We would still need IRQs off, because we don't want to change
-	 * the local LDT after an IPI loaded a newer value than the one
-	 * that we can see.
-	 */
-
-	if (unlikely(ldt)) {
-		if (static_cpu_has(X86_FEATURE_PTI)) {
-			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
-				/*
-				 * Whoops -- either the new LDT isn't mapped
-				 * (if slot == -1) or is mapped into a bogus
-				 * slot (if slot > 1).
-				 */
-				clear_LDT();
-				return;
-			}
-
-			/*
-			 * If page table isolation is enabled, ldt->entries
-			 * will not be mapped in the userspace pagetables.
-			 * Tell the CPU to access the LDT through the alias
-			 * at ldt_slot_va(ldt->slot).
-			 */
-			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
-		} else {
-			set_ldt(ldt->entries, ldt->nr_entries);
-		}
-	} else {
-		clear_LDT();
-	}
+extern void load_mm_ldt(struct mm_struct *mm);
+extern void switch_ldt(struct mm_struct *prev, struct mm_struct *next);
#else
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
	clear_LDT();
-#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
-#ifdef CONFIG_MODIFY_LDT_SYSCALL
-	/*
-	 * Load the LDT if either the old or new mm had an LDT.
-	 *
-	 * An mm will never go from having an LDT to not having an LDT. Two
-	 * mms never share an LDT, so we don't gain anything by checking to
-	 * see whether the LDT changed. There's also no guarantee that
-	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
-	 * then prev->context.ldt will also be non-NULL.
-	 *
-	 * If we really cared, we could optimize the case where prev == next
-	 * and we're exiting lazy mode. Most of the time, if this happens,
-	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
-	 * used by legacy code and emulators where we don't need this level of
-	 * performance.
-	 *
-	 * This uses | instead of || because it generates better code.
-	 */
-	if (unlikely((unsigned long)prev->context.ldt |
-		     (unsigned long)next->context.ldt))
-		load_mm_ldt(next);
-#endif
-
	DEBUG_LOCKS_WARN_ON(preemptible());
}
+#endif

-void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
+extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

/*
* Init a new mm. Used on mm copies, like at fork()
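load_mm_ldt() and switch_ldt() are converted from header inlines to out-of-line functions here; their bodies move into a C file that is not part of this excerpt (presumably arch/x86/kernel/ldt.c). A sketch of the out-of-line switch_ldt(), assuming the body is carried over unchanged from the inline version removed above:

/* Sketch; assumed destination arch/x86/kernel/ldt.c, built only when
 * CONFIG_MODIFY_LDT_SYSCALL=y. */
void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * Load the LDT if either the old or new mm had an LDT; see the
	 * rationale in the comment block removed above.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);

	DEBUG_LOCKS_WARN_ON(preemptible());
}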
4 changes: 2 additions & 2 deletions arch/x86/include/asm/mtrr.h
@@ -24,7 +24,7 @@
#define _ASM_X86_MTRR_H

#include <uapi/asm/mtrr.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>


/*
@@ -86,7 +86,7 @@ static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
}
static inline void mtrr_bp_init(void)
{
-	pat_disable("MTRRs disabled, skipping PAT initialization too.");
+	pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
}

#define mtrr_ap_init() do {} while (0)
27 changes: 0 additions & 27 deletions arch/x86/include/asm/pat.h

This file was deleted.

2 changes: 1 addition & 1 deletion arch/x86/include/asm/pci.h
@@ -9,7 +9,7 @@
#include <linux/scatterlist.h>
#include <linux/numa.h>
#include <asm/io.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
#include <asm/x86_init.h>

struct pci_sysdata {
53 changes: 53 additions & 0 deletions arch/x86/include/asm/pgtable_32_areas.h
@@ -0,0 +1,53 @@
#ifndef _ASM_X86_PGTABLE_32_AREAS_H
#define _ASM_X86_PGTABLE_32_AREAS_H

#include <asm/cpu_entry_area.h>

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET (8 * 1024 * 1024)

#ifndef __ASSEMBLY__
extern bool __vmalloc_start_set; /* set once high_memory is set */
#endif

#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

#define CPU_ENTRY_AREA_PAGES (NR_CPUS * DIV_ROUND_UP(sizeof(struct cpu_entry_area), PAGE_SIZE))

/* The +1 is for the readonly IDT page: */
#define CPU_ENTRY_AREA_BASE				\
	((FIXADDR_TOT_START - PAGE_SIZE*(CPU_ENTRY_AREA_PAGES+1)) & PMD_MASK)

#define LDT_BASE_ADDR					\
	((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)

#define LDT_END_ADDR		(LDT_BASE_ADDR + PMD_SIZE)

#define PKMAP_BASE					\
	((LDT_BASE_ADDR - PAGE_SIZE) & PMD_MASK)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
#else
# define VMALLOC_END (LDT_BASE_ADDR - 2 * PAGE_SIZE)
#endif

#define MODULES_VADDR VMALLOC_START
#define MODULES_END VMALLOC_END
#define MODULES_LEN (MODULES_VADDR - MODULES_END)

#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)

#endif /* _ASM_X86_PGTABLE_32_AREAS_H */
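To make the layout arithmetic above concrete: the CPU entry area, LDT, and PKMAP windows are stacked downward from the fixmap, with each base rounded down to a PMD boundary. A standalone userspace sketch of the same arithmetic; FIXADDR_TOT_START, NR_CPUS, and the per-CPU page count are assumed example values, not constants from this commit:

/* Compile with: cc -o layout layout.c */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_MASK	(~(2UL * 1024 * 1024 - 1))	/* 2 MiB PMDs (PAE) */

int main(void)
{
	unsigned long fixaddr_tot_start = 0xffd00000UL;	/* assumed */
	unsigned long cea_pages = 8 * 40;	/* NR_CPUS * pages per CPU, assumed */

	unsigned long cea_base    = (fixaddr_tot_start - PAGE_SIZE * (cea_pages + 1)) & PMD_MASK;
	unsigned long ldt_base    = (cea_base - PAGE_SIZE) & PMD_MASK;
	unsigned long pkmap_base  = (ldt_base - PAGE_SIZE) & PMD_MASK;
	unsigned long vmalloc_end = pkmap_base - 2 * PAGE_SIZE;	/* CONFIG_HIGHMEM layout */

	printf("CPU_ENTRY_AREA_BASE = %#lx\n", cea_base);
	printf("LDT_BASE_ADDR       = %#lx\n", ldt_base);
	printf("PKMAP_BASE          = %#lx\n", pkmap_base);
	printf("VMALLOC_END         = %#lx\n", vmalloc_end);
	return 0;
}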
(Diff truncated; the remaining files of the 67 changed are not shown.)