Skip to content

Commit

Permalink
Revert "x86, mm: Include the entire kernel memory map in trampoline_pgd"
Browse files Browse the repository at this point in the history
This reverts commit 53b87cf.

It causes odd bootup problems on x86-64.  Markus Trippelsdorf gets a
repeatable oops, and I see a non-repeatable oops (or constant stream of
messages that scroll off too quickly to read) that seems to go away with
this commit reverted.

So we don't know exactly what is wrong with the commit, but it's
definitely problematic, and worth reverting sooner rather than later.

Bisected-by: Markus Trippelsdorf <[email protected]>
Cc: H Peter Anvin <[email protected]>
Cc: Jan Beulich <[email protected]>
Cc: Matt Fleming <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
torvalds committed Dec 15, 2012
1 parent 7a280cf commit be354f4
Show file tree
Hide file tree
Showing 3 changed files with 3 additions and 128 deletions.
9 changes: 1 addition & 8 deletions arch/x86/mm/init_64.c
Original file line number Diff line number Diff line change
Expand Up @@ -108,13 +108,13 @@ void sync_global_pgds(unsigned long start, unsigned long end)
for (address = start; address <= end; address += PGDIR_SIZE) {
const pgd_t *pgd_ref = pgd_offset_k(address);
struct page *page;
pgd_t *pgd;

if (pgd_none(*pgd_ref))
continue;

spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
pgd_t *pgd;
spinlock_t *pgt_lock;

pgd = (pgd_t *)page_address(page) + pgd_index(address);
Expand All @@ -130,13 +130,6 @@ void sync_global_pgds(unsigned long start, unsigned long end)

spin_unlock(pgt_lock);
}

pgd = __va(real_mode_header->trampoline_pgd);
pgd += pgd_index(address);

if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);

spin_unlock(&pgd_lock);
}
}
Expand Down
105 changes: 0 additions & 105 deletions arch/x86/mm/ioremap.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,107 +50,6 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
return err;
}

#ifdef CONFIG_X86_64
/*
 * Copy the PTE entries covering [vaddr, end) from the kernel page table
 * (under vpmd) into the identity-mapping table (under ppmd), one
 * PAGE_SIZE step at a time.
 *
 * paddr is used only to locate the starting PTE slot in ppmd; it is not
 * advanced inside the loop (ppte++ walks the slots instead).
 * Assumes both PMDs are already populated — the caller
 * (ident_pmd_range) allocates ppmd's PTE page before calling here.
 */
static void ident_pte_range(unsigned long paddr, unsigned long vaddr,
pmd_t *ppmd, pmd_t *vpmd, unsigned long end)
{
pte_t *ppte = pte_offset_kernel(ppmd, paddr);
pte_t *vpte = pte_offset_kernel(vpmd, vaddr);

do {
/* Mirror the kernel mapping's PTE into the identity table. */
set_pte(ppte, *vpte);
} while (ppte++, vpte++, vaddr += PAGE_SIZE, vaddr != end);
}

/*
 * Walk the PMD entries covering [vaddr, end), copying kernel mappings
 * (under vpud) into the identity table (under ppud). Allocates a zeroed
 * PTE page on demand for any identity-side PMD that is not yet present.
 *
 * Returns 0 on success, 1 on allocation failure (non-zero = error,
 * matching the other ident_*_range helpers).
 *
 * NOTE(review): paddr is never advanced across loop iterations, while
 * ppmd/vpmd and vaddr are — verify this is intentional; ident_pte_range
 * only uses paddr for its initial slot lookup.
 */
static int ident_pmd_range(unsigned long paddr, unsigned long vaddr,
pud_t *ppud, pud_t *vpud, unsigned long end)
{
pmd_t *ppmd = pmd_offset(ppud, paddr);
pmd_t *vpmd = pmd_offset(vpud, vaddr);
unsigned long next;

do {
next = pmd_addr_end(vaddr, end);

/* Populate the identity-side PMD with a fresh PTE page if empty. */
if (!pmd_present(*ppmd)) {
pte_t *ppte = (pte_t *)get_zeroed_page(GFP_KERNEL);
if (!ppte)
return 1;

set_pmd(ppmd, __pmd(_KERNPG_TABLE | __pa(ppte)));
}

ident_pte_range(paddr, vaddr, ppmd, vpmd, next);
} while (ppmd++, vpmd++, vaddr = next, vaddr != end);

return 0;
}

/*
 * Walk the PUD entries covering [vaddr, end), copying kernel mappings
 * (under vpgd) into the identity table (under ppgd). Allocates a zeroed
 * PMD page on demand for any identity-side PUD that is not yet present,
 * then recurses down via ident_pmd_range.
 *
 * Returns 0 on success, 1 on failure (propagated from ident_pmd_range
 * or from allocation failure here).
 *
 * NOTE(review): as with ident_pmd_range, paddr is not advanced across
 * iterations — confirm intended.
 */
static int ident_pud_range(unsigned long paddr, unsigned long vaddr,
pgd_t *ppgd, pgd_t *vpgd, unsigned long end)
{
pud_t *ppud = pud_offset(ppgd, paddr);
pud_t *vpud = pud_offset(vpgd, vaddr);
unsigned long next;

do {
next = pud_addr_end(vaddr, end);

/* Populate the identity-side PUD with a fresh PMD page if empty. */
if (!pud_present(*ppud)) {
pmd_t *ppmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
if (!ppmd)
return 1;

set_pud(ppud, __pud(_KERNPG_TABLE | __pa(ppmd)));
}

if (ident_pmd_range(paddr, vaddr, ppud, vpud, next))
return 1;
} while (ppud++, vpud++, vaddr = next, vaddr != end);

return 0;
}

/*
 * Top-level entry: duplicate the kernel mapping for [vaddr, vaddr+size)
 * into the trampoline page table (real_mode_header->trampoline_pgd),
 * keyed by physical address paddr, so the region is also reachable via
 * an identity (phys == virt) mapping. Allocates intermediate PUD pages
 * on demand.
 *
 * Returns 0 on success, 1 on failure (guard-hole overlap, allocation
 * failure, or failure in a lower-level helper).
 *
 * NOTE(review): in `paddr >= 0x800000000000 || paddr + size >
 * 0x800000000000`, the first clause is implied by the second unless
 * paddr + size wraps around — presumably 0x800000000000 marks the start
 * of the x86-64 non-canonical hole; confirm against the kernel's
 * virtual memory map documentation.
 */
static int insert_identity_mapping(resource_size_t paddr, unsigned long vaddr,
unsigned long size)
{
unsigned long end = vaddr + size;
unsigned long next;
pgd_t *vpgd, *ppgd;

/* Don't map over the guard hole. */
if (paddr >= 0x800000000000 || paddr + size > 0x800000000000)
return 1;

/* Identity-side PGD slot is chosen by PHYSICAL address... */
ppgd = __va(real_mode_header->trampoline_pgd) + pgd_index(paddr);

/* ...while the source slot is the kernel's, chosen by VIRTUAL address. */
vpgd = pgd_offset_k(vaddr);
do {
next = pgd_addr_end(vaddr, end);

/* Populate the identity-side PGD with a fresh PUD page if empty. */
if (!pgd_present(*ppgd)) {
pud_t *ppud = (pud_t *)get_zeroed_page(GFP_KERNEL);
if (!ppud)
return 1;

set_pgd(ppgd, __pgd(_KERNPG_TABLE | __pa(ppud)));
}

if (ident_pud_range(paddr, vaddr, ppgd, vpgd, next))
return 1;
} while (ppgd++, vpgd++, vaddr = next, vaddr != end);

return 0;
}
#else
/*
 * !CONFIG_X86_64 stub: no trampoline identity table is maintained on
 * this configuration, so there is nothing to map — report success.
 */
static inline int insert_identity_mapping(resource_size_t paddr,
					  unsigned long vaddr, unsigned long size)
{
	return 0;
}
#endif /* CONFIG_X86_64 */

/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
Expand Down Expand Up @@ -264,10 +163,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
ret_addr = (void __iomem *) (vaddr + offset);
mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

if (insert_identity_mapping(phys_addr, vaddr, size))
printk(KERN_WARNING "ioremap: unable to map 0x%llx in identity pagetable\n",
(unsigned long long)phys_addr);

/*
* Check if the request spans more than any BAR in the iomem resource
* tree.
Expand Down
17 changes: 2 additions & 15 deletions arch/x86/realmode/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -78,21 +78,8 @@ void __init setup_real_mode(void)
*trampoline_cr4_features = read_cr4();

trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

/*
* Create an identity mapping for all of physical memory.
*/
for (i = 0; i <= pgd_index(max_pfn << PAGE_SHIFT); i++) {
int index = pgd_index(PAGE_OFFSET) + i;

trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[index]);
}

/*
* Copy the upper-half of the kernel pages tables.
*/
for (i = pgd_index(PAGE_OFFSET); i < PTRS_PER_PGD; i++)
trampoline_pgd[i] = (u64)pgd_val(swapper_pg_dir[i]);
trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
#endif
}

Expand Down

0 comments on commit be354f4

Please sign in to comment.