Merge branch 'fixes' into next
Merge our fixes branch into next.

That lets us resolve a conflict in arch/powerpc/sysdev/xive/common.c.

The conflict is between cbc06f0 ("powerpc/xive: Do not skip CPU-less nodes when
creating the IPIs"), which moved request_irq() out of xive_init_ipis(),
and 17df41f ("powerpc: use IRQF_NO_DEBUG for IPIs"), which added
IRQF_NO_DEBUG to that request_irq() call, which has now moved.
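
For reference, the resolution presumably combines the two: the request_irq() call that cbc06f0 moved now also carries the IRQF_NO_DEBUG flag from 17df41f. A sketch reconstructed from the two commits — the function name, flag order and surrounding code are from memory, not copied from the merge itself:

/* Sketch of the expected resolution in arch/powerpc/sysdev/xive/common.c */
static int xive_request_ipi(unsigned int cpu)
{
	struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
	int ret;

	if (atomic_inc_return(&xid->started) > 1)
		return 0;

	/* The flag added by 17df41f, at the location cbc06f0 moved it to */
	ret = request_irq(xid->irq, xive_muxed_ipi_action,
			  IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
			  xid->name, NULL);

	WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
	return ret;
}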
mpe committed Sep 3, 2021
2 parents e432fe9 + 787c70f commit a331426
Showing 18 changed files with 125 additions and 81 deletions.
20 changes: 20 additions & 0 deletions arch/powerpc/include/asm/book3s/32/kup.h
@@ -4,6 +4,8 @@

#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/mmu.h>
#include <asm/synch.h>

#ifndef __ASSEMBLY__

@@ -28,6 +30,15 @@ static inline void kuep_lock(void)
return;

update_user_segments(mfsr(0) | SR_NX);
/*
 * This isync() shouldn't be necessary as the kernel is not expected to
 * run any instruction in userspace soon after the update of segments,
 * but hash based cores (at least G3) seem to exhibit random
 * behaviour when the 'isync' is not there. 603 cores don't have this
 * behaviour, so skip the 'isync' there and save several CPU cycles.
 */
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
isync(); /* Context sync required after mtsr() */
}

static inline void kuep_unlock(void)
@@ -36,6 +47,15 @@ static inline void kuep_unlock(void)
return;

update_user_segments(mfsr(0) & ~SR_NX);
/*
 * This isync() shouldn't be necessary as a 'rfi' will soon be executed
 * to return to userspace, but hash based cores (at least G3) seem to
 * exhibit random behaviour when the 'isync' is not there. 603 cores
 * don't have this behaviour, so skip the 'isync' there and save several
 * CPU cycles.
 */
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
isync(); /* Context sync required after mtsr() */
}

#ifdef CONFIG_PPC_KUAP
3 changes: 3 additions & 0 deletions arch/powerpc/include/asm/interrupt.h
@@ -583,6 +583,9 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);

DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);

/* irq.c */
DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);

void __noreturn unrecoverable_exception(struct pt_regs *regs);

void replay_system_reset(void);
2 changes: 1 addition & 1 deletion arch/powerpc/include/asm/irq.h
@@ -52,7 +52,7 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];

extern void do_IRQ(struct pt_regs *regs);
void __do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);

16 changes: 16 additions & 0 deletions arch/powerpc/include/asm/ptrace.h
@@ -77,6 +77,22 @@ struct pt_regs
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
#endif
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
struct { /* Must be a multiple of 16 bytes */
unsigned long mas0;
unsigned long mas1;
unsigned long mas2;
unsigned long mas3;
unsigned long mas6;
unsigned long mas7;
unsigned long srr0;
unsigned long srr1;
unsigned long csrr0;
unsigned long csrr1;
unsigned long dsrr0;
unsigned long dsrr1;
};
#endif
};
#endif

31 changes: 14 additions & 17 deletions arch/powerpc/kernel/asm-offsets.c
@@ -302,24 +302,21 @@ int main(void)
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif

#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
STACK_PT_REGS_OFFSET(MAS0, mas0);
/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
#endif
STACK_PT_REGS_OFFSET(MMUCR, mas0);
STACK_PT_REGS_OFFSET(MAS1, mas1);
STACK_PT_REGS_OFFSET(MAS2, mas2);
STACK_PT_REGS_OFFSET(MAS3, mas3);
STACK_PT_REGS_OFFSET(MAS6, mas6);
STACK_PT_REGS_OFFSET(MAS7, mas7);
STACK_PT_REGS_OFFSET(_SRR0, srr0);
STACK_PT_REGS_OFFSET(_SRR1, srr1);
STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
#endif

/* About the CPU features table */
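The conversion above relies on STACK_PT_REGS_OFFSET(), which wraps DEFINE() with the pt_regs offset arithmetic that the removed DEFINE() lines open-coded against struct exception_regs. As I recall it from the same asm-offsets.c (worth verifying against the tree):

/* Each symbol becomes the register's offset from the stack pointer:
 * the frame overhead plus the member's offset within pt_regs. Moving
 * the exception-level save area into pt_regs is what makes this
 * single macro work for MAS0..DSRR1 too.
 */
#define STACK_PT_REGS_OFFSET(sym, val)	\
	DEFINE(sym, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, val))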
7 changes: 4 additions & 3 deletions arch/powerpc/kernel/exceptions-64s.S
@@ -812,7 +812,6 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -842,10 +841,12 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
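The point of the hard-coded 0xc000000000003000/0xc000000000004000 values: interrupt entry compares the (unrelocated) NIP against the table, so label addresses that get fixed up by relocation would never match. A standalone sketch of that kind of range lookup — the names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: a soft-mask table is a list of [start, end)
 * text ranges; an interrupted address is treated as soft-masked if
 * any range contains it. The scv vector entries must be absolute
 * (unrelocated) addresses because that code runs unrelocated.
 */
struct soft_mask_entry {
	unsigned long start;
	unsigned long end;
};

static bool soft_masked(const struct soft_mask_entry *table, size_t n,
			unsigned long nip)
{
	for (size_t i = 0; i < n; i++)
		if (nip >= table[i].start && nip < table[i].end)
			return true;
	return false;
}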
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/head_book3s_32.S
@@ -300,7 +300,7 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
EXCEPTION_PROLOG_1
EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
prepare_transfer_to_handler
lwz r5, _DSISR(r11)
lwz r5, _DSISR(r1)
andis. r0, r5, DSISR_DABRMATCH@h
bne- 1f
bl do_page_fault
27 changes: 3 additions & 24 deletions arch/powerpc/kernel/head_booke.h
@@ -168,20 +168,18 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
/* only on e500mc */
#define DBG_STACK_BASE dbgirq_ctx

#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)

#ifdef CONFIG_SMP
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
mfspr r8,SPRN_PIR; \
slwi r8,r8,2; \
addis r8,r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \
addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#else
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
lis r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \
addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#endif

/*
@@ -208,7 +206,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
mtmsr r11; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\
beq 1f; \
/* COMING FROM USER MODE */ \
stw r9,_CCR(r11); /* save CR */\
@@ -516,24 +514,5 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
bl kernel_fp_unavailable_exception; \
b interrupt_return

#else /* __ASSEMBLY__ */
struct exception_regs {
unsigned long mas0;
unsigned long mas1;
unsigned long mas2;
unsigned long mas3;
unsigned long mas6;
unsigned long mas7;
unsigned long srr0;
unsigned long srr1;
unsigned long csrr0;
unsigned long csrr1;
unsigned long dsrr0;
unsigned long dsrr1;
};

/* ensure this structure is always sized to a multiple of the stack alignment */
#define STACK_EXC_LVL_FRAME_SIZE ALIGN(sizeof (struct exception_regs), 16)

#endif /* __ASSEMBLY__ */
#endif /* __HEAD_BOOKE_H__ */
7 changes: 6 additions & 1 deletion arch/powerpc/kernel/irq.c
@@ -750,7 +750,7 @@ void __do_irq(struct pt_regs *regs)
trace_irq_exit(regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp;
@@ -774,6 +774,11 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
set_irq_regs(old_regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
__do_IRQ(regs);
}

static void *__init alloc_vm_stack(void)
{
return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
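The reason for the __do_IRQ() split: DEFINE_INTERRUPT_HANDLER_ASYNC() wraps the handler body in interrupt entry/exit accounting, so calling do_IRQ() from inside another handler (as timer_interrupt() does below) would run that accounting twice. Roughly, simplified from arch/powerpc/include/asm/interrupt.h (check the tree for the exact form — the real macro has more annotations):

/* Simplified sketch of the wrapper macro: the visible handler does
 * the async enter/exit bookkeeping around a hidden body, and the
 * macro leaves that body's definition open for the code that follows.
 */
#define DEFINE_INTERRUPT_HANDLER_ASYNC(func)				\
static __always_inline void ____##func(struct pt_regs *regs);		\
									\
void func(struct pt_regs *regs)						\
{									\
	/* irq entry accounting, soft-mask bookkeeping, etc. */	\
	interrupt_async_enter_prepare(regs);				\
									\
	____##func(regs);						\
									\
	interrupt_async_exit_prepare(regs);				\
}									\
									\
static __always_inline void ____##func(struct pt_regs *regs)

Exposing __do_IRQ() as the raw body lets timer_interrupt() (and the traps.c change below, which applies the same pattern to single_step_exception()) reuse the logic without nesting the wrappers.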
3 changes: 2 additions & 1 deletion arch/powerpc/kernel/kprobes.c
@@ -292,7 +292,8 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;

if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))
if (!IS_ENABLED(CONFIG_BOOKE) &&
(!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
return 0;

/*
2 changes: 1 addition & 1 deletion arch/powerpc/kernel/sysfs.c
@@ -1167,7 +1167,7 @@ static int __init topology_init(void)
* CPU. For instance, the boot cpu might never be valid
* for hotplugging.
*/
if (smp_ops->cpu_offline_self)
if (smp_ops && smp_ops->cpu_offline_self)
c->hotpluggable = 1;
#endif

2 changes: 1 addition & 1 deletion arch/powerpc/kernel/time.c
@@ -585,7 +585,7 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)

#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
__do_IRQ(regs);
#endif

old_regs = set_irq_regs(regs);
9 changes: 7 additions & 2 deletions arch/powerpc/kernel/traps.c
@@ -1104,7 +1104,7 @@ DEFINE_INTERRUPT_HANDLER(RunModeException)
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

DEFINE_INTERRUPT_HANDLER(single_step_exception)
static void __single_step_exception(struct pt_regs *regs)
{
clear_single_step(regs);
clear_br_trace(regs);
@@ -1121,6 +1121,11 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

DEFINE_INTERRUPT_HANDLER(single_step_exception)
{
__single_step_exception(regs);
}

/*
* After we have successfully emulated an instruction, we have to
* check if the instruction was being single-stepped, and if so,
@@ -1130,7 +1135,7 @@ DEFINE_INTERRUPT_HANDLER(single_step_exception)
static void emulate_single_step(struct pt_regs *regs)
{
if (single_stepping(regs))
single_step_exception(regs);
__single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
7 changes: 7 additions & 0 deletions arch/powerpc/kernel/vdso64/Makefile
@@ -27,6 +27,13 @@ KASAN_SANITIZE := n

ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
-Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both

# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
# compiler generated. To avoid breaking Go, tell GCC not to use r30. The impact on code
# generation is minimal; it will just use r29 instead.
ccflags-y += $(call cc-option, -ffixed-r30)

asflags-y := -D__VDSO64__ -s

targets += vdso64.lds
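One way to see the flag in action — a hypothetical experiment, not part of the patch: build a function with many live values with and without -ffixed-r30 and compare the assembly. With the flag, r30 should never be allocated (depending on optimization level, the plain build may also need more pressure than shown before it reaches r30):

/* reg_pressure.c — hypothetical test, not kernel code. Compile on a
 * powerpc64 toolchain, e.g.:
 *   gcc -O2 -S reg_pressure.c
 *   gcc -O2 -ffixed-r30 -S reg_pressure.c
 * and grep the two listings for r30.
 */
long churn(const long *a, long n)
{
	long s0 = 0, s1 = 1, s2 = 2, s3 = 3;
	long s4 = 4, s5 = 5, s6 = 6, s7 = 7;

	for (long i = 0; i < n; i++) {
		s0 += a[i]; s1 ^= s0; s2 += s1; s3 ^= s2;
		s4 += s3;   s5 ^= s4; s6 += s5; s7 ^= s6;
	}
	return s0 + s1 + s2 + s3 + s4 + s5 + s6 + s7;
}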
23 changes: 10 additions & 13 deletions arch/powerpc/mm/pageattr.c
@@ -18,16 +18,12 @@
/*
* Updates the attributes of a page in three steps:
*
* 1. invalidate the page table entry
* 2. flush the TLB
* 3. install the new entry with the updated attributes
*
* Invalidating the pte means there are situations where this will not work
* when in theory it should.
* For example:
* - removing write from page whilst it is being executed
* - setting a page read-only whilst it is being read by another CPU
* 1. take the page_table_lock
* 2. install the new entry with the updated attributes
* 3. flush the TLB
*
* This sequence is safe against concurrent updates, and also allows updating the
* attributes of a page currently being executed or accessed.
*/
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
@@ -36,9 +32,7 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)

spin_lock(&init_mm.page_table_lock);

/* invalidate the PTE so it's safe to modify */
pte = ptep_get_and_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
pte = ptep_get(ptep);

/* modify the PTE bits as desired, then apply */
switch (action) {
@@ -59,11 +53,14 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
break;
}

set_pte_at(&init_mm, addr, ptep, pte);
pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0);

/* See ptesync comment in radix__set_pte_at() */
if (radix_enabled())
asm volatile("ptesync": : :"memory");

flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

spin_unlock(&init_mm.page_table_lock);

return 0;
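For context, change_page_attr() runs once per PTE over the target range, each invocation taking init_mm.page_table_lock as shown above. The caller in the same file looks roughly like this (a sketch from memory — see change_memory_attr() in pageattr.c for the real thing, which has extra sanity checks):

/* Sketch of the per-range entry point that applies the callback */
int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages << PAGE_SHIFT;

	if (!numpages)
		return 0;

	/* walks existing kernel page tables, calling change_page_attr()
	 * for each PTE in [start, start + size) */
	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}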
2 changes: 1 addition & 1 deletion arch/powerpc/platforms/Kconfig.cputype
@@ -98,7 +98,7 @@ config PPC_BOOK3S_64
select PPC_HAVE_PMU_SUPPORT
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_PMD_SPLIT_PTLOCK
select ARCH_ENABLE_SPLIT_PMD_PTLOCK
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
7 changes: 4 additions & 3 deletions arch/powerpc/platforms/pseries/setup.c
@@ -77,7 +77,7 @@
#include "../../../../drivers/pci/pci.h"

DEFINE_STATIC_KEY_FALSE(shared_processor);
EXPORT_SYMBOL_GPL(shared_processor);
EXPORT_SYMBOL(shared_processor);

int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
@@ -541,9 +541,10 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
* H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
* H_CPU_BEHAV_FAVOUR_SECURITY is.
*/
if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
pseries_security_flavor = 0;
} else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
pseries_security_flavor = 1;
else
pseries_security_flavor = 2;
