Skip to content

Commit a8b0ca1

Browse files
Peter Zijlstra, Ingo Molnar
Peter Zijlstra
authored and
Ingo Molnar
committed
perf: Remove the nmi parameter from the swevent and overflow interface
The nmi parameter indicated if we could do wakeups from the current context, if not, we would set some state and self-IPI and let the resulting interrupt do the wakeup. For the various event classes: - hardware: nmi=0; PMI is in fact an NMI or we run irq_work_run from the PMI-tail (ARM etc.) - tracepoint: nmi=0; since tracepoint could be from NMI context. - software: nmi=[0,1]; some, like the schedule thing cannot perform wakeups, and hence need 0. As one can see, there is very little nmi=1 usage, and the down-side of not using it is that on some platforms some software events can have a jiffy delay in wakeup (when arch_irq_work_raise isn't implemented). The up-side however is that we can remove the nmi parameter and save a bunch of conditionals in fast paths. Signed-off-by: Peter Zijlstra <[email protected]> Cc: Michael Cree <[email protected]> Cc: Will Deacon <[email protected]> Cc: Deng-Cheng Zhu <[email protected]> Cc: Anton Blanchard <[email protected]> Cc: Eric B Munson <[email protected]> Cc: Heiko Carstens <[email protected]> Cc: Paul Mundt <[email protected]> Cc: David S. Miller <[email protected]> Cc: Frederic Weisbecker <[email protected]> Cc: Jason Wessel <[email protected]> Cc: Don Zickus <[email protected]> Link: http://lkml.kernel.org/n/[email protected] Signed-off-by: Ingo Molnar <[email protected]>
1 parent 1880c4a commit a8b0ca1

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

46 files changed

+119
-141
lines changed

arch/alpha/kernel/perf_event.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
847847
data.period = event->hw.last_period;
848848

849849
if (alpha_perf_event_set_period(event, hwc, idx)) {
850-
if (perf_event_overflow(event, 1, &data, regs)) {
850+
if (perf_event_overflow(event, &data, regs)) {
851851
/* Interrupts coming too quickly; "throttle" the
852852
* counter, i.e., disable it for a little while.
853853
*/

arch/arm/kernel/perf_event_v6.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -479,7 +479,7 @@ armv6pmu_handle_irq(int irq_num,
479479
if (!armpmu_event_set_period(event, hwc, idx))
480480
continue;
481481

482-
if (perf_event_overflow(event, 0, &data, regs))
482+
if (perf_event_overflow(event, &data, regs))
483483
armpmu->disable(hwc, idx);
484484
}
485485

arch/arm/kernel/perf_event_v7.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -787,7 +787,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
787787
if (!armpmu_event_set_period(event, hwc, idx))
788788
continue;
789789

790-
if (perf_event_overflow(event, 0, &data, regs))
790+
if (perf_event_overflow(event, &data, regs))
791791
armpmu->disable(hwc, idx);
792792
}
793793

arch/arm/kernel/perf_event_xscale.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -251,7 +251,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
251251
if (!armpmu_event_set_period(event, hwc, idx))
252252
continue;
253253

254-
if (perf_event_overflow(event, 0, &data, regs))
254+
if (perf_event_overflow(event, &data, regs))
255255
armpmu->disable(hwc, idx);
256256
}
257257

@@ -583,7 +583,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
583583
if (!armpmu_event_set_period(event, hwc, idx))
584584
continue;
585585

586-
if (perf_event_overflow(event, 0, &data, regs))
586+
if (perf_event_overflow(event, &data, regs))
587587
armpmu->disable(hwc, idx);
588588
}
589589

arch/arm/kernel/ptrace.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
396396
/*
397397
* Handle hitting a HW-breakpoint.
398398
*/
399-
static void ptrace_hbptriggered(struct perf_event *bp, int unused,
399+
static void ptrace_hbptriggered(struct perf_event *bp,
400400
struct perf_sample_data *data,
401401
struct pt_regs *regs)
402402
{

arch/arm/kernel/swp_emulate.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
183183
unsigned int address, destreg, data, type;
184184
unsigned int res = 0;
185185

186-
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
186+
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
187187

188188
if (current->pid != previous_pid) {
189189
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",

arch/arm/mm/fault.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
318318
fault = __do_page_fault(mm, addr, fsr, tsk);
319319
up_read(&mm->mmap_sem);
320320

321-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
321+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
322322
if (fault & VM_FAULT_MAJOR)
323-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
323+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
324324
else if (fault & VM_FAULT_MINOR)
325-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
325+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
326326

327327
/*
328328
* Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR

arch/mips/kernel/perf_event.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
527527
if (!mipspmu_event_set_period(event, hwc, idx))
528528
return;
529529

530-
if (perf_event_overflow(event, 0, data, regs))
530+
if (perf_event_overflow(event, data, regs))
531531
mipspmu->disable_event(idx);
532532
}
533533

arch/mips/kernel/traps.c

+4-4
Original file line numberDiff line numberDiff line change
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
578578
{
579579
if ((opcode & OPCODE) == LL) {
580580
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
581-
1, 0, regs, 0);
581+
1, regs, 0);
582582
return simulate_ll(regs, opcode);
583583
}
584584
if ((opcode & OPCODE) == SC) {
585585
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
586-
1, 0, regs, 0);
586+
1, regs, 0);
587587
return simulate_sc(regs, opcode);
588588
}
589589

@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
602602
int rd = (opcode & RD) >> 11;
603603
int rt = (opcode & RT) >> 16;
604604
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
605-
1, 0, regs, 0);
605+
1, regs, 0);
606606
switch (rd) {
607607
case 0: /* CPU number */
608608
regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
640640
{
641641
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
642642
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
643-
1, 0, regs, 0);
643+
1, regs, 0);
644644
return 0;
645645
}
646646

arch/mips/kernel/unaligned.c

+2-3
Original file line numberDiff line numberDiff line change
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
111111
unsigned long value;
112112
unsigned int res;
113113

114-
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
115-
1, 0, regs, 0);
114+
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
116115

117116
/*
118117
* This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
517516
mm_segment_t seg;
518517

519518
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
520-
1, 0, regs, regs->cp0_badvaddr);
519+
1, regs, regs->cp0_badvaddr);
521520
/*
522521
* Did we catch a fault trying to load an instruction?
523522
* Or are we running in MIPS16 mode?

arch/mips/math-emu/cp1emu.c

+1-2
Original file line numberDiff line numberDiff line change
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
272272
}
273273

274274
emul:
275-
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
276-
1, 0, xcp, 0);
275+
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
277276
MIPS_FPU_EMU_INC_STATS(emulated);
278277
switch (MIPSInst_OPCODE(ir)) {
279278
case ldc1_op:{

arch/mips/mm/fault.c

+3-5
Original file line numberDiff line numberDiff line change
@@ -145,7 +145,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
145145
* the fault.
146146
*/
147147
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
148-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
148+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
149149
if (unlikely(fault & VM_FAULT_ERROR)) {
150150
if (fault & VM_FAULT_OOM)
151151
goto out_of_memory;
@@ -154,12 +154,10 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
154154
BUG();
155155
}
156156
if (fault & VM_FAULT_MAJOR) {
157-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
158-
1, 0, regs, address);
157+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
159158
tsk->maj_flt++;
160159
} else {
161-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
162-
1, 0, regs, address);
160+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
163161
tsk->min_flt++;
164162
}
165163

arch/powerpc/include/asm/emulated_ops.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
7878
#define PPC_WARN_EMULATED(type, regs) \
7979
do { \
8080
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, \
81-
1, 0, regs, 0); \
81+
1, regs, 0); \
8282
__PPC_WARN_EMULATED(type); \
8383
} while (0)
8484

8585
#define PPC_WARN_ALIGNMENT(type, regs) \
8686
do { \
8787
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, \
88-
1, 0, regs, regs->dar); \
88+
1, regs, regs->dar); \
8989
__PPC_WARN_EMULATED(type); \
9090
} while (0)
9191

arch/powerpc/kernel/perf_event.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
12071207
* here so there is no possibility of being interrupted.
12081208
*/
12091209
static void record_and_restart(struct perf_event *event, unsigned long val,
1210-
struct pt_regs *regs, int nmi)
1210+
struct pt_regs *regs)
12111211
{
12121212
u64 period = event->hw.sample_period;
12131213
s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
12581258
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
12591259
perf_get_data_addr(regs, &data.addr);
12601260

1261-
if (perf_event_overflow(event, nmi, &data, regs))
1261+
if (perf_event_overflow(event, &data, regs))
12621262
power_pmu_stop(event, 0);
12631263
}
12641264
}
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
13461346
if ((int)val < 0) {
13471347
/* event has overflowed */
13481348
found = 1;
1349-
record_and_restart(event, val, regs, nmi);
1349+
record_and_restart(event, val, regs);
13501350
}
13511351
}
13521352

arch/powerpc/kernel/perf_event_fsl_emb.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
568568
* here so there is no possibility of being interrupted.
569569
*/
570570
static void record_and_restart(struct perf_event *event, unsigned long val,
571-
struct pt_regs *regs, int nmi)
571+
struct pt_regs *regs)
572572
{
573573
u64 period = event->hw.sample_period;
574574
s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
616616
perf_sample_data_init(&data, 0);
617617
data.period = event->hw.last_period;
618618

619-
if (perf_event_overflow(event, nmi, &data, regs))
619+
if (perf_event_overflow(event, &data, regs))
620620
fsl_emb_pmu_stop(event, 0);
621621
}
622622
}
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
644644
if (event) {
645645
/* event has overflowed */
646646
found = 1;
647-
record_and_restart(event, val, regs, nmi);
647+
record_and_restart(event, val, regs);
648648
} else {
649649
/*
650650
* Disabled counter is negative,

arch/powerpc/kernel/ptrace.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
882882
}
883883

884884
#ifdef CONFIG_HAVE_HW_BREAKPOINT
885-
void ptrace_triggered(struct perf_event *bp, int nmi,
885+
void ptrace_triggered(struct perf_event *bp,
886886
struct perf_sample_data *data, struct pt_regs *regs)
887887
{
888888
struct perf_event_attr attr;

arch/powerpc/mm/fault.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
173173
die("Weird page fault", regs, SIGSEGV);
174174
}
175175

176-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
176+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
177177

178178
/* When running in the kernel we expect faults to occur only to
179179
* addresses in user space. All other faults represent errors in the
@@ -319,7 +319,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
319319
}
320320
if (ret & VM_FAULT_MAJOR) {
321321
current->maj_flt++;
322-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
322+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
323323
regs, address);
324324
#ifdef CONFIG_PPC_SMLPAR
325325
if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -330,7 +330,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
330330
#endif
331331
} else {
332332
current->min_flt++;
333-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
333+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
334334
regs, address);
335335
}
336336
up_read(&mm->mmap_sem);

arch/s390/mm/fault.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
299299
goto out;
300300

301301
address = trans_exc_code & __FAIL_ADDR_MASK;
302-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
302+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
303303
flags = FAULT_FLAG_ALLOW_RETRY;
304304
if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
305305
flags |= FAULT_FLAG_WRITE;
@@ -345,11 +345,11 @@ static inline int do_exception(struct pt_regs *regs, int access,
345345
if (flags & FAULT_FLAG_ALLOW_RETRY) {
346346
if (fault & VM_FAULT_MAJOR) {
347347
tsk->maj_flt++;
348-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
348+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
349349
regs, address);
350350
} else {
351351
tsk->min_flt++;
352-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
352+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
353353
regs, address);
354354
}
355355
if (fault & VM_FAULT_RETRY) {

arch/sh/kernel/ptrace_32.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
6363
return 0;
6464
}
6565

66-
void ptrace_triggered(struct perf_event *bp, int nmi,
66+
void ptrace_triggered(struct perf_event *bp,
6767
struct perf_sample_data *data, struct pt_regs *regs)
6868
{
6969
struct perf_event_attr attr;

arch/sh/kernel/traps_32.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
393393
*/
394394
if (!expected) {
395395
unaligned_fixups_notify(current, instruction, regs);
396-
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
396+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
397397
regs, address);
398398
}
399399

arch/sh/kernel/traps_64.c

+4-4
Original file line numberDiff line numberDiff line change
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
434434
return error;
435435
}
436436

437-
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
437+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
438438

439439
destreg = (opcode >> 4) & 0x3f;
440440
if (user_mode(regs)) {
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
512512
return error;
513513
}
514514

515-
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
515+
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
516516

517517
srcreg = (opcode >> 4) & 0x3f;
518518
if (user_mode(regs)) {
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
588588
return error;
589589
}
590590

591-
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
591+
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
592592

593593
destreg = (opcode >> 4) & 0x3f;
594594
if (user_mode(regs)) {
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
665665
return error;
666666
}
667667

668-
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
668+
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
669669

670670
srcreg = (opcode >> 4) & 0x3f;
671671
if (user_mode(regs)) {

arch/sh/math-emu/math.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
620620
struct task_struct *tsk = current;
621621
struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
622622

623-
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
623+
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
624624

625625
if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
626626
/* initialize once. */

arch/sh/mm/fault_32.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
160160
if ((regs->sr & SR_IMASK) != SR_IMASK)
161161
local_irq_enable();
162162

163-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
163+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
164164

165165
/*
166166
* If we're in an interrupt, have no user context or are running
@@ -210,11 +210,11 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
210210
}
211211
if (fault & VM_FAULT_MAJOR) {
212212
tsk->maj_flt++;
213-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
213+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
214214
regs, address);
215215
} else {
216216
tsk->min_flt++;
217-
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
217+
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
218218
regs, address);
219219
}
220220

0 commit comments

Comments (0)