Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "These were the main changes in this cycle:

   - More -rt motivated separation of CONFIG_PREEMPT and
     CONFIG_PREEMPTION.

   - Add more low level scheduling topology sanity checks and warnings
     to filter out nonsensical topologies that break scheduling.

   - Extend uclamp constraints to influence wakeup CPU placement

   - Make the RT scheduler more aware of asymmetric topologies and CPU
     capacities, via uclamp metrics, if CONFIG_UCLAMP_TASK=y

   - Make idle CPU selection more consistent

   - Various fixes, smaller cleanups, updates and enhancements - please
     see the git log for details"
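
Two of the bullets above — uclamp-influenced wakeup placement and RT capacity awareness — boil down to comparing a task's effective clamp values against a CPU's original capacity before placing it there. A rough sketch of that fitness check follows, assuming the scheduler's internal helpers uclamp_eff_value(), capacity_orig_of() and the sched_asym_cpucapacity static key; the real helper introduced by "sched/rt: Make RT capacity-aware" (listed below) has this general shape but may differ in detail.

	/* Illustrative sketch only: can RT task @p be satisfied on CPU @cpu? */
	static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
	{
		unsigned int min_cap, max_cap, cpu_cap;

		/* Only asymmetric (big.LITTLE-style) systems need the check */
		if (!static_branch_unlikely(&sched_asym_cpucapacity))
			return true;

		min_cap = uclamp_eff_value(p, UCLAMP_MIN);	/* requested floor */
		max_cap = uclamp_eff_value(p, UCLAMP_MAX);	/* requested ceiling */
		cpu_cap = capacity_orig_of(cpu);		/* CPU's full capacity */

		/* Fit if the CPU can deliver the clamped utilization */
		return cpu_cap >= min(min_cap, max_cap);
	}

The CFS side applies the same idea through task_fits_capacity() and the EAS wakeup path, per the corresponding commits in the list below.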

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (58 commits)
  sched/fair: Define sched_idle_cpu() only for SMP configurations
  sched/topology: Assert non-NUMA topology masks don't (partially) overlap
  idle: fix spelling mistake "iterrupts" -> "interrupts"
  sched/fair: Remove redundant call to cpufreq_update_util()
  sched/psi: create /proc/pressure and /proc/pressure/{io|memory|cpu} only when psi enabled
  sched/fair: Fix sgc->{min,max}_capacity calculation for SD_OVERLAP
  sched/fair: calculate delta runnable load only when it's needed
  sched/cputime: move rq parameter in irqtime_account_process_tick
  stop_machine: Make stop_cpus() static
  sched/debug: Reset watchdog on all CPUs while processing sysrq-t
  sched/core: Fix size of rq::uclamp initialization
  sched/uclamp: Fix a bug in propagating uclamp value in new cgroups
  sched/fair: Load balance aggressively for SCHED_IDLE CPUs
  sched/fair : Improve update_sd_pick_busiest for spare capacity case
  watchdog: Remove soft_lockup_hrtimer_cnt and related code
  sched/rt: Make RT capacity-aware
  sched/fair: Make EAS wakeup placement consider uclamp restrictions
  sched/fair: Make task_fits_capacity() consider uclamp restrictions
  sched/uclamp: Rename uclamp_util_with() into uclamp_rq_util_with()
  sched/uclamp: Make uclamp util helpers use and return UL values
  ...
torvalds committed Jan 28, 2020
2 parents c0e809e + afa70d9 commit c677124
Showing 72 changed files with 446 additions and 329 deletions.
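
Nearly all of the hunks below are the mechanical CONFIG_PREEMPT -> CONFIG_PREEMPTION substitution from the first bullet of the pull message. As a minimal sketch of why the new symbol is the one to test — assuming the usual Kconfig wiring in which both PREEMPT and PREEMPT_RT select PREEMPTION; the helper name kernel_mode_preemptible() is hypothetical:

	/*
	 * Illustrative sketch, not taken from this commit: CONFIG_PREEMPTION
	 * is set for any preemptible kernel model, so code that must cope
	 * with involuntary kernel preemption tests it instead of the narrower
	 * CONFIG_PREEMPT.
	 */
	static inline bool kernel_mode_preemptible(void)
	{
	#ifdef CONFIG_PREEMPTION	/* covers CONFIG_PREEMPT and CONFIG_PREEMPT_RT */
		return true;
	#else				/* only explicit scheduling points reschedule */
		return false;
	#endif
	}

With that wiring, sites such as the preempt_schedule_irq() checks in the entry code below need only the single CONFIG_PREEMPTION test to cover both preemption models.
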
6 changes: 3 additions & 3 deletions arch/arc/kernel/entry.S
@@ -337,11 +337,11 @@ resume_user_mode_begin:
resume_kernel_mode:

; Disable Interrupts from this point on
; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq()
; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe
IRQ_DISABLE r9

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION

; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
2 changes: 1 addition & 1 deletion arch/arm/include/asm/switch_to.h
@@ -10,7 +10,7 @@
* to ensure that the maintenance completes in case we migrate to another
* CPU.
*/
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
#define __complete_pending_tlbi() dsb(ish)
#else
#define __complete_pending_tlbi()
4 changes: 2 additions & 2 deletions arch/arm/kernel/entry-armv.S
@@ -211,7 +211,7 @@ __irq_svc:
svc_entry
irq_handler

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
ldr r0, [tsk, #TI_FLAGS] @ get flags
teq r8, #0 @ if preempt count != 0
@@ -226,7 +226,7 @@ ENDPROC(__irq_svc)

.ltorg

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
svc_preempt:
mov r8, lr
1: bl preempt_schedule_irq @ irq en/disable is done inside
2 changes: 2 additions & 0 deletions arch/arm/kernel/traps.c
@@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif
4 changes: 2 additions & 2 deletions arch/arm/mm/cache-v7.S
@@ -135,13 +135,13 @@ flush_levels:
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sych the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
restore_irqs_notrace r9
#endif
and r2, r1, #7 @ extract the length of the cache lines
4 changes: 2 additions & 2 deletions arch/arm/mm/cache-v7m.S
@@ -183,13 +183,13 @@ flush_levels:
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic
#endif
write_csselr r10, r1 @ set current cache level
isb @ isb to sych the new cssr&csidr
read_ccsidr r1 @ read the new csidr
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
restore_irqs_notrace r9
#endif
and r2, r1, #7 @ extract the length of the cache lines
52 changes: 26 additions & 26 deletions arch/arm64/Kconfig
@@ -34,32 +34,32 @@ config ARM64
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPT
select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
select ARCH_INLINE_READ_UNLOCK if !PREEMPT
select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_INLINE_WRITE_LOCK if !PREEMPT
select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT
select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_LOCK if !PREEMPT
select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT
select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT
select ARCH_INLINE_READ_LOCK if !PREEMPTION
select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION
select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
select ARCH_KEEP_MEMBLOCK
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
2 changes: 1 addition & 1 deletion arch/arm64/crypto/sha256-glue.c
@@ -97,7 +97,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
* input when running on a preemptible kernel, but process the
* data block by block instead.
*/
if (IS_ENABLED(CONFIG_PREEMPT) &&
if (IS_ENABLED(CONFIG_PREEMPTION) &&
chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE)
chunk = SHA256_BLOCK_SIZE -
sctx->count % SHA256_BLOCK_SIZE;
6 changes: 3 additions & 3 deletions arch/arm64/include/asm/assembler.h
@@ -675,8 +675,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
* where <label> is optional, and marks the point where execution will resume
* after a yield has been performed. If omitted, execution resumes right after
* the endif_yield_neon invocation. Note that the entire sequence, including
* the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
* is not defined.
* the provided patchup code, will be omitted from the image if
* CONFIG_PREEMPTION is not defined.
*
* As a convenience, in the case where no patchup code is required, the above
* sequence may be abbreviated to
@@ -704,7 +704,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm

.macro if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
get_current_task x0
ldr x0, [x0, #TSK_TI_PREEMPT]
sub x0, x0, #PREEMPT_DISABLE_OFFSET
4 changes: 2 additions & 2 deletions arch/arm64/include/asm/preempt.h
@@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset)
return pc == preempt_offset;
}

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */
#endif /* CONFIG_PREEMPTION */

#endif /* __ASM_PREEMPT_H */
2 changes: 1 addition & 1 deletion arch/arm64/kernel/entry.S
@@ -602,7 +602,7 @@ el1_irq:

irq_handler

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
/*
3 changes: 3 additions & 0 deletions arch/arm64/kernel/traps.c
@@ -144,9 +144,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
8 changes: 4 additions & 4 deletions arch/c6x/kernel/entry.S
@@ -18,7 +18,7 @@
#define DP B14
#define SP B15

#ifndef CONFIG_PREEMPT
#ifndef CONFIG_PREEMPTION
#define resume_kernel restore_all
#endif

@@ -287,7 +287,7 @@ work_notifysig:
;; is a little bit different
;;
ENTRY(ret_from_exception)
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
MASK_INT B2
#endif

@@ -557,7 +557,7 @@ ENDPROC(_nmi_handler)
;;
;; Jump to schedule() then return to ret_from_isr
;;
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
resume_kernel:
GET_THREAD_INFO A12
LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1
@@ -582,7 +582,7 @@ preempt_schedule:
B .S2 preempt_schedule_irq
#endif
ADDKPC .S2 preempt_schedule,B3,4
#endif /* CONFIG_PREEMPT */
#endif /* CONFIG_PREEMPTION */

ENTRY(enable_exception)
DINT
4 changes: 2 additions & 2 deletions arch/csky/kernel/entry.S
@@ -277,7 +277,7 @@ ENTRY(csky_irq)
zero_fp
psrset ee

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
mov r9, sp /* Get current stack pointer */
bmaski r10, THREAD_SHIFT
andn r9, r10 /* Get thread_info */
@@ -294,7 +294,7 @@ ENTRY(csky_irq)
mov a0, sp
jbsr csky_do_IRQ

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
subi r12, 1
stw r12, (r9, TINFO_PREEMPT)
cmpnei r12, 0
6 changes: 3 additions & 3 deletions arch/h8300/kernel/entry.S
@@ -284,12 +284,12 @@ badsys:
mov.l er0,@(LER0:16,sp)
bra resume_userspace

#if !defined(CONFIG_PREEMPT)
#if !defined(CONFIG_PREEMPTION)
#define resume_kernel restore_all
#endif

ret_from_exception:
#if defined(CONFIG_PREEMPT)
#if defined(CONFIG_PREEMPTION)
orc #0xc0,ccr
#endif
ret_from_interrupt:
@@ -319,7 +319,7 @@ work_resched:
restore_all:
RESTORE_ALL /* Does RTE */

#if defined(CONFIG_PREEMPT)
#if defined(CONFIG_PREEMPTION)
resume_kernel:
mov.l @(TI_PRE_COUNT:16,er4),er0
bne restore_all:8
6 changes: 3 additions & 3 deletions arch/hexagon/kernel/vm_entry.S
@@ -265,12 +265,12 @@ event_dispatch:
* should be in the designated register (usually R19)
*
* If we were in kernel mode, we don't need to check scheduler
* or signals if CONFIG_PREEMPT is not set. If set, then it has
* or signals if CONFIG_PREEMPTION is not set. If set, then it has
* to jump to a need_resched kind of block.
* BTW, CONFIG_PREEMPT is not supported yet.
* BTW, CONFIG_PREEMPTION is not supported yet.
*/

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
R0 = #VM_INT_DISABLE
trap1(#HVM_TRAP1_VMSETIE)
#endif
12 changes: 6 additions & 6 deletions arch/ia64/kernel/entry.S
@@ -670,12 +670,12 @@ GLOBAL_ENTRY(ia64_leave_syscall)
*
* p6 controls whether current_thread_info()->flags needs to be check for
* extra work. We always check for extra work when returning to user-level.
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
* With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
* is 0. After extra work processing has been completed, execution
* resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check
* needs to be redone.
*/
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
RSM_PSR_I(p0, r2, r18) // disable interrupts
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
@@ -685,7 +685,7 @@ GLOBAL_ENTRY(ia64_leave_syscall)
(pUStk) mov r21=0 // r21 <- 0
;;
cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0)
#else /* !CONFIG_PREEMPT */
#else /* !CONFIG_PREEMPTION */
RSM_PSR_I(pUStk, r2, r18)
cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall
(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
@@ -814,12 +814,12 @@ GLOBAL_ENTRY(ia64_leave_kernel)
*
* p6 controls whether current_thread_info()->flags needs to be check for
* extra work. We always check for extra work when returning to user-level.
* With CONFIG_PREEMPT, we also check for extra work when the preempt_count
* With CONFIG_PREEMPTION, we also check for extra work when the preempt_count
* is 0. After extra work processing has been completed, execution
* resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check
* needs to be redone.
*/
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
RSM_PSR_I(p0, r17, r31) // disable interrupts
cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel
(pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
@@ -1120,7 +1120,7 @@ skip_rbs_switch:

/*
* On entry:
* r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT)
* r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION)
* r31 = current->thread_info->flags
* On exit:
* p6 = TRUE if work-pending-check needs to be redone
2 changes: 1 addition & 1 deletion arch/ia64/kernel/kprobes.c
@@ -841,7 +841,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
return 1;
}

#if !defined(CONFIG_PREEMPT)
#if !defined(CONFIG_PREEMPTION)
if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
ia64_psr(regs)->ri = p->ainsn.slot;
2 changes: 1 addition & 1 deletion arch/microblaze/kernel/entry.S
@@ -728,7 +728,7 @@ no_intr_resched:
bri 6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
/* MS: get preempt_count from thread info */
lwi r5, r11, TI_PREEMPT_COUNT;
4 changes: 2 additions & 2 deletions arch/mips/include/asm/asmmacro.h
@@ -63,7 +63,7 @@
.endm

.macro local_irq_disable reg=t0
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, 1
sw \reg, TI_PRE_COUNT($28)
@@ -73,7 +73,7 @@
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_PREEMPTION
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, -1
sw \reg, TI_PRE_COUNT($28)
(diff truncated here; the remaining changed files are not shown)
