hrtimer: Prepare handling of hard and softirq based hrtimers
The softirq-based hrtimer can utilize most of the existing hrtimer
functions, but needs to operate on a different data set.

Add an 'active_mask' parameter to the affected functions so that either the
hard or the soft bases can be selected. Fix up the existing callers and hand
in the HRTIMER_ACTIVE_HARD mask.

Signed-off-by: Anna-Maria Gleixner <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: John Stultz <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
anna-marialx authored and Ingo Molnar committed Jan 16, 2018
1 parent 98ecadd commit c458b1d
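
The change is easiest to see in isolation. Below is a minimal, self-contained C sketch of the idea, an editor-added illustration rather than kernel code: a per-CPU bitmap of armed clock bases is ANDed with an active_mask so that only the hard or only the soft bases are processed. It assumes the layout the new masks encode, with the four hard bases in bits 0-3 and the soft bases in bits 4-7 (i.e. HRTIMER_BASE_MONOTONIC_SOFT == 4); run_queues(), ACTIVE_HARD and ACTIVE_SOFT are illustrative stand-ins.

#include <stdio.h>

#define MASK_SHIFT	4				/* index of the first soft base */
#define ACTIVE_HARD	((1U << MASK_SHIFT) - 1)	/* 0x0f: bases 0-3 */
#define ACTIVE_SOFT	(ACTIVE_HARD << MASK_SHIFT)	/* 0xf0: bases 4-7 */

static void run_queues(unsigned int active_bases, unsigned int active_mask)
{
	/* Only bases that are both armed and selected by the mask are walked. */
	unsigned int active = active_bases & active_mask;

	while (active) {
		unsigned int idx = __builtin_ctz(active);	/* lowest set bit */

		printf("expiring timers on clock base %u\n", idx);
		active &= active - 1;				/* clear that bit */
	}
}

int main(void)
{
	unsigned int active_bases = 0x13;	/* bases 0, 1 (hard) and 4 (soft) armed */

	run_queues(active_bases, ACTIVE_HARD);	/* hard interrupt context: bases 0 and 1 */
	run_queues(active_bases, ACTIVE_SOFT);	/* softirq context: base 4 */
	return 0;
}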
Showing 1 changed file with 29 additions and 9 deletions.
kernel/time/hrtimer.c
@@ -59,6 +59,15 @@

#include "tick-internal.h"

+/*
+ * Masks for selecting the soft and hard context timers from
+ * cpu_base->active
+ */
+#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
+#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
+#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
/*
* The timer bases:
*
@@ -507,13 +516,24 @@ static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
return expires_next;
}

-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+/*
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next, that is done by hrtimer_reprogram.
+ *
+ * @active_mask must be one of:
+ *  - HRTIMER_ACTIVE_ALL,
+ *  - HRTIMER_ACTIVE_SOFT, or
+ *  - HRTIMER_ACTIVE_HARD.
+ */
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
+unsigned int active_mask)
{
-unsigned int active = cpu_base->active_bases;
+unsigned int active;
ktime_t expires_next = KTIME_MAX;

cpu_base->next_timer = NULL;

+active = cpu_base->active_bases & active_mask;
expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);

return expires_next;
@@ -553,7 +573,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
ktime_t expires_next;

-expires_next = __hrtimer_get_next_event(cpu_base);
+expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);

if (skip_equal && expires_next == cpu_base->expires_next)
return;
@@ -1074,7 +1094,7 @@ u64 hrtimer_get_next_event(void)
raw_spin_lock_irqsave(&cpu_base->lock, flags);

if (!__hrtimer_hres_active(cpu_base))
-expires = __hrtimer_get_next_event(cpu_base);
+expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);

raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

@@ -1248,10 +1268,10 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
}

static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-unsigned long flags)
+unsigned long flags, unsigned int active_mask)
{
struct hrtimer_clock_base *base;
-unsigned int active = cpu_base->active_bases;
+unsigned int active = cpu_base->active_bases & active_mask;

for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *node;
@@ -1314,10 +1334,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/
cpu_base->expires_next = KTIME_MAX;

-__hrtimer_run_queues(cpu_base, now, flags);
+__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);

/* Reevaluate the clock bases for the next expiry */
-expires_next = __hrtimer_get_next_event(cpu_base);
+expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
/*
* Store the new expiry value so the migration code can verify
* against it.
@@ -1421,7 +1441,7 @@ void hrtimer_run_queues(void)

raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
-__hrtimer_run_queues(cpu_base, now, flags);
+__hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
}
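
To round out the doc comment added to __hrtimer_get_next_event() above, here is a compact user-space model of the next-event computation, again an editor-added sketch with made-up expiry values rather than kernel code: only bases that are armed and selected by active_mask contribute to the earliest expiry, and an empty selection leaves the result at KTIME_MAX.

#include <stdint.h>
#include <stdio.h>

#define KTIME_MAX	INT64_MAX
#define NR_BASES	8

/* Earliest expiry over the bases armed in active_bases and selected by active_mask. */
static int64_t next_event(const int64_t expires[NR_BASES],
			  unsigned int active_bases, unsigned int active_mask)
{
	unsigned int active = active_bases & active_mask;
	int64_t expires_next = KTIME_MAX;

	for (unsigned int idx = 0; idx < NR_BASES; idx++) {
		if ((active & (1U << idx)) && expires[idx] < expires_next)
			expires_next = expires[idx];
	}
	return expires_next;
}

int main(void)
{
	int64_t expires[NR_BASES] = { 100, 50, KTIME_MAX, KTIME_MAX,
				      30, KTIME_MAX, KTIME_MAX, KTIME_MAX };
	unsigned int armed = 0x13;	/* bases 0, 1 (hard) and 4 (soft) armed */

	printf("hard: %lld\n", (long long)next_event(expires, armed, 0x0f));	/* 50 */
	printf("soft: %lld\n", (long long)next_event(expires, armed, 0xf0));	/* 30 */
	return 0;
}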
