Merge branch 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'v28-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (36 commits)
  fix documentation of sysrq-q really
  Fix documentation of sysrq-q
  timer_list: add base address to clock base
  timer_list: print cpu number of clockevents device
  timer_list: print real timer address
  NOHZ: restart tick device from irq_enter()
  NOHZ: split tick_nohz_restart_sched_tick()
  NOHZ: unify the nohz function calls in irq_enter()
  timers: fix itimer/many thread hang, fix
  timers: fix itimer/many thread hang, v3
  ntp: improve adjtimex frequency rounding
  timekeeping: fix rounding problem during clock update
  ntp: let update_persistent_clock() sleep
  hrtimer: reorder struct hrtimer to save 8 bytes on 64bit builds
  posix-timers: lock_timer: make it readable
  posix-timers: lock_timer: kill the bogus ->it_id check
  posix-timers: kill ->it_sigev_signo and ->it_sigev_value
  posix-timers: sys_timer_create: cleanup the error handling
  posix-timers: move the initialization of timer->sigq from send to create path
  posix-timers: sys_timer_create: simplify and s/tasklist/rcu/
  ...

Fix trivial conflicts due to sysrq-q description clashes in
Documentation/sysrq.txt and drivers/char/sysrq.c
torvalds committed Oct 20, 2008
2 parents 72558dd + c465a76 commit 99ebcf8
Showing 37 changed files with 903 additions and 721 deletions.
5 changes: 3 additions & 2 deletions Documentation/sysrq.txt
@@ -95,8 +95,9 @@ On all - write a character to /proc/sysrq-trigger. e.g.:

'p' - Will dump the current registers and flags to your console.

'q' - Will dump a list of all running hrtimers.
WARNING: Does not cover any other timers
'q' - Will dump per CPU lists of all armed hrtimers (but NOT regular
timer_list timers) and detailed information about all
clockevent devices.

'r' - Turns off keyboard raw mode and sets it to XLATE.

2 changes: 1 addition & 1 deletion drivers/char/sysrq.c
@@ -168,7 +168,7 @@ static void sysrq_handle_show_timers(int key, struct tty_struct *tty)
static struct sysrq_key_op sysrq_show_timers_op = {
.handler = sysrq_handle_show_timers,
.help_msg = "show-all-timers(Q)",
.action_msg = "Show pending hrtimers (no others)",
.action_msg = "Show clockevent devices & pending hrtimers (no others)",
};

static void sysrq_handle_mountro(int key, struct tty_struct *tty)
7 changes: 5 additions & 2 deletions drivers/clocksource/acpi_pm.c
@@ -237,9 +237,12 @@ static int __init parse_pmtmr(char *arg)

if (strict_strtoul(arg, 16, &base))
return -EINVAL;

#ifdef CONFIG_X86_64
if (base > UINT_MAX)
return -ERANGE;
#endif
printk(KERN_INFO "PMTMR IOPort override: 0x%04x -> 0x%04lx\n",
(unsigned int)pmtmr_ioport, base);
pmtmr_ioport, base);
pmtmr_ioport = base;

return 1;
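The hunk above rejects PMTMR port overrides that cannot fit the 32-bit pmtmr_ioport on 64-bit builds, where unsigned long is wide enough to parse much larger values. Below is a userspace sketch of the same guard; parse_port, the sample value, and the ULONG_MAX/UINT_MAX test standing in for the CONFIG_X86_64 check are illustrative choices, not the kernel code.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_port(const char *arg, unsigned int *port)
{
	char *end;
	unsigned long base;

	errno = 0;
	base = strtoul(arg, &end, 16);
	if (errno || end == arg || *end != '\0')
		return -EINVAL;
#if ULONG_MAX > UINT_MAX	/* 64-bit: range is not implied by the type */
	if (base > UINT_MAX)
		return -ERANGE;
#endif
	*port = (unsigned int)base;
	return 0;
}

int main(void)
{
	unsigned int port;

	if (parse_port("0x408", &port) == 0)
		printf("PMTMR override: 0x%04x\n", port);
	return 0;
}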
19 changes: 7 additions & 12 deletions fs/binfmt_elf.c
@@ -1341,20 +1341,15 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
prstatus->pr_pgrp = task_pgrp_vnr(p);
prstatus->pr_sid = task_session_vnr(p);
if (thread_group_leader(p)) {
struct task_cputime cputime;

/*
* This is the record for the group leader. Add in the
* cumulative times of previous dead threads. This total
* won't include the time of each live thread whose state
* is included in the core dump. The final total reported
* to our parent process when it calls wait4 will include
* those sums as well as the little bit more time it takes
* this and each other thread to finish dying after the
* core dump synchronization phase.
* This is the record for the group leader. It shows the
* group-wide total, not its individual thread total.
*/
cputime_to_timeval(cputime_add(p->utime, p->signal->utime),
&prstatus->pr_utime);
cputime_to_timeval(cputime_add(p->stime, p->signal->stime),
&prstatus->pr_stime);
thread_group_cputime(p, &cputime);
cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
} else {
cputime_to_timeval(p->utime, &prstatus->pr_utime);
cputime_to_timeval(p->stime, &prstatus->pr_stime);
8 changes: 4 additions & 4 deletions fs/proc/array.c
@@ -388,20 +388,20 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,

/* add up live thread stats at the group level */
if (whole) {
struct task_cputime cputime;
struct task_struct *t = task;
do {
min_flt += t->min_flt;
maj_flt += t->maj_flt;
utime = cputime_add(utime, task_utime(t));
stime = cputime_add(stime, task_stime(t));
gtime = cputime_add(gtime, task_gtime(t));
t = next_thread(t);
} while (t != task);

min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
utime = cputime_add(utime, sig->utime);
stime = cputime_add(stime, sig->stime);
thread_group_cputime(task, &cputime);
utime = cputime.utime;
stime = cputime.stime;
gtime = cputime_add(gtime, sig->gtime);
}

14 changes: 10 additions & 4 deletions include/linux/clocksource.h
@@ -45,7 +45,8 @@ struct clocksource;
* @read: returns a cycle value
* @mask: bitmask for two's complement
* subtraction of non 64 bit counters
* @mult: cycle to nanosecond multiplier
* @mult: cycle to nanosecond multiplier (adjusted by NTP)
* @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP)
* @shift: cycle to nanosecond divisor (power of two)
* @flags: flags describing special properties
* @vread: vsyscall based read
@@ -63,6 +64,7 @@ struct clocksource {
cycle_t (*read)(void);
cycle_t mask;
u32 mult;
u32 mult_orig;
u32 shift;
unsigned long flags;
cycle_t (*vread)(void);
@@ -77,6 +79,7 @@ struct clocksource {
/* timekeeping specific data, ignore */
cycle_t cycle_interval;
u64 xtime_interval;
u32 raw_interval;
/*
* Second part is written at each timer interrupt
* Keep it in a different cache line to dirty no
@@ -85,6 +88,7 @@
cycle_t cycle_last ____cacheline_aligned_in_smp;
u64 xtime_nsec;
s64 error;
struct timespec raw_time;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
@@ -201,17 +205,19 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
{
u64 tmp;

/* XXX - All of this could use a whole lot of optimization */
/* Do the ns -> cycle conversion first, using original mult */
tmp = length_nsec;
tmp <<= c->shift;
tmp += c->mult/2;
do_div(tmp, c->mult);
tmp += c->mult_orig/2;
do_div(tmp, c->mult_orig);

c->cycle_interval = (cycle_t)tmp;
if (c->cycle_interval == 0)
c->cycle_interval = 1;

/* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
c->xtime_interval = (u64)c->cycle_interval * c->mult;
c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
}


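The hunk above makes clocksource_calculate_interval() convert the nanosecond interval to cycles with the NTP-unadjusted mult_orig, then derive both the NTP-adjusted xtime_interval and the new raw_interval from that cycle count. The standalone sketch below reruns the same fixed-point arithmetic with made-up values (a hypothetical 1 MHz clocksource, shift 20, 10 ms interval) to show the magnitudes involved; it is an illustration, not kernel code.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 20;
	uint32_t mult_orig = 1048576000;	/* (1e9 << 20) / 1e6 for a 1 MHz counter */
	uint32_t mult = 1048576100;		/* pretend NTP nudged it up slightly */
	uint64_t length_nsec = 10000000ULL;	/* 10 ms tick length */

	/* ns -> cycles, rounded, using the unadjusted multiplier */
	uint64_t tmp = (length_nsec << shift) + mult_orig / 2;
	uint64_t cycle_interval = tmp / mult_orig;
	if (cycle_interval == 0)
		cycle_interval = 1;

	/* cycles -> shifted ns with the NTP-adjusted mult, plus raw ns */
	uint64_t xtime_interval = cycle_interval * mult;
	uint64_t raw_interval = (cycle_interval * mult_orig) >> shift;

	printf("cycle_interval=%llu xtime_interval=%llu raw_interval=%llu\n",
	       (unsigned long long)cycle_interval,
	       (unsigned long long)xtime_interval,
	       (unsigned long long)raw_interval);
	return 0;
}

With these numbers the raw interval comes back out as the original 10 ms in plain nanoseconds, while xtime_interval carries the slight NTP adjustment in shifted-nanosecond units.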
10 changes: 2 additions & 8 deletions include/linux/hrtimer.h
@@ -125,12 +125,12 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
unsigned long state;
enum hrtimer_cb_mode cb_mode;
struct list_head cb_entry;
enum hrtimer_cb_mode cb_mode;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
char start_comm[16];
int start_pid;
#endif
};

@@ -155,10 +155,8 @@ struct hrtimer_sleeper {
* @first: pointer to the timer node which expires first
* @resolution: the resolution of the clock, in nanoseconds
* @get_time: function to retrieve the current time of the clock
* @get_softirq_time: function to retrieve the current time from the softirq
* @softirq_time: the time when running the hrtimer queue in the softirq
* @offset: offset of this clock to the monotonic base
* @reprogram: function to reprogram the timer event
*/
struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
@@ -167,13 +165,9 @@ struct hrtimer_clock_base {
struct rb_node *first;
ktime_t resolution;
ktime_t (*get_time)(void);
ktime_t (*get_softirq_time)(void);
ktime_t softirq_time;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t offset;
int (*reprogram)(struct hrtimer *t,
struct hrtimer_clock_base *b,
ktime_t n);
#endif
};

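The hunk above only moves fields (cb_mode after cb_entry, start_pid after start_comm), which is how the series shrinks struct hrtimer on 64-bit builds: a 4-byte member wedged between 8-byte-aligned ones costs a padding hole. The toy structs below, with made-up names, demonstrate that alignment rule on a typical LP64 target; they are not the real hrtimer layout.

#include <stdio.h>

struct holey {			/* int, pointer, int: two 4-byte holes on LP64 */
	int a;			/* 4 bytes + 4 bytes padding before the pointer */
	void *p;		/* must sit on an 8-byte boundary */
	int b;			/* 4 bytes + 4 bytes tail padding */
};

struct packed_by_order {	/* same members, small ones grouped together */
	void *p;
	int a;
	int b;			/* the two ints share one 8-byte slot */
};

int main(void)
{
	/* typically prints 24 and 16 on an LP64 target such as x86-64 */
	printf("holey=%zu reordered=%zu\n",
	       sizeof(struct holey), sizeof(struct packed_by_order));
	return 0;
}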
1 change: 1 addition & 0 deletions include/linux/kernel_stat.h
@@ -52,6 +52,7 @@ static inline int kstat_irqs(int irq)
return sum;
}

extern unsigned long long task_delta_exec(struct task_struct *);
extern void account_user_time(struct task_struct *, cputime_t);
extern void account_user_time_scaled(struct task_struct *, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t);
4 changes: 2 additions & 2 deletions include/linux/posix-timers.h
@@ -45,8 +45,6 @@ struct k_itimer {
int it_requeue_pending; /* waiting to requeue this timer */
#define REQUEUE_PENDING 1
int it_sigev_notify; /* notify word of sigevent struct */
int it_sigev_signo; /* signo word of sigevent struct */
sigval_t it_sigev_value; /* value word of sigevent struct */
struct task_struct *it_process; /* process to send signal to */
struct sigqueue *sigq; /* signal queue entry. */
union {
@@ -115,4 +113,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,

long clock_nanosleep_restart(struct restart_block *restart_block);

void update_rlimit_cpu(unsigned long rlim_new);

#endif
84 changes: 71 additions & 13 deletions include/linux/sched.h
@@ -434,6 +434,39 @@ struct pacct_struct {
unsigned long ac_minflt, ac_majflt;
};

/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
* This structure groups together three kinds of CPU time that are
* tracked for threads and thread groups. Most things considering
* CPU time want to group these counts together and treat all three
* of them in parallel.
*/
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp stime
#define virt_exp utime
#define sched_exp sum_exec_runtime

/**
* struct thread_group_cputime - thread group interval timer counts
* @totals: thread group interval timers; substructure for
* uniprocessor kernel, per-cpu for SMP kernel.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU clock calculations.
*/
struct thread_group_cputime {
struct task_cputime *totals;
};

/*
* NOTE! "signal_struct" does not have it's own
* locking, because a shared signal_struct always
@@ -479,6 +512,17 @@ struct signal_struct {
cputime_t it_prof_expires, it_virt_expires;
cputime_t it_prof_incr, it_virt_incr;

/*
* Thread group totals for process CPU clocks.
* See thread_group_cputime(), et al, for details.
*/
struct thread_group_cputime cputime;

/* Earliest-expiration cache. */
struct task_cputime cputime_expires;

struct list_head cpu_timers[3];

/* job control IDs */

/*
@@ -509,22 +553,14 @@ struct signal_struct {
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
cputime_t utime, stime, cutime, cstime;
cputime_t cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
struct task_io_accounting ioac;

/*
* Cumulative ns of scheduled CPU time for dead threads in the
* group, not including a zombie group leader. (This only differs
* from jiffies_to_ns(utime + stime) if sched_clock uses something
* other than jiffies.)
*/
unsigned long long sum_sched_runtime;

/*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
Expand All @@ -536,8 +572,6 @@ struct signal_struct {
*/
struct rlimit rlim[RLIM_NLIMITS];

struct list_head cpu_timers[3];

/* keep the process-shared keyrings here so that they do the right
* thing in threads created with CLONE_THREAD */
#ifdef CONFIG_KEYS
@@ -1146,8 +1180,7 @@ struct task_struct {
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;

cputime_t it_prof_expires, it_virt_expires;
unsigned long long it_sched_expires;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];

/* process credentials */
@@ -1597,6 +1630,7 @@ extern unsigned long long cpu_clock(int cpu);

extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);

/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
@@ -2093,6 +2127,30 @@ static inline int spin_needbreak(spinlock_t *lock)
#endif
}

/*
* Thread group CPU time accounting.
*/

extern int thread_group_cputime_alloc(struct task_struct *);
extern void thread_group_cputime(struct task_struct *, struct task_cputime *);

static inline void thread_group_cputime_init(struct signal_struct *sig)
{
sig->cputime.totals = NULL;
}

static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
{
if (curr->signal->cputime.totals)
return 0;
return thread_group_cputime_alloc(curr);
}

static inline void thread_group_cputime_free(struct signal_struct *sig)
{
free_percpu(sig->cputime.totals);
}

/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
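The additions above introduce struct task_cputime and a thread_group_cputime container whose totals are a single structure on uniprocessor kernels and per-cpu on SMP (note the free_percpu() in thread_group_cputime_free()). The per-CPU layout lets each CPU account into its own bucket, with readers summing the buckets on demand; the sketch below is a minimal standalone model of that reading of the design, using invented names (model_account, NR_MODEL_CPUS), not the kernel's thread_group_cputime() implementation.

#include <stdio.h>

#define NR_MODEL_CPUS 4

struct model_cputime {
	unsigned long long utime;
	unsigned long long stime;
	unsigned long long sum_exec_runtime;
};

/* stands in for the per-cpu 'totals' allocation */
static struct model_cputime per_cpu_totals[NR_MODEL_CPUS];

static void model_account(int cpu, unsigned long long u, unsigned long long s)
{
	per_cpu_totals[cpu].utime += u;		/* each CPU touches only its bucket */
	per_cpu_totals[cpu].stime += s;
	per_cpu_totals[cpu].sum_exec_runtime += u + s;
}

static void model_group_cputime(struct model_cputime *out)
{
	int cpu;

	out->utime = out->stime = out->sum_exec_runtime = 0;
	for (cpu = 0; cpu < NR_MODEL_CPUS; cpu++) {
		out->utime += per_cpu_totals[cpu].utime;
		out->stime += per_cpu_totals[cpu].stime;
		out->sum_exec_runtime += per_cpu_totals[cpu].sum_exec_runtime;
	}
}

int main(void)
{
	struct model_cputime total;

	model_account(0, 5, 1);
	model_account(2, 3, 2);
	model_group_cputime(&total);
	printf("utime=%llu stime=%llu runtime=%llu\n",
	       total.utime, total.stime, total.sum_exec_runtime);
	return 0;
}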
7 changes: 3 additions & 4 deletions include/linux/tick.h
@@ -96,36 +96,35 @@ extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
extern void tick_clock_notify(void);
extern int tick_check_oneshot_change(int allow_nohz);
extern struct tick_sched *tick_get_tick_sched(int cpu);
extern void tick_check_idle(int cpu);
# else
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
# endif

#else /* CONFIG_GENERIC_CLOCKEVENTS */
static inline void tick_init(void) { }
static inline void tick_cancel_sched_timer(int cpu) { }
static inline void tick_clock_notify(void) { }
static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
static inline void tick_check_idle(int cpu) { }
#endif /* !CONFIG_GENERIC_CLOCKEVENTS */

# ifdef CONFIG_NO_HZ
extern void tick_nohz_stop_sched_tick(int inidle);
extern void tick_nohz_restart_sched_tick(void);
extern void tick_nohz_update_jiffies(void);
extern ktime_t tick_nohz_get_sleep_length(void);
extern void tick_nohz_stop_idle(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
# else
static inline void tick_nohz_stop_sched_tick(int inidle) { }
static inline void tick_nohz_restart_sched_tick(void) { }
static inline void tick_nohz_update_jiffies(void) { }
static inline ktime_t tick_nohz_get_sleep_length(void)
{
ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };

return len;
}
static inline void tick_nohz_stop_idle(int cpu) { }
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !NO_HZ */

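The hunk above replaces the separate tick_nohz_stop_idle()/tick_nohz_update_jiffies() externs with one tick_check_idle() hook, which falls back to an empty static inline when the relevant config options are off so that callers such as irq_enter() stay unconditional. Below is a generic, hypothetical sketch of that header pattern (model_check_idle and HAVE_NOHZ_MODEL are made up), not the kernel's implementation.

#include <stdio.h>

#ifdef HAVE_NOHZ_MODEL
static void model_check_idle(int cpu)
{
	/* stands in for the real idle-exit bookkeeping */
	printf("cpu %d: leaving idle, doing nohz bookkeeping\n", cpu);
}
#else
static inline void model_check_idle(int cpu) { (void)cpu; }
#endif

/* the caller (think irq_enter()) stays unconditional, no #ifdef needed */
static void model_irq_enter(int cpu)
{
	model_check_idle(cpu);
	printf("cpu %d: normal interrupt entry work\n", cpu);
}

int main(void)
{
	model_irq_enter(0);
	return 0;
}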