sched: clean up schedstats, cnt -> count

rename all 'cnt' fields and variables to the less yucky 'count' name.

yuckage noticed by Andrew Morton.

no change in code, other than the /proc/sched_debug bkl_count string got
a bit larger:

   text    data     bss     dec     hex filename
  38236    3506      24   41766    a326 sched.o.before
  38240    3506      24   41770    a32a sched.o.after

Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Ingo Molnar committed Oct 15, 2007
1 parent 2b1e315 commit 2d72376
Showing 6 changed files with 36 additions and 36 deletions.
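
Since this is a pure rename, one piece of context may help when reading the hunks below: the schedstat_inc() helper they call simply bumps the named field. A minimal sketch of that idea (an assumed shape for illustration, not code from this commit; the real definition lives in the scheduler's stats header and is untouched here):

#ifdef CONFIG_SCHEDSTATS
/* bump a per-runqueue/per-domain statistics field by name; renaming a
 * field from *_cnt to *_count therefore changes identifiers and printed
 * strings only, never the generated accounting logic */
# define schedstat_inc(rq, field)   do { (rq)->field++; } while (0)
#else
# define schedstat_inc(rq, field)   do { } while (0)
#endif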
fs/proc/base.c (1 addition, 1 deletion)
@@ -304,7 +304,7 @@ static int proc_pid_schedstat(struct task_struct *task, char *buffer)
     return sprintf(buffer, "%llu %llu %lu\n",
             task->sched_info.cpu_time,
             task->sched_info.run_delay,
-            task->sched_info.pcnt);
+            task->sched_info.pcount);
 }
 #endif

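For illustration only (not part of the commit), a small userspace reader for the /proc/<pid>/schedstat line that proc_pid_schedstat() above produces; the field order follows its sprintf() format string, with the renamed pcount last:

#include <stdio.h>

int main(int argc, char **argv)
{
    unsigned long long cpu_time, run_delay;
    unsigned long pcount;
    char path[64];
    FILE *f;

    /* default to the calling process if no pid is given */
    snprintf(path, sizeof(path), "/proc/%s/schedstat",
             argc > 1 ? argv[1] : "self");
    f = fopen(path, "r");
    if (!f)
        return 1;
    /* cpu_time, run_delay, pcount -- the order used by proc_pid_schedstat() */
    if (fscanf(f, "%llu %llu %lu", &cpu_time, &run_delay, &pcount) == 3)
        printf("cpu_time=%llu run_delay=%llu pcount=%lu\n",
               cpu_time, run_delay, pcount);
    fclose(f);
    return 0;
}
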
include/linux/sched.h (6 additions, 6 deletions)
@@ -614,7 +614,7 @@ struct reclaim_state;
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 struct sched_info {
     /* cumulative counters */
-    unsigned long pcnt; /* # of times run on this cpu */
+    unsigned long pcount; /* # of times run on this cpu */
     unsigned long long cpu_time, /* time spent on the cpu */
                run_delay; /* time spent waiting on a runqueue */

@@ -623,7 +623,7 @@ struct sched_info {
         last_queued; /* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
     /* BKL stats */
-    unsigned long bkl_cnt;
+    unsigned long bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -759,7 +759,7 @@ struct sched_domain {

 #ifdef CONFIG_SCHEDSTATS
     /* load_balance() stats */
-    unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+    unsigned long lb_count[CPU_MAX_IDLE_TYPES];
     unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
     unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
     unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
@@ -769,17 +769,17 @@
     unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];

     /* Active load balancing */
-    unsigned long alb_cnt;
+    unsigned long alb_count;
     unsigned long alb_failed;
     unsigned long alb_pushed;

     /* SD_BALANCE_EXEC stats */
-    unsigned long sbe_cnt;
+    unsigned long sbe_count;
     unsigned long sbe_balanced;
     unsigned long sbe_pushed;

     /* SD_BALANCE_FORK stats */
-    unsigned long sbf_cnt;
+    unsigned long sbf_count;
     unsigned long sbf_balanced;
     unsigned long sbf_pushed;

kernel/delayacct.c (1 addition, 1 deletion)
@@ -119,7 +119,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
      * No locking available for sched_info (and too expensive to add one)
      * Mitigate by taking snapshot of values
      */
-    t1 = tsk->sched_info.pcnt;
+    t1 = tsk->sched_info.pcount;
     t2 = tsk->sched_info.run_delay;
     t3 = tsk->sched_info.cpu_time;

kernel/sched.c (12 additions, 12 deletions)
@@ -349,19 +349,19 @@ struct rq {
     unsigned long yld_exp_empty;
     unsigned long yld_act_empty;
     unsigned long yld_both_empty;
-    unsigned long yld_cnt;
+    unsigned long yld_count;

     /* schedule() stats */
     unsigned long sched_switch;
-    unsigned long sched_cnt;
+    unsigned long sched_count;
     unsigned long sched_goidle;

     /* try_to_wake_up() stats */
-    unsigned long ttwu_cnt;
+    unsigned long ttwu_count;
     unsigned long ttwu_local;

     /* BKL stats */
-    unsigned long bkl_cnt;
+    unsigned long bkl_count;
 #endif
     struct lock_class_key rq_lock_key;
 };
@@ -1481,7 +1481,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)

     new_cpu = cpu;

-    schedstat_inc(rq, ttwu_cnt);
+    schedstat_inc(rq, ttwu_count);
     if (cpu == this_cpu) {
         schedstat_inc(rq, ttwu_local);
         goto out_set_cpu;
@@ -2637,7 +2637,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
         !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
         sd_idle = 1;

-    schedstat_inc(sd, lb_cnt[idle]);
+    schedstat_inc(sd, lb_count[idle]);

 redo:
     group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
@@ -2790,7 +2790,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
         !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
         sd_idle = 1;

-    schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
+    schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
 redo:
     group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
             &sd_idle, &cpus, NULL);
@@ -2924,7 +2924,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
     }

     if (likely(sd)) {
-        schedstat_inc(sd, alb_cnt);
+        schedstat_inc(sd, alb_count);

         if (move_one_task(target_rq, target_cpu, busiest_rq,
                 sd, CPU_IDLE))
@@ -3414,11 +3414,11 @@ static inline void schedule_debug(struct task_struct *prev)

     profile_hit(SCHED_PROFILING, __builtin_return_address(0));

-    schedstat_inc(this_rq(), sched_cnt);
+    schedstat_inc(this_rq(), sched_count);
 #ifdef CONFIG_SCHEDSTATS
     if (unlikely(prev->lock_depth >= 0)) {
-        schedstat_inc(this_rq(), bkl_cnt);
-        schedstat_inc(prev, sched_info.bkl_cnt);
+        schedstat_inc(this_rq(), bkl_count);
+        schedstat_inc(prev, sched_info.bkl_count);
     }
 #endif
 }
@@ -4558,7 +4558,7 @@ asmlinkage long sys_sched_yield(void)
 {
     struct rq *rq = this_rq_lock();

-    schedstat_inc(rq, yld_cnt);
+    schedstat_inc(rq, yld_count);
     current->sched_class->yield_task(rq);

     /*
kernel/sched_debug.c (4 additions, 4 deletions)
@@ -137,8 +137,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
     SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
     SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-    SEQ_printf(m, " .%-30s: %ld\n", "bkl_cnt",
-            rq->bkl_cnt);
+    SEQ_printf(m, " .%-30s: %ld\n", "bkl_count",
+            rq->bkl_count);
 #endif
     SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
             cfs_rq->nr_spread_over);
@@ -342,7 +342,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
     PN(se.exec_max);
     PN(se.slice_max);
     PN(se.wait_max);
-    P(sched_info.bkl_cnt);
+    P(sched_info.bkl_count);
 #endif
     SEQ_printf(m, "%-25s:%20Ld\n",
            "nr_switches", (long long)(p->nvcsw + p->nivcsw));
@@ -370,7 +370,7 @@ void proc_sched_set_task(struct task_struct *p)
     p->se.exec_max = 0;
     p->se.slice_max = 0;
     p->se.wait_max = 0;
-    p->sched_info.bkl_cnt = 0;
+    p->sched_info.bkl_count = 0;
 #endif
     p->se.sum_exec_runtime = 0;
     p->se.prev_sum_exec_runtime = 0;
kernel/sched_stats.h (12 additions, 12 deletions)
@@ -16,18 +16,18 @@ static int show_schedstat(struct seq_file *seq, void *v)
     struct rq *rq = cpu_rq(cpu);
 #ifdef CONFIG_SMP
     struct sched_domain *sd;
-    int dcnt = 0;
+    int dcount = 0;
 #endif

     /* runqueue-specific stats */
     seq_printf(seq,
         "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
         cpu, rq->yld_both_empty,
-        rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
-        rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
-        rq->ttwu_cnt, rq->ttwu_local,
+        rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
+        rq->sched_switch, rq->sched_count, rq->sched_goidle,
+        rq->ttwu_count, rq->ttwu_local,
         rq->rq_sched_info.cpu_time,
-        rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
+        rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

     seq_printf(seq, "\n");

@@ -39,12 +39,12 @@ static int show_schedstat(struct seq_file *seq, void *v)
             char mask_str[NR_CPUS];

             cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
-            seq_printf(seq, "domain%d %s", dcnt++, mask_str);
+            seq_printf(seq, "domain%d %s", dcount++, mask_str);
             for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
                     itype++) {
                 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
                         "%lu",
-                    sd->lb_cnt[itype],
+                    sd->lb_count[itype],
                     sd->lb_balanced[itype],
                     sd->lb_failed[itype],
                     sd->lb_imbalance[itype],
@@ -55,9 +55,9 @@ static int show_schedstat(struct seq_file *seq, void *v)
             }
             seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
                 " %lu %lu %lu\n",
-                sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
-                sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
-                sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
+                sd->alb_count, sd->alb_failed, sd->alb_pushed,
+                sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
+                sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
                 sd->ttwu_wake_remote, sd->ttwu_move_affine,
                 sd->ttwu_move_balance);
         }
@@ -101,7 +101,7 @@ rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
 {
     if (rq) {
         rq->rq_sched_info.run_delay += delta;
-        rq->rq_sched_info.pcnt++;
+        rq->rq_sched_info.pcount++;
     }
 }

@@ -164,7 +164,7 @@ static void sched_info_arrive(struct task_struct *t)
     sched_info_dequeued(t);
     t->sched_info.run_delay += delta;
     t->sched_info.last_arrival = now;
-    t->sched_info.pcnt++;
+    t->sched_info.pcount++;

     rq_sched_info_arrive(task_rq(t), delta);
 }
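
As a final illustration (again not from the commit), a small userspace parser for the per-runqueue line that show_schedstat() above writes to /proc/schedstat, using the field order of its format string and the post-rename names; note that this matches the 2007-era layout shown here, and later kernels changed the file's format:

#include <stdio.h>

int main(void)
{
    unsigned long yld_both_empty, yld_act_empty, yld_exp_empty, yld_count;
    unsigned long sched_switch, sched_count, sched_goidle;
    unsigned long ttwu_count, ttwu_local, pcount;
    unsigned long long cpu_time, run_delay;
    char line[512];
    int cpu;
    FILE *f = fopen("/proc/schedstat", "r");

    if (!f)
        return 1;
    while (fgets(line, sizeof(line), f)) {
        /* "cpu%d" lines only; version/timestamp/domain lines will not match */
        if (sscanf(line,
                   "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
                   &cpu, &yld_both_empty, &yld_act_empty, &yld_exp_empty,
                   &yld_count, &sched_switch, &sched_count, &sched_goidle,
                   &ttwu_count, &ttwu_local, &cpu_time, &run_delay,
                   &pcount) == 13)
            printf("cpu%d: sched_count=%lu ttwu_count=%lu pcount=%lu\n",
                   cpu, sched_count, ttwu_count, pcount);
    }
    fclose(f);
    return 0;
}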
