cpumask: Replace cpu_coregroup_map with cpu_coregroup_mask
cpu_coregroup_map returned a cpumask_t: it's going away.

(Note, the sched part of this patch won't apply meaningfully to the
sched tree, but I'm posting it to show the goal).

Signed-off-by: Rusty Russell <[email protected]>
Signed-off-by: Mike Travis <[email protected]>
Cc: Jens Axboe <[email protected]>
Cc: Ingo Molnar <[email protected]>
rustyrussell committed Dec 26, 2008
1 parent 9be3eec commit be4d638
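
For readers skimming the diff below, here is a minimal userspace sketch of the API shape this commit moves between: the old helper hands back a cpumask_t by value (a stack copy whose size scales with NR_CPUS), while the new one returns a const struct cpumask pointer that callers feed straight to cpumask_first(). The types and helpers are stand-ins written for illustration, not the real <linux/cpumask.h> definitions.

/*
 * Illustrative stand-ins only (assumed definitions, not the kernel's
 * <linux/cpumask.h>): just enough to show why returning a pointer beats
 * returning the whole mask by value once NR_CPUS gets large.
 */
#include <stdio.h>

#define NR_CPUS        64
#define BITS_PER_LONG  (8 * sizeof(unsigned long))

struct cpumask {
	unsigned long bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
};
typedef struct cpumask cpumask_t;

/* Hypothetical per-CPU table of core-sibling masks. */
static struct cpumask core_masks[NR_CPUS];

/* Old style: the entire mask is copied onto the caller's stack. */
static cpumask_t old_coregroup_map(int cpu)
{
	return core_masks[cpu];
}

/* New style: only a pointer to the existing mask crosses the call. */
static const struct cpumask *new_coregroup_mask(int cpu)
{
	return &core_masks[cpu];
}

/* Simplified cpumask_first(): lowest set bit, or NR_CPUS if empty. */
static int first_cpu_in(const struct cpumask *mask)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask->bits[cpu / BITS_PER_LONG] &
		    (1UL << (cpu % BITS_PER_LONG)))
			return cpu;
	return NR_CPUS;
}

int main(void)
{
	core_masks[2].bits[0] = 0xcUL;	/* pretend CPUs 2 and 3 share a core group */

	cpumask_t copy = old_coregroup_map(2);			/* copies the mask  */
	const struct cpumask *ptr = new_coregroup_mask(2);	/* copies a pointer */

	printf("old API: first cpu = %d\n", first_cpu_in(&copy));
	printf("new API: first cpu = %d\n", first_cpu_in(ptr));
	return 0;
}

The same motivation applies in the kernel: with NR_CPUS potentially in the thousands, passing struct cpumask by value is no longer cheap, which is why cpu_coregroup_map() is being retired in favour of cpu_coregroup_mask().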
Showing 2 changed files with 5 additions and 5 deletions.
block/blk.h: 4 changes (2 additions & 2 deletions)

@@ -99,8 +99,8 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 static inline int blk_cpu_to_group(int cpu)
 {
 #ifdef CONFIG_SCHED_MC
-	cpumask_t mask = cpu_coregroup_map(cpu);
-	return first_cpu(mask);
+	const struct cpumask *mask = cpu_coregroup_mask(cpu);
+	return cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
 	return first_cpu(per_cpu(cpu_sibling_map, cpu));
 #else
kernel/sched.c: 6 changes (3 additions & 3 deletions)

@@ -7119,7 +7119,7 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
-	*mask = cpu_coregroup_map(cpu);
+	*mask = *cpu_coregroup_mask(cpu);
 	cpus_and(*mask, *mask, *cpu_map);
 	group = first_cpu(*mask);
 #elif defined(CONFIG_SCHED_SMT)
@@ -7485,7 +7485,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
+		sd->span = *cpu_coregroup_mask(i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
@@ -7528,7 +7528,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);

-		*this_core_map = cpu_coregroup_map(i);
+		*this_core_map = *cpu_coregroup_mask(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
 			continue;
