cpumask: use mm_cpumask() wrapper: x86
Makes code future-proof against the impending change to mm->cpu_vm_mask (to be a pointer).

It's also a chance to use the new cpumask_ ops which take a pointer
(the older ones are deprecated, but there's no hurry for arch code).

Signed-off-by: Rusty Russell <[email protected]>
rustyrussell committed Sep 24, 2009
Commit 78f1c4d (parent: fa40699)
Showing 4 changed files with 15 additions and 14 deletions.
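
Background (not part of the diff): mm_cpumask() is the accessor this patch adopts. A minimal sketch of the wrapper as it stood around this series (the real definition lives in include/linux/sched.h) shows why callers become future-proof: once cpu_vm_mask turns into a pointer, only the wrapper body changes.

/* Future-safe accessor for struct mm_struct's cpu_vm_mask; a sketch of
 * the contemporary definition in include/linux/sched.h. Callers never
 * touch mm->cpu_vm_mask directly, so its type can change underneath. */
static inline struct cpumask *mm_cpumask(struct mm_struct *mm)
{
	return &mm->cpu_vm_mask;
}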
6 changes: 3 additions & 3 deletions arch/x86/include/asm/mmu_context.h
@@ -37,12 +37,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	if (likely(prev != next)) {
 		/* stop flush ipis for the previous mm */
-		cpu_clear(cpu, prev->cpu_vm_mask);
+		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 #ifdef CONFIG_SMP
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		percpu_write(cpu_tlbstate.active_mm, next);
 #endif
-		cpu_set(cpu, next->cpu_vm_mask);
+		cpumask_set_cpu(cpu, mm_cpumask(next));
 
 		/* Re-load page tables */
 		load_cr3(next->pgd);
@@ -58,7 +58,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
 
-		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
 			/* We were in lazy tlb mode and leave_mm disabled
 			 * tlb flush IPI delivery. We must reload CR3
 			 * to make sure to use no freed page tables.
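
All four files follow the same substitution pattern: the old ops take a cpumask_t by value or as an embedded struct member, the new cpumask_* ops take a struct cpumask pointer. A summary of the pairs as used in this patch (nothing here is new API, just the correspondence):

/* old op (takes cpumask_t)            new op (takes struct cpumask *)
 * cpu_clear(cpu, mm->cpu_vm_mask)  -> cpumask_clear_cpu(cpu, mm_cpumask(mm))
 * cpu_set(cpu, mm->cpu_vm_mask)    -> cpumask_set_cpu(cpu, mm_cpumask(mm))
 * cpu_test_and_set(cpu, ...)       -> cpumask_test_and_set_cpu(cpu, mm_cpumask(mm))
 * cpus_equal(a, b)                 -> cpumask_equal(&a, &b)
 * cpumask_of_cpu(cpu)              -> cpumask_of(cpu)
 */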
4 changes: 2 additions & 2 deletions arch/x86/kernel/ldt.c
@@ -67,8 +67,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #ifdef CONFIG_SMP
 	preempt_disable();
 	load_LDT(pc);
-	if (!cpus_equal(current->mm->cpu_vm_mask,
-			cpumask_of_cpu(smp_processor_id())))
+	if (!cpumask_equal(mm_cpumask(current->mm),
+			   cpumask_of(smp_processor_id())))
 		smp_call_function(flush_ldt, current->mm, 1);
 	preempt_enable();
 #else
15 changes: 8 additions & 7 deletions arch/x86/mm/tlb.c
@@ -59,7 +59,8 @@ void leave_mm(int cpu)
 {
 	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
 		BUG();
-	cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+	cpumask_clear_cpu(cpu,
+			  mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
 	load_cr3(swapper_pg_dir);
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -234,8 +235,8 @@ void flush_tlb_current_task(void)
 	preempt_disable();
 
 	local_flush_tlb();
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 	preempt_enable();
 }

@@ -249,8 +250,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 		else
 			leave_mm(smp_processor_id());
 	}
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, TLB_FLUSH_ALL);
 
 	preempt_enable();
 }
@@ -268,8 +269,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
 		leave_mm(smp_processor_id());
 	}
 
-	if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
+		flush_tlb_others(mm_cpumask(mm), mm, va);
 
 	preempt_enable();
 }
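
The repeated guard in the tlb.c hunks is the usual "does any other CPU hold this mm?" check: cpumask_any_but() returns an arbitrary set CPU other than the one passed in, or a value >= nr_cpu_ids when no such CPU exists. A minimal sketch of the idiom; the helper name is illustrative, not from the patch:

/* Illustrative helper (not in the patch): true if some CPU other than
 * 'cpu' has this mm in its mask, i.e. flush_tlb_others() has work. */
static inline bool mm_has_other_users(struct mm_struct *mm, unsigned int cpu)
{
	/* cpumask_any_but() yields >= nr_cpu_ids when no other bit is set. */
	return cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids;
}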
4 changes: 2 additions & 2 deletions arch/x86/xen/mmu.c
@@ -1165,14 +1165,14 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 	/* Get the "official" set of cpus referring to our pagetable. */
 	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
 		for_each_online_cpu(cpu) {
-			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
+			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
 			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
 				continue;
 			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
 		}
 		return;
 	}
-	cpumask_copy(mask, &mm->cpu_vm_mask);
+	cpumask_copy(mask, mm_cpumask(mm));
 
 	/* It's possible that a vcpu may have a stale reference to our
 	   cr3, because its in lazy mode, and it hasn't yet flushed
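
A side note on the Xen hunk: cpumask_copy() needs a scratch mask, allocated as a cpumask_var_t via alloc_cpumask_var(); since this can run in atomic context the GFP_ATOMIC allocation may fail, and the function falls back to walking every online CPU. A sketch of the allocate-or-fall-back idiom, with hypothetical drop_refs_slowly()/drop_refs_using() stand-ins:

/* Sketch of the pattern in xen_drop_mm_ref(); the two helpers are
 * hypothetical stand-ins, not functions from the patch. */
cpumask_var_t mask;

if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
	drop_refs_slowly(mm);		/* no scratch mask: per-CPU loop */
	return;
}
cpumask_copy(mask, mm_cpumask(mm));	/* snapshot, then act on the copy */
drop_refs_using(mask, mm);
free_cpumask_var(mask);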
