mm: add new mmgrab() helper
Apart from adding the helper function itself, the rest of the kernel is
converted mechanically using:

  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)->mm_count);/mmgrab\(\1\);/'
  git grep -l 'atomic_inc.*mm_count' | xargs sed -i 's/atomic_inc(&\(.*\)\.mm_count);/mmgrab\(\&\1\);/'

This is needed for a later patch that hooks into the helper, but might
be a worthwhile cleanup on its own.
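
Concretely, the first expression converts pointer-style users and the second
converts embedded-struct users; a minimal before/after sketch of the two
patterns (illustration only, using call sites of the kind converted below):

  /* before */
  atomic_inc(&mm->mm_count);
  atomic_inc(&init_mm.mm_count);

  /* after */
  mmgrab(mm);
  mmgrab(&init_mm);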

(Michal Hocko provided most of the kerneldoc comment.)

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Vegard Nossum <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
vegard authored and torvalds committed Feb 28, 2017
Commit f1f1007 (1 parent: 522b837)
40 changed files with 65 additions and 43 deletions.

arch/alpha/kernel/smp.c (1 addition, 1 deletion)
@@ -144,7 +144,7 @@ smp_callin(void)
 alpha_mv.smp_callin();

 /* All kernel threads share the same mm context. */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 /* inform the notifiers about the new cpu */

arch/arc/kernel/smp.c (1 addition, 1 deletion)
@@ -140,7 +140,7 @@ void start_kernel_secondary(void)
 setup_processor();

 atomic_inc(&mm->mm_users);
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 current->active_mm = mm;
 cpumask_set_cpu(cpu, mm_cpumask(mm));


arch/arm/kernel/smp.c (1 addition, 1 deletion)
@@ -371,7 +371,7 @@ asmlinkage void secondary_start_kernel(void)
 * reference and switch to it.
 */
 cpu = smp_processor_id();
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 current->active_mm = mm;
 cpumask_set_cpu(cpu, mm_cpumask(mm));


arch/arm64/kernel/smp.c (1 addition, 1 deletion)
@@ -222,7 +222,7 @@ asmlinkage void secondary_start_kernel(void)
 * All kernel threads share the same mm context; grab a
 * reference and switch to it.
 */
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 current->active_mm = mm;

 /*

arch/blackfin/mach-common/smp.c (1 addition, 1 deletion)
@@ -308,7 +308,7 @@ void secondary_start_kernel(void)

 /* Attach the new idle task to the global mm. */
 atomic_inc(&mm->mm_users);
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 current->active_mm = mm;

 preempt_disable();

arch/hexagon/kernel/smp.c (1 addition, 1 deletion)
@@ -162,7 +162,7 @@ void start_secondary(void)
 );

 /* Set the memory struct */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 cpu = smp_processor_id();

arch/ia64/kernel/setup.c (1 addition, 1 deletion)
@@ -994,7 +994,7 @@ cpu_init (void)
 */
 ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
 | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 BUG_ON(current->mm);


arch/m32r/kernel/setup.c (1 addition, 1 deletion)
@@ -403,7 +403,7 @@ void __init cpu_init (void)
 printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

 /* Set up and load the per-CPU TSS and LDT */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 if (current->mm)
 BUG();

arch/metag/kernel/smp.c (1 addition, 1 deletion)
@@ -345,7 +345,7 @@ asmlinkage void secondary_start_kernel(void)
 * reference and switch to it.
 */
 atomic_inc(&mm->mm_users);
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 current->active_mm = mm;
 cpumask_set_cpu(cpu, mm_cpumask(mm));
 enter_lazy_tlb(mm, current);

arch/mips/kernel/traps.c (1 addition, 1 deletion)
@@ -2232,7 +2232,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
 if (!cpu_data[cpu].asid_cache)
 cpu_data[cpu].asid_cache = asid_first_version(cpu);

-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 BUG_ON(current->mm);
 enter_lazy_tlb(&init_mm, current);

arch/mn10300/kernel/smp.c (1 addition, 1 deletion)
@@ -589,7 +589,7 @@ static void __init smp_cpu_init(void)
 }
 printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);

-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 BUG_ON(current->mm);


arch/parisc/kernel/smp.c (1 addition, 1 deletion)
@@ -279,7 +279,7 @@ smp_cpu_init(int cpunum)
 set_cpu_online(cpunum, true);

 /* Initialise the idle task for this CPU */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 BUG_ON(current->mm);
 enter_lazy_tlb(&init_mm, current);

arch/powerpc/kernel/smp.c (1 addition, 1 deletion)
@@ -707,7 +707,7 @@ void start_secondary(void *unused)
 unsigned int cpu = smp_processor_id();
 int i, base;

-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 smp_store_cpu_info(cpu);

arch/s390/kernel/processor.c (1 addition, 1 deletion)
@@ -73,7 +73,7 @@ void cpu_init(void)
 get_cpu_id(id);
 if (machine_has_cpu_mhz)
 update_cpu_mhz(NULL);
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 BUG_ON(current->mm);
 enter_lazy_tlb(&init_mm, current);

arch/score/kernel/traps.c (1 addition, 1 deletion)
@@ -336,7 +336,7 @@ void __init trap_init(void)
 set_except_vector(18, handle_dbe);
 flush_icache_range(DEBUG_VECTOR_BASE_ADDR, IRQ_VECTOR_BASE_ADDR);

-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 cpu_cache_init();
 }

arch/sh/kernel/smp.c (1 addition, 1 deletion)
@@ -178,7 +178,7 @@ asmlinkage void start_secondary(void)
 struct mm_struct *mm = &init_mm;

 enable_mmu();
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 atomic_inc(&mm->mm_users);
 current->active_mm = mm;
 #ifdef CONFIG_MMU

arch/sparc/kernel/leon_smp.c (1 addition, 1 deletion)
@@ -93,7 +93,7 @@ void leon_cpu_pre_online(void *arg)
 : "memory" /* paranoid */);

 /* Attach to the address space of init_task. */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))

arch/sparc/kernel/smp_64.c (1 addition, 1 deletion)
@@ -122,7 +122,7 @@ void smp_callin(void)
 current_thread_info()->new_child = 0;

 /* Attach to the address space of init_task. */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 /* inform the notifiers about the new cpu */

arch/sparc/kernel/sun4d_smp.c (1 addition, 1 deletion)
@@ -93,7 +93,7 @@ void sun4d_cpu_pre_online(void *arg)
 show_leds(cpuid);

 /* Attach to the address space of init_task. */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 local_ops->cache_all();

arch/sparc/kernel/sun4m_smp.c (1 addition, 1 deletion)
@@ -59,7 +59,7 @@ void sun4m_cpu_pre_online(void *arg)
 : "memory" /* paranoid */);

 /* Attach to the address space of init_task. */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))

arch/sparc/kernel/traps_32.c (1 addition, 1 deletion)
@@ -448,7 +448,7 @@ void trap_init(void)
 thread_info_offsets_are_bolixed_pete();

 /* Attach to the address space of init_task. */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;

 /* NOTE: Other cpus have this done as they are started

arch/sparc/kernel/traps_64.c (1 addition, 1 deletion)
@@ -2837,6 +2837,6 @@ void __init trap_init(void)
 /* Attach to the address space of init_task. On SMP we
 * do this in smp.c:smp_callin for other cpus.
 */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 }

arch/tile/kernel/smpboot.c (1 addition, 1 deletion)
@@ -160,7 +160,7 @@ static void start_secondary(void)
 __this_cpu_write(current_asid, min_asid);

 /* Set up this thread as another owner of the init_mm */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 current->active_mm = &init_mm;
 if (current->mm)
 BUG();

arch/x86/kernel/cpu/common.c (2 additions, 2 deletions)
@@ -1510,7 +1510,7 @@ void cpu_init(void)
 for (i = 0; i <= IO_BITMAP_LONGS; i++)
 t->io_bitmap[i] = ~0UL;

-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 me->active_mm = &init_mm;
 BUG_ON(me->mm);
 enter_lazy_tlb(&init_mm, me);
@@ -1561,7 +1561,7 @@ void cpu_init(void)
 /*
 * Set up and load the per-CPU TSS and LDT
 */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 curr->active_mm = &init_mm;
 BUG_ON(curr->mm);
 enter_lazy_tlb(&init_mm, curr);

arch/xtensa/kernel/smp.c (1 addition, 1 deletion)
@@ -136,7 +136,7 @@ void secondary_start_kernel(void)
 /* All kernel threads share the same mm context. */

 atomic_inc(&mm->mm_users);
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 current->active_mm = mm;
 cpumask_set_cpu(cpu, mm_cpumask(mm));
 enter_lazy_tlb(mm, current);

drivers/gpu/drm/amd/amdkfd/kfd_process.c (1 addition, 1 deletion)
@@ -262,7 +262,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
 * and because the mmu_notifier_unregister function also drop
 * mm_count we need to take an extra count here.
 */
-atomic_inc(&p->mm->mm_count);
+mmgrab(p->mm);
 mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
 mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
 }

drivers/gpu/drm/i915/i915_gem_userptr.c (1 addition, 1 deletion)
@@ -334,7 +334,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 mm->i915 = to_i915(obj->base.dev);

 mm->mm = current->mm;
-atomic_inc(&current->mm->mm_count);
+mmgrab(current->mm);

 mm->mn = NULL;


drivers/infiniband/hw/hfi1/file_ops.c (1 addition, 1 deletion)
@@ -185,7 +185,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
 if (fd) {
 fd->rec_cpu_num = -1; /* no cpu affinity by default */
 fd->mm = current->mm;
-atomic_inc(&fd->mm->mm_count);
+mmgrab(fd->mm);
 fp->private_data = fd;
 } else {
 fp->private_data = NULL;

fs/proc/base.c (2 additions, 2 deletions)
@@ -766,7 +766,7 @@ struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)

 if (!IS_ERR_OR_NULL(mm)) {
 /* ensure this mm_struct can't be freed */
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 /* but do not pin its memory */
 mmput(mm);
 }
@@ -1064,7 +1064,7 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
 if (p) {
 if (atomic_read(&p->mm->mm_users) > 1) {
 mm = p->mm;
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 }
 task_unlock(p);
 }

fs/userfaultfd.c (1 addition, 1 deletion)
@@ -1847,7 +1847,7 @@ static struct file *userfaultfd_file_create(int flags)
 ctx->released = false;
 ctx->mm = current->mm;
 /* prevent the mm struct to be freed */
-atomic_inc(&ctx->mm->mm_count);
+mmgrab(ctx->mm);

 file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx,
 O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));

include/linux/sched.h (22 additions, 0 deletions)
@@ -2904,6 +2904,28 @@ static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
 */
 extern struct mm_struct * mm_alloc(void);

+/**
+ * mmgrab() - Pin a &struct mm_struct.
+ * @mm: The &struct mm_struct to pin.
+ *
+ * Make sure that @mm will not get freed even after the owning task
+ * exits. This doesn't guarantee that the associated address space
+ * will still exist later on and mmget_not_zero() has to be used before
+ * accessing it.
+ *
+ * This is a preferred way to to pin @mm for a longer/unbounded amount
+ * of time.
+ *
+ * Use mmdrop() to release the reference acquired by mmgrab().
+ *
+ * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
+ * of &mm_struct.mm_count vs &mm_struct.mm_users.
+ */
+static inline void mmgrab(struct mm_struct *mm)
+{
+	atomic_inc(&mm->mm_count);
+}
+
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
 static inline void mmdrop(struct mm_struct *mm)
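
The kerneldoc above implies a typical usage pattern; a minimal sketch, where
the struct foo container and the foo_* helpers are purely hypothetical:

  #include <linux/sched.h>

  struct foo {
          struct mm_struct *mm;
  };

  static void foo_init(struct foo *f)
  {
          f->mm = current->mm;
          mmgrab(f->mm);                  /* pin the mm_struct itself (mm_count) */
  }

  static void foo_access(struct foo *f)
  {
          if (mmget_not_zero(f->mm)) {    /* pin the address space (mm_users), if it still exists */
                  /* ... safe to access f->mm's address space here ... */
                  mmput(f->mm);
          }
  }

  static void foo_release(struct foo *f)
  {
          mmdrop(f->mm);                  /* drop the reference taken by mmgrab() */
  }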

kernel/exit.c (1 addition, 1 deletion)
@@ -539,7 +539,7 @@ static void exit_mm(void)
 __set_current_state(TASK_RUNNING);
 down_read(&mm->mmap_sem);
 }
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 BUG_ON(mm != current->active_mm);
 /* more a memory barrier than a real lock */
 task_lock(current);

kernel/futex.c (1 addition, 1 deletion)
@@ -338,7 +338,7 @@ static inline bool should_fail_futex(bool fshared)

 static inline void futex_get_mm(union futex_key *key)
 {
-atomic_inc(&key->private.mm->mm_count);
+mmgrab(key->private.mm);
 /*
 * Ensure futex_get_mm() implies a full barrier such that
 * get_futex_key() implies a full barrier. This is relied upon

kernel/sched/core.c (2 additions, 2 deletions)
@@ -2847,7 +2847,7 @@ context_switch(struct rq *rq, struct task_struct *prev,

 if (!mm) {
 next->active_mm = oldmm;
-atomic_inc(&oldmm->mm_count);
+mmgrab(oldmm);
 enter_lazy_tlb(oldmm, next);
 } else
 switch_mm_irqs_off(oldmm, mm, next);
@@ -6098,7 +6098,7 @@ void __init sched_init(void)
 /*
 * The boot idle thread does lazy MMU switching as well:
 */
-atomic_inc(&init_mm.mm_count);
+mmgrab(&init_mm);
 enter_lazy_tlb(&init_mm, current);

 /*

mm/khugepaged.c (1 addition, 1 deletion)
@@ -420,7 +420,7 @@ int __khugepaged_enter(struct mm_struct *mm)
 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
 spin_unlock(&khugepaged_mm_lock);

-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 if (wakeup)
 wake_up_interruptible(&khugepaged_wait);


mm/ksm.c (1 addition, 1 deletion)
@@ -1854,7 +1854,7 @@ int __ksm_enter(struct mm_struct *mm)
 spin_unlock(&ksm_mmlist_lock);

 set_bit(MMF_VM_MERGEABLE, &mm->flags);
-atomic_inc(&mm->mm_count);
+mmgrab(mm);

 if (needs_wakeup)
 wake_up_interruptible(&ksm_thread_wait);

mm/mmu_context.c (1 addition, 1 deletion)
@@ -25,7 +25,7 @@ void use_mm(struct mm_struct *mm)
 task_lock(tsk);
 active_mm = tsk->active_mm;
 if (active_mm != mm) {
-atomic_inc(&mm->mm_count);
+mmgrab(mm);
 tsk->active_mm = mm;
 }
 tsk->mm = mm;

mm/mmu_notifier.c (1 addition, 1 deletion)
@@ -275,7 +275,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 mm->mmu_notifier_mm = mmu_notifier_mm;
 mmu_notifier_mm = NULL;
 }
-atomic_inc(&mm->mm_count);
+mmgrab(mm);

 /*
 * Serialize the update against mmu_notifier_unregister. A

[2 more changed files not shown]