kmemleak: Handle percpu memory allocation
This patch adds kmemleak callbacks from the percpu allocator, reducing
the number of false positives caused by kmemleak not scanning such
memory blocks. The percpu chunks themselves are never reported as leaks
because of a current kmemleak limitation: the __percpu pointer does not
point directly to the actual chunks.
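
For illustration (a hypothetical reproducer, not part of this patch),
consider a kmalloc'd object whose only reference is stored inside a
percpu area; before this patch kmemleak never scanned that area and
falsely flagged the object:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

/* Hypothetical reproducer: the only reference to each kmalloc'd buffer
 * lives inside a percpu area, which kmemleak previously did not scan. */
struct pcpu_slot {
        void *buf;
};

static struct pcpu_slot __percpu *slots;

static int __init slots_init(void)
{
        unsigned int cpu;

        slots = alloc_percpu(struct pcpu_slot);
        if (!slots)
                return -ENOMEM;
        for_each_possible_cpu(cpu)
                /* falsely reported as a leak before this patch */
                per_cpu_ptr(slots, cpu)->buf = kmalloc(64, GFP_KERNEL);
        return 0;
}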

Reported-by: Huajun Li <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Acked-by: Tejun Heo <[email protected]>
Signed-off-by: Catalin Marinas <[email protected]>
ctmarinas committed Dec 2, 2011
1 parent 7434170 commit f528f0b
Showing 4 changed files with 94 additions and 1 deletion.
3 changes: 3 additions & 0 deletions Documentation/kmemleak.txt
@@ -127,7 +127,10 @@ See the include/linux/kmemleak.h header for the functions prototype.

kmemleak_init - initialize kmemleak
kmemleak_alloc - notify of a memory block allocation
kmemleak_alloc_percpu - notify of a percpu memory block allocation
kmemleak_free - notify of a memory block freeing
kmemleak_free_part - notify of a partial memory block freeing
kmemleak_free_percpu - notify of a percpu memory block freeing
kmemleak_not_leak - mark an object as not a leak
kmemleak_ignore - do not scan or report an object as leak
kmemleak_scan_area - add scan areas inside a memory block
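To illustrate how the allocation and freeing notifications above pair
up, here is a sketch of a hypothetical allocator built on raw pages,
which the slab kmemleak hooks do not cover; tracked_alloc() and
tracked_free() are invented names:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

/* Hypothetical raw-page allocator pairing kmemleak_alloc() with
 * kmemleak_free(); min_count 1 makes the block a leak candidate when
 * no references to it are found during scanning. */
static void *tracked_alloc(size_t size)
{
        void *obj = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

        if (obj)
                kmemleak_alloc(obj, size, 1, GFP_KERNEL);
        return obj;
}

static void tracked_free(void *obj, size_t size)
{
        kmemleak_free(obj);
        free_pages((unsigned long)obj, get_order(size));
}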
8 changes: 8 additions & 0 deletions include/linux/kmemleak.h
@@ -26,8 +26,10 @@
extern void kmemleak_init(void) __ref;
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
                           gfp_t gfp) __ref;
extern void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size) __ref;
extern void kmemleak_free(const void *ptr) __ref;
extern void kmemleak_free_part(const void *ptr, size_t size) __ref;
extern void kmemleak_free_percpu(const void __percpu *ptr) __ref;
extern void kmemleak_padding(const void *ptr, unsigned long offset,
                             size_t size) __ref;
extern void kmemleak_not_leak(const void *ptr) __ref;
@@ -68,6 +70,9 @@ static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
                                            gfp_t gfp)
{
}
static inline void kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
}
static inline void kmemleak_free(const void *ptr)
{
}
@@ -77,6 +82,9 @@ static inline void kmemleak_free_part(const void *ptr, size_t size)
static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
{
}
static inline void kmemleak_free_percpu(const void __percpu *ptr)
{
}
static inline void kmemleak_not_leak(const void *ptr)
{
}
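Note that the !CONFIG_DEBUG_KMEMLEAK stubs above are empty static
inlines, so hook sites need no #ifdef guards. A sketch of a hypothetical
hook site (the real hook is added to pcpu_alloc() in the mm/percpu.c
hunk below):

#include <linux/kmemleak.h>
#include <linux/percpu.h>

/* Hypothetical wrapper: when kmemleak is disabled the call below is an
 * empty inline and vanishes at compile time, so no conditional
 * compilation is needed at the call site. */
static void __percpu *hooked_alloc_percpu(size_t size, size_t align)
{
        void __percpu *p = __alloc_percpu(size, align);

        if (p)
                kmemleak_alloc_percpu(p, size);
        return p;
}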
72 changes: 72 additions & 0 deletions mm/kmemleak.c
@@ -230,8 +230,10 @@ static int kmemleak_skip_disable;
/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
        KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
@@ -852,6 +854,20 @@ static void early_alloc(struct early_log *log)
        rcu_read_unlock();
}

/*
 * Log an early allocated percpu block, registering each per-CPU area
 * via early_alloc().
 */
static void early_alloc_percpu(struct early_log *log)
{
        unsigned int cpu;
        const void __percpu *ptr = log->ptr;

        for_each_possible_cpu(cpu) {
                log->ptr = per_cpu_ptr(ptr, cpu);
                early_alloc(log);
        }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr: pointer to beginning of the object
@@ -878,6 +894,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr: __percpu pointer to beginning of the object
 * @size: size of the object
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 * allocation.
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, GFP_KERNEL);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
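
Conceptually, the registration above is equivalent to the following
sketch written against the pre-existing API (helper name invented);
min_count 0 makes each per-CPU area scan-only, so it is searched for
pointers but never itself reported:

/* Sketch: one scan-only kmemleak object per possible CPU.  This mirrors
 * what kmemleak_alloc_percpu() does via create_object() internally. */
static void register_percpu_areas(const void __percpu *ptr, size_t size)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                kmemleak_alloc(per_cpu_ptr(ptr, cpu), size, 0, GFP_KERNEL);
}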

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr: pointer to beginning of the object
@@ -916,6 +960,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr: __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
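
The teardown mirrors the per-CPU registration; again a sketch against
the existing API with an invented helper name:

/* Sketch: drop the kmemleak object of each per-CPU area, matching the
 * per-CPU objects created by kmemleak_alloc_percpu(). */
static void unregister_percpu_areas(const void __percpu *ptr)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                kmemleak_free(per_cpu_ptr(ptr, cpu));
}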

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr: pointer to beginning of the object
@@ -1727,12 +1793,18 @@ void __init kmemleak_init(void)
                case KMEMLEAK_ALLOC:
                        early_alloc(log);
                        break;
                case KMEMLEAK_ALLOC_PERCPU:
                        early_alloc_percpu(log);
                        break;
                case KMEMLEAK_FREE:
                        kmemleak_free(log->ptr);
                        break;
                case KMEMLEAK_FREE_PART:
                        kmemleak_free_part(log->ptr, log->size);
                        break;
                case KMEMLEAK_FREE_PERCPU:
                        kmemleak_free_percpu(log->ptr);
                        break;
                case KMEMLEAK_NOT_LEAK:
                        kmemleak_not_leak(log->ptr);
                        break;
12 changes: 11 additions & 1 deletion mm/percpu.c
@@ -67,6 +67,7 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
        const char *err;
        int slot, off, new_alloc;
        unsigned long flags;
        void __percpu *ptr;

        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
        mutex_unlock(&pcpu_alloc_mutex);

        /* return address relative to base address */
-       return __addr_to_pcpu_ptr(chunk->base_addr + off);
+       ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+       kmemleak_alloc_percpu(ptr, size);
+       return ptr;

fail_unlock:
        spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
        if (!ptr)
                return;

        kmemleak_free_percpu(ptr);

        addr = __pcpu_ptr_to_addr(ptr);

        spin_lock_irqsave(&pcpu_lock, flags);
@@ -1637,6 +1643,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                        rc = -ENOMEM;
                        goto out_free_areas;
                }
                /* kmemleak tracks the percpu allocations separately */
                kmemleak_free(ptr);
                areas[group] = ptr;

                base = min(ptr, base);
@@ -1751,6 +1759,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
                                           "for cpu%u\n", psize_str, cpu);
                        goto enomem;
                }
                /* kmemleak tracks the percpu allocations separately */
                kmemleak_free(ptr);
                pages[j++] = virt_to_page(ptr);
        }

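With both hooks in place, ordinary percpu users get leak tracking
transparently; a hypothetical caller contains no kmemleak-specific code
at all:

#include <linux/percpu.h>
#include <linux/types.h>

/* Hypothetical caller: alloc_percpu()/free_percpu() now notify kmemleak
 * internally, so nothing kmemleak-specific appears here. */
struct stats {
        u64 packets;
        u64 bytes;
};

static struct stats __percpu *stats;

static int stats_init(void)
{
        stats = alloc_percpu(struct stats);     /* registered with kmemleak */
        return stats ? 0 : -ENOMEM;
}

static void stats_exit(void)
{
        free_percpu(stats);                     /* unregistered from kmemleak */
}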
