mm: vmalloc: pass additional vm_flags to __vmalloc_node_range()
For instrumenting global variables KASan will need shadow memory backing the
memory used for modules.  So on module load we will need to allocate memory
for the shadow and map it at the shadow address that corresponds to the
address returned by module_alloc().

__vmalloc_node_range() could be used for this purpose, except that it puts a
guard hole after the allocated area.  A guard hole in shadow memory would be a
problem, because at some future point we might need shadow memory at the
address occupied by the guard hole, and we would then fail to allocate shadow
for module_alloc().

Now that we have the VM_NO_GUARD flag for disabling the guard page, we need a
way to pass it into __vmalloc_node_range().  Add a new parameter 'vm_flags' to
the __vmalloc_node_range() function.

Signed-off-by: Andrey Ryabinin <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Konstantin Serebryany <[email protected]>
Cc: Dmitry Chernenkov <[email protected]>
Signed-off-by: Andrey Konovalov <[email protected]>
Cc: Yuri Gribov <[email protected]>
Cc: Konstantin Khlebnikov <[email protected]>
Cc: Sasha Levin <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Joonsoo Kim <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
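
As a usage illustration only (not part of this commit): a minimal sketch of how a KASan module-shadow allocation could pass VM_NO_GUARD through the new parameter.  The function name below is made up for the example, and kasan_mem_to_shadow() and KASAN_SHADOW_SCALE_SHIFT are assumed KASan helpers.

/*
 * Hypothetical sketch: allocate shadow memory for a freshly loaded
 * module.  VM_NO_GUARD requests no guard hole after the area, so a
 * later, adjacent shadow allocation cannot collide with one.
 */
static int example_module_shadow_alloc(void *addr, size_t size)
{
        unsigned long shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        size_t shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                                      PAGE_SIZE);
        void *ret;

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                                   shadow_start + shadow_size,
                                   GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
                                   VM_NO_GUARD, NUMA_NO_NODE,
                                   __builtin_return_address(0));

        return ret ? 0 : -ENOMEM;
}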
aryabinin authored and torvalds committed Feb 14, 2015
1 parent 71394fe commit cb9e3c2
Showing 10 changed files with 18 additions and 14 deletions.
2 changes: 1 addition & 1 deletion arch/arm/kernel/module.c
@@ -41,7 +41,7 @@
 void *module_alloc(unsigned long size)
 {
         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                        GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                        GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 #endif
4 changes: 2 additions & 2 deletions arch/arm64/kernel/module.c
@@ -35,8 +35,8 @@
 void *module_alloc(unsigned long size)
 {
         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                        GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
-                        __builtin_return_address(0));
+                        GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+                        NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 enum aarch64_reloc_op {
2 changes: 1 addition & 1 deletion arch/mips/kernel/module.c
@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(dbe_lock);
 void *module_alloc(unsigned long size)
 {
         return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
-                        GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+                        GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 #endif
2 changes: 1 addition & 1 deletion arch/parisc/kernel/module.c
@@ -219,7 +219,7 @@ void *module_alloc(unsigned long size)
          * init_data correctly */
         return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                         GFP_KERNEL | __GFP_HIGHMEM,
-                        PAGE_KERNEL_RWX, NUMA_NO_NODE,
+                        PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 
2 changes: 1 addition & 1 deletion arch/s390/kernel/module.c
@@ -50,7 +50,7 @@ void *module_alloc(unsigned long size)
         if (PAGE_ALIGN(size) > MODULES_LEN)
                 return NULL;
         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                        GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+                        GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 #endif
2 changes: 1 addition & 1 deletion arch/sparc/kernel/module.c
@@ -29,7 +29,7 @@ static void *module_map(unsigned long size)
         if (PAGE_ALIGN(size) > MODULES_LEN)
                 return NULL;
         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                        GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
+                        GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 #else
2 changes: 1 addition & 1 deletion arch/unicore32/kernel/module.c
@@ -25,7 +25,7 @@
 void *module_alloc(unsigned long size)
 {
         return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-                        GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                        GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 
2 changes: 1 addition & 1 deletion arch/x86/kernel/module.c
@@ -88,7 +88,7 @@ void *module_alloc(unsigned long size)
         return __vmalloc_node_range(size, 1,
                         MODULES_VADDR + get_module_load_offset(),
                         MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
-                        PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                        PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                         __builtin_return_address(0));
 }
 
4 changes: 3 additions & 1 deletion include/linux/vmalloc.h
@@ -76,7 +76,9 @@ extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                         unsigned long start, unsigned long end, gfp_t gfp_mask,
-                        pgprot_t prot, int node, const void *caller);
+                        pgprot_t prot, unsigned long vm_flags, int node,
+                        const void *caller);
+
 extern void vfree(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
10 changes: 6 additions & 4 deletions mm/vmalloc.c
@@ -1619,6 +1619,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  * @end: vm area range end
  * @gfp_mask: flags for the page level allocator
  * @prot: protection mask for the allocated pages
+ * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
  * @node: node to use for allocation or NUMA_NO_NODE
  * @caller: caller's return address
  *
@@ -1628,7 +1629,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  */
 void *__vmalloc_node_range(unsigned long size, unsigned long align,
                         unsigned long start, unsigned long end, gfp_t gfp_mask,
-                        pgprot_t prot, int node, const void *caller)
+                        pgprot_t prot, unsigned long vm_flags, int node,
+                        const void *caller)
 {
         struct vm_struct *area;
         void *addr;
@@ -1638,8 +1640,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
         if (!size || (size >> PAGE_SHIFT) > totalram_pages)
                 goto fail;
 
-        area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
-                        start, end, node, gfp_mask, caller);
+        area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+                        vm_flags, start, end, node, gfp_mask, caller);
         if (!area)
                 goto fail;
 
@@ -1688,7 +1690,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
                         int node, const void *caller)
 {
         return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
-                        gfp_mask, prot, node, caller);
+                        gfp_mask, prot, 0, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
