kprobes: allow to specify custom allocator for insn caches
The two current insn slot caches both use module_alloc/module_free to
allocate and free insn slot cache pages.

For s390 this is not sufficient, since insn slots must be allocated either
within the vmalloc module area or within DMA memory.

Therefore add a mechanism that allows each insn slot cache to specify its
own allocator.

Signed-off-by: Heiko Carstens <[email protected]>
Acked-by: Masami Hiramatsu <[email protected]>
Cc: Ananth N Mavinakayanahalli <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Martin Schwidefsky <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
heicarst authored and torvalds committed Sep 11, 2013
1 parent c802d64 commit af96397
Showing 2 changed files with 20 additions and 2 deletions.
2 changes: 2 additions & 0 deletions include/linux/kprobes.h
@@ -268,6 +268,8 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
 
 struct kprobe_insn_cache {
 	struct mutex mutex;
+	void *(*alloc)(void);	/* allocate insn page */
+	void (*free)(void *);	/* free insn page */
 	struct list_head pages;	/* list of kprobe_insn_page */
 	size_t insn_size;	/* size of instruction slot */
 	int nr_garbage;
20 changes: 18 additions & 2 deletions kernel/kprobes.c
@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	struct kprobe_insn_cache *cache;
 	int nused;
 	int ngarbage;
 	char slot_used[];
@@ -132,8 +133,20 @@ enum kprobe_slot_state {
 	SLOT_USED = 2,
 };
 
+static void *alloc_insn_page(void)
+{
+	return module_alloc(PAGE_SIZE);
+}
+
+static void free_insn_page(void *page)
+{
+	module_free(NULL, page);
+}
+
 struct kprobe_insn_cache kprobe_insn_slots = {
 	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 	.insn_size = MAX_INSN_SIZE,
 	.nr_garbage = 0,
@@ -182,7 +195,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	 * kernel image and loaded module images reside. This is required
 	 * so x86_64 can correctly handle the %rip-relative fixups.
 	 */
-	kip->insns = module_alloc(PAGE_SIZE);
+	kip->insns = c->alloc();
 	if (!kip->insns) {
 		kfree(kip);
 		goto out;
@@ -192,6 +205,7 @@ kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	kip->cache = c;
 	list_add(&kip->list, &c->pages);
 	slot = kip->insns;
 out:
@@ -213,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 		 */
 		if (!list_is_singular(&kip->list)) {
 			list_del(&kip->list);
-			module_free(NULL, kip->insns);
+			kip->cache->free(kip->insns);
 			kfree(kip);
 		}
 		return 1;
@@ -274,6 +288,8 @@ void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 /* For optimized_kprobe buffer */
 struct kprobe_insn_cache kprobe_optinsn_slots = {
 	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 	/* .insn_size is initialized later */
 	.nr_garbage = 0,
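To make the new hooks concrete, here is a minimal sketch (not part of this
commit) of how an architecture could register an additional insn slot cache
whose pages come from DMA-capable memory, as motivated in the commit message
above. Only struct kprobe_insn_cache, its alloc/free hooks, MAX_INSN_SIZE and
the __get_insn_slot()/__free_insn_slot() helpers come from this commit and
its parent; the names dmem_insn_slots, dmem_alloc_insn_page,
dmem_free_insn_page and the two wrapper functions are hypothetical.

/*
 * Illustrative sketch only: a separate insn slot cache backed by
 * DMA-capable pages, wired up through the alloc/free hooks added above.
 */
#include <linux/gfp.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Hypothetical allocator: one DMA-capable page per cache page. */
static void *dmem_alloc_insn_page(void)
{
	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void dmem_free_insn_page(void *page)
{
	free_page((unsigned long)page);
}

/* Cache definition mirrors kprobe_insn_slots, but with custom callbacks. */
static struct kprobe_insn_cache dmem_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(dmem_insn_slots.mutex),
	.alloc = dmem_alloc_insn_page,	/* custom page allocator */
	.free = dmem_free_insn_page,	/* matching free routine */
	.pages = LIST_HEAD_INIT(dmem_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};

/* Slots are then handed out and returned through the generic helpers. */
static kprobe_opcode_t *dmem_get_slot(void)
{
	return __get_insn_slot(&dmem_insn_slots);
}

static void dmem_free_slot(kprobe_opcode_t *slot)
{
	__free_insn_slot(&dmem_insn_slots, slot, 0);
}

The existing caches, kprobe_insn_slots and kprobe_optinsn_slots, keep their
old behaviour: as the diff shows, they are simply wired to the default
alloc_insn_page/free_insn_page wrappers around module_alloc/module_free.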
