Merge tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "This release has no new tracing features, just clean ups, minor fixes
  and small optimizations"

* tag 'trace-v4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (25 commits)
  tracing: Remove outdated ring buffer comment
  tracing/probes: Fix a warning message to show correct maximum length
  tracing: Fix return value check in trace_benchmark_reg()
  tracing: Use modern function declaration
  jump_label: Reduce the size of struct static_key
  tracing/probe: Show subsystem name in messages
  tracing/hwlat: Update old comment about migration
  timers: Make flags output in the timer_start tracepoint useful
  tracing: Have traceprobe_probes_write() not access userspace unnecessarily
  tracing: Have COMM event filter key be treated as a string
  ftrace: Have set_graph_function handle multiple functions in one write
  ftrace: Do not hold references of ftrace_graph_{notrace_}hash out of graph_lock
  tracing: Reset parser->buffer to allow multiple "puts"
  ftrace: Have set_graph_functions handle write with RDWR
  ftrace: Reset fgd->hash in ftrace_graph_write()
  ftrace: Replace (void *)1 with a meaningful macro name FTRACE_GRAPH_EMPTY
  ftrace: Create a slight optimization on searching the ftrace_hash
  tracing: Add ftrace_hash_key() helper function
  ftrace: Convert graph filter to use hash tables
  ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  ...
torvalds committed Feb 27, 2017
2 parents e5d56ef + 67d04bb commit 79b17ea
Showing 16 changed files with 585 additions and 259 deletions.
Documentation/static-keys.txt (4 changes: 3 additions & 1 deletion)

@@ -155,7 +155,9 @@ or:
 
 There are a few functions and macros that architectures must implement in order
 to take advantage of this optimization. If there is no architecture support, we
-simply fall back to a traditional, load, test, and jump sequence.
+simply fall back to a traditional, load, test, and jump sequence. Also, the
+struct jump_entry table must be at least 4-byte aligned because the
+static_key->entry field makes use of the two least significant bits.
 
 * select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
 
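The alignment requirement called out in this hunk is what makes the series work: a 4-byte-aligned address always has its two least significant bits clear, so those bits are free to carry flags. A minimal userspace sketch of that pointer-tagging idea; pack(), unpack() and TYPE_MASK are illustrative names, not kernel API:

/*
 * Pointer tagging sketch: with 4-byte alignment, bits 0-1 of a pointer
 * are always zero and can store flags. Illustrative names only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TYPE_MASK 3UL	/* the two low bits reserved for flags */

struct entry { int dummy; } __attribute__((aligned(4)));

static uintptr_t pack(struct entry *e, unsigned long flags)
{
	assert(((uintptr_t)e & TYPE_MASK) == 0); /* guaranteed by alignment */
	return (uintptr_t)e | (flags & TYPE_MASK);
}

static struct entry *unpack(uintptr_t v)
{
	return (struct entry *)(v & ~TYPE_MASK);	/* strip the flag bits */
}

int main(void)
{
	static struct entry e;
	uintptr_t tagged = pack(&e, 1UL);	/* bit 0: "initially true" */

	printf("flags=%lu ptr_ok=%d\n",
	       (unsigned long)(tagged & TYPE_MASK), unpack(tagged) == &e);
	return 0;
}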
include/linux/compiler.h (27 changes: 17 additions & 10 deletions)

@@ -105,29 +105,36 @@ struct ftrace_branch_data {
 	};
 };
 
+struct ftrace_likely_data {
+	struct ftrace_branch_data	data;
+	unsigned long			constant;
+};
+
 /*
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
 #if defined(CONFIG_TRACE_BRANCH_PROFILING) \
     && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
-void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
+void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+			  int expect, int is_constant);
 
 #define likely_notrace(x)	__builtin_expect(!!(x), 1)
 #define unlikely_notrace(x)	__builtin_expect(!!(x), 0)
 
-#define __branch_check__(x, expect) ({					\
+#define __branch_check__(x, expect, is_constant) ({			\
 			int ______r;					\
-			static struct ftrace_branch_data		\
+			static struct ftrace_likely_data		\
 				__attribute__((__aligned__(4)))		\
 				__attribute__((section("_ftrace_annotated_branch"))) \
 			______f = {					\
-				.func = __func__,			\
-				.file = __FILE__,			\
-				.line = __LINE__,			\
+				.data.func = __func__,			\
+				.data.file = __FILE__,			\
+				.data.line = __LINE__,			\
 			};						\
-			______r = likely_notrace(x);			\
-			ftrace_likely_update(&______f, ______r, expect); \
+			______r = __builtin_expect(!!(x), expect);	\
+			ftrace_likely_update(&______f, ______r,		\
+					     expect, is_constant);	\
 			______r;					\
 		})
 
@@ -137,10 +144,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  * written by Daniel Walker.
  */
 # ifndef likely
-#  define likely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
+#  define likely(x)	(__branch_check__(x, 1, __builtin_constant_p(x)))
 # endif
 # ifndef unlikely
-#  define unlikely(x)	(__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
+#  define unlikely(x)	(__branch_check__(x, 0, __builtin_constant_p(x)))
 # endif
 
 #ifdef CONFIG_PROFILE_ALL_BRANCHES
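For orientation: the old likely()/unlikely() bypassed the profiler whenever the condition was a compile-time constant, so constant branches were invisible to it. The rewritten macros always go through __branch_check__() and hand __builtin_constant_p(x) to ftrace_likely_update(), so constant branches can be counted separately. A self-contained sketch of that pattern with made-up names (profile_branch, my_likely); it builds with GCC or Clang, which support statement expressions:

#include <stdio.h>

static void profile_branch(const char *file, int line, int val,
			   int expect, int is_constant)
{
	/* stand-in for ftrace_likely_update(): just log the branch */
	printf("%s:%d val=%d expect=%d constant=%d\n",
	       file, line, val, expect, is_constant);
}

/* mirrors the new __branch_check__(): constant-ness is reported,
 * not used to bypass profiling */
#define my_likely(x) ({						\
	int _r = __builtin_expect(!!(x), 1);			\
	profile_branch(__FILE__, __LINE__, _r, 1,		\
		       __builtin_constant_p(x));		\
	_r;							\
})

int main(int argc, char **argv)
{
	(void)argv;
	if (my_likely(argc > 0))	/* runtime value: constant=0 */
		puts("argc positive");
	if (my_likely(1))		/* literal: constant=1 */
		puts("taken");
	return 0;
}

Built at -O0, the first call reports constant=0 and the second constant=1.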
include/linux/jump_label.h (23 changes: 15 additions & 8 deletions)

@@ -89,11 +89,17 @@ extern bool static_key_initialized;
 
 struct static_key {
 	atomic_t enabled;
-/* Set lsb bit to 1 if branch is default true, 0 ot */
-	struct jump_entry *entries;
-#ifdef CONFIG_MODULES
-	struct static_key_mod *next;
-#endif
+/*
+ * bit 0 => 1 if key is initially true
+ *	    0 if initially false
+ * bit 1 => 1 if points to struct static_key_mod
+ *	    0 if points to struct jump_entry
+ */
+	union {
+		unsigned long type;
+		struct jump_entry *entries;
+		struct static_key_mod *next;
+	};
 };
 
 #else
@@ -118,9 +124,10 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#define JUMP_TYPE_FALSE	0UL
-#define JUMP_TYPE_TRUE	1UL
-#define JUMP_TYPE_MASK	1UL
+#define JUMP_TYPE_FALSE		0UL
+#define JUMP_TYPE_TRUE		1UL
+#define JUMP_TYPE_LINKED	2UL
+#define JUMP_TYPE_MASK		3UL
 
 static __always_inline bool static_key_false(struct static_key *key)
 {
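Widening JUMP_TYPE_MASK to two bits means key->type now carries three things in one word: the pointer itself, the initial branch direction in bit 0, and, in bit 1, whether that pointer refers to a static_key_mod list rather than a jump_entry table. A small userspace sketch of the decoding; the decode() helper and the sample addresses are made up:

#include <stdio.h>

#define JUMP_TYPE_TRUE		1UL
#define JUMP_TYPE_LINKED	2UL
#define JUMP_TYPE_MASK		3UL

static void decode(unsigned long type)
{
	/* strip the flag bits to recover the pointer, then read the flags */
	printf("ptr=%#lx initially_true=%lu points_to=%s\n",
	       type & ~JUMP_TYPE_MASK,
	       type & JUMP_TYPE_TRUE,
	       (type & JUMP_TYPE_LINKED) ? "static_key_mod list"
					 : "jump_entry table");
}

int main(void)
{
	decode(0x1000UL | JUMP_TYPE_TRUE);	/* direct entries, default-true */
	decode(0x2000UL | JUMP_TYPE_LINKED);	/* linked per-module records */
	return 0;
}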
include/linux/timer.h (2 changes: 2 additions & 0 deletions)

@@ -61,6 +61,8 @@ struct timer_list {
 #define TIMER_ARRAYSHIFT	22
 #define TIMER_ARRAYMASK		0xFFC00000
 
+#define TIMER_TRACE_FLAGMASK	(TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
+
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
 		.entry = { .next = TIMER_ENTRY_STATIC },	\
 		.function = (_function),			\
include/trace/events/timer.h (14 changes: 12 additions & 2 deletions)

@@ -36,6 +36,13 @@ DEFINE_EVENT(timer_class, timer_init,
 	TP_ARGS(timer)
 );
 
+#define decode_timer_flags(flags)			\
+	__print_flags(flags, "|",			\
+		{  TIMER_MIGRATING,	"M" },		\
+		{  TIMER_DEFERRABLE,	"D" },		\
+		{  TIMER_PINNED,	"P" },		\
+		{  TIMER_IRQSAFE,	"I" })
+
 /**
  * timer_start - called when the timer is started
  * @timer:	pointer to struct timer_list
@@ -65,9 +72,12 @@ TRACE_EVENT(timer_start,
 		__entry->flags		= flags;
 	),
 
-	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
+	TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
 		  __entry->timer, __entry->function, __entry->expires,
-		  (long)__entry->expires - __entry->now, __entry->flags)
+		  (long)__entry->expires - __entry->now,
+		  __entry->flags & TIMER_CPUMASK,
+		  __entry->flags >> TIMER_ARRAYSHIFT,
+		  decode_timer_flags(__entry->flags & TIMER_TRACE_FLAGMASK))
 );
 
 /**
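Together with TIMER_TRACE_FLAGMASK from include/linux/timer.h above, the tracepoint now unpacks a single flags word into three fields: the base CPU in the low bits, the wheel array index in bits 22 and up, and the four M/D/P/I state bits in between. A userspace sketch of the same decoding; the constants are meant to mirror include/linux/timer.h, show_timer_flags() is a made-up helper, and it concatenates the flag letters where the real __print_flags() would separate them with '|':

#include <stdio.h>

#define TIMER_CPUMASK		0x0003FFFF
#define TIMER_MIGRATING		0x00040000
#define TIMER_DEFERRABLE	0x00080000
#define TIMER_PINNED		0x00100000
#define TIMER_IRQSAFE		0x00200000
#define TIMER_ARRAYSHIFT	22

static void show_timer_flags(unsigned int flags)
{
	/* the same three fields the new TP_printk() prints */
	printf("cpu=%u idx=%u flags=%s%s%s%s\n",
	       flags & TIMER_CPUMASK,
	       flags >> TIMER_ARRAYSHIFT,
	       (flags & TIMER_MIGRATING)  ? "M" : "",
	       (flags & TIMER_DEFERRABLE) ? "D" : "",
	       (flags & TIMER_PINNED)     ? "P" : "",
	       (flags & TIMER_IRQSAFE)    ? "I" : "");
}

int main(void)
{
	/* timer on CPU 3, deferrable, wheel index 5: prints cpu=3 idx=5 flags=D */
	show_timer_flags(3u | TIMER_DEFERRABLE | (5u << TIMER_ARRAYSHIFT));
	return 0;
}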
kernel/jump_label.c (153 changes: 127 additions & 26 deletions)
@@ -236,12 +236,28 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
 
 static inline struct jump_entry *static_key_entries(struct static_key *key)
 {
-	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
+	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
+	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
 }
 
 static inline bool static_key_type(struct static_key *key)
 {
-	return (unsigned long)key->entries & JUMP_TYPE_MASK;
+	return key->type & JUMP_TYPE_TRUE;
+}
+
+static inline bool static_key_linked(struct static_key *key)
+{
+	return key->type & JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_clear_linked(struct static_key *key)
+{
+	key->type &= ~JUMP_TYPE_LINKED;
+}
+
+static inline void static_key_set_linked(struct static_key *key)
+{
+	key->type |= JUMP_TYPE_LINKED;
 }
 
 static inline struct static_key *jump_entry_key(struct jump_entry *entry)
@@ -254,6 +270,26 @@ static bool jump_entry_branch(struct jump_entry *entry)
 	return (unsigned long)entry->key & 1UL;
 }
 
+/***
+ * A 'struct static_key' uses a union such that it either points directly
+ * to a table of 'struct jump_entry' or to a linked list of modules which in
+ * turn point to 'struct jump_entry' tables.
+ *
+ * The two lower bits of the pointer are used to keep track of which pointer
+ * type is in use and to store the initial branch direction, we use an access
+ * function which preserves these bits.
+ */
+static void static_key_set_entries(struct static_key *key,
+				   struct jump_entry *entries)
+{
+	unsigned long type;
+
+	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
+	type = key->type & JUMP_TYPE_MASK;
+	key->entries = entries;
+	key->type |= type;
+}
+
 static enum jump_label_type jump_label_type(struct jump_entry *entry)
 {
 	struct static_key *key = jump_entry_key(entry);
@@ -313,13 +349,7 @@ void __init jump_label_init(void)
 			continue;
 
 		key = iterk;
-		/*
-		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-		 */
-		*((unsigned long *)&key->entries) += (unsigned long)iter;
-#ifdef CONFIG_MODULES
-		key->next = NULL;
-#endif
+		static_key_set_entries(key, iter);
 	}
 	static_key_initialized = true;
 	jump_label_unlock();
@@ -343,6 +373,29 @@ struct static_key_mod {
 	struct module *mod;
 };
 
+static inline struct static_key_mod *static_key_mod(struct static_key *key)
+{
+	WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
+	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
+}
+
+/***
+ * key->type and key->next are the same via union.
+ * This sets key->next and preserves the type bits.
+ *
+ * See additional comments above static_key_set_entries().
+ */
+static void static_key_set_mod(struct static_key *key,
+			       struct static_key_mod *mod)
+{
+	unsigned long type;
+
+	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
+	type = key->type & JUMP_TYPE_MASK;
+	key->next = mod;
+	key->type |= type;
+}
+
 static int __jump_label_mod_text_reserved(void *start, void *end)
 {
 	struct module *mod;
@@ -365,11 +418,23 @@ static void __jump_label_mod_update(struct static_key *key)
 {
 	struct static_key_mod *mod;
 
-	for (mod = key->next; mod; mod = mod->next) {
-		struct module *m = mod->mod;
+	for (mod = static_key_mod(key); mod; mod = mod->next) {
+		struct jump_entry *stop;
+		struct module *m;
+
+		/*
+		 * NULL if the static_key is defined in a module
+		 * that does not use it
+		 */
+		if (!mod->entries)
+			continue;
 
-		__jump_label_update(key, mod->entries,
-				    m->jump_entries + m->num_jump_entries);
+		m = mod->mod;
+		if (!m)
+			stop = __stop___jump_table;
+		else
+			stop = m->jump_entries + m->num_jump_entries;
+		__jump_label_update(key, mod->entries, stop);
 	}
 }
 
@@ -404,7 +469,7 @@ static int jump_label_add_module(struct module *mod)
 	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
 	struct jump_entry *iter;
 	struct static_key *key = NULL;
-	struct static_key_mod *jlm;
+	struct static_key_mod *jlm, *jlm2;
 
 	/* if the module doesn't have jump label entries, just return */
 	if (iter_start == iter_stop)
@@ -421,20 +486,32 @@
 
 		key = iterk;
 		if (within_module(iter->key, mod)) {
-			/*
-			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
-			 */
-			*((unsigned long *)&key->entries) += (unsigned long)iter;
-			key->next = NULL;
+			static_key_set_entries(key, iter);
 			continue;
 		}
 		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
 		if (!jlm)
 			return -ENOMEM;
+		if (!static_key_linked(key)) {
+			jlm2 = kzalloc(sizeof(struct static_key_mod),
+				       GFP_KERNEL);
+			if (!jlm2) {
+				kfree(jlm);
+				return -ENOMEM;
+			}
+			preempt_disable();
+			jlm2->mod = __module_address((unsigned long)key);
+			preempt_enable();
+			jlm2->entries = static_key_entries(key);
+			jlm2->next = NULL;
+			static_key_set_mod(key, jlm2);
+			static_key_set_linked(key);
+		}
 		jlm->mod = mod;
 		jlm->entries = iter;
-		jlm->next = key->next;
-		key->next = jlm;
+		jlm->next = static_key_mod(key);
+		static_key_set_mod(key, jlm);
+		static_key_set_linked(key);
 
 		/* Only update if we've changed from our initial state */
 		if (jump_label_type(iter) != jump_label_init_type(iter))
@@ -461,16 +538,34 @@ static void jump_label_del_module(struct module *mod)
 		if (within_module(iter->key, mod))
 			continue;
 
+		/* No memory during module load */
+		if (WARN_ON(!static_key_linked(key)))
+			continue;
+
 		prev = &key->next;
-		jlm = key->next;
+		jlm = static_key_mod(key);
 
 		while (jlm && jlm->mod != mod) {
 			prev = &jlm->next;
 			jlm = jlm->next;
 		}
 
-		if (jlm) {
+		/* No memory during module load */
+		if (WARN_ON(!jlm))
+			continue;
+
+		if (prev == &key->next)
+			static_key_set_mod(key, jlm->next);
+		else
 			*prev = jlm->next;
+
+		kfree(jlm);
+
+		jlm = static_key_mod(key);
+		/* if only one entry is left, fold it back into the static_key */
+		if (jlm->next == NULL) {
+			static_key_set_entries(key, jlm->entries);
+			static_key_clear_linked(key);
 			kfree(jlm);
 		}
 	}
@@ -499,8 +594,10 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
 	case MODULE_STATE_COMING:
 		jump_label_lock();
 		ret = jump_label_add_module(mod);
-		if (ret)
+		if (ret) {
+			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
 			jump_label_del_module(mod);
+		}
 		jump_label_unlock();
 		break;
 	case MODULE_STATE_GOING:
@@ -561,18 +658,22 @@ int jump_label_text_reserved(void *start, void *end)
 static void jump_label_update(struct static_key *key)
 {
 	struct jump_entry *stop = __stop___jump_table;
-	struct jump_entry *entry = static_key_entries(key);
+	struct jump_entry *entry;
 #ifdef CONFIG_MODULES
 	struct module *mod;
 
-	__jump_label_mod_update(key);
+	if (static_key_linked(key)) {
+		__jump_label_mod_update(key);
+		return;
+	}
 
 	preempt_disable();
 	mod = __module_address((unsigned long)key);
 	if (mod)
 		stop = mod->jump_entries + mod->num_jump_entries;
 	preempt_enable();
 #endif
+	entry = static_key_entries(key);
 	/* if there are no users, entry can be NULL */
 	if (entry)
 		__jump_label_update(key, entry, stop);
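The module bookkeeping above is easier to follow in miniature. A key starts out pointing straight at its jump_entry table; the first module user converts that into a linked list of per-module records, including one record for the key's original owner, and when a deletion leaves a single record the list is folded back into the direct pointer. Below is a userspace sketch with plain malloc/free, an explicit linked flag standing in for JUMP_TYPE_LINKED, and no error handling; every name in it is illustrative:

#include <stdio.h>
#include <stdlib.h>

struct entry_table { const char *owner; };	/* stands in for a jump_entry table */

struct mod_rec {				/* stands in for static_key_mod */
	struct mod_rec *next;
	struct entry_table *entries;
};

struct key {
	int linked;				/* stands in for JUMP_TYPE_LINKED */
	union {					/* same union as struct static_key */
		struct entry_table *entries;
		struct mod_rec *list;
	};
};

/* First module user converts the direct pointer into a list,
 * the way jump_label_add_module() does. Allocation errors ignored. */
static void key_add(struct key *k, struct entry_table *t)
{
	struct mod_rec *r = malloc(sizeof(*r));

	if (!k->linked) {
		struct mod_rec *first = malloc(sizeof(*first));

		first->entries = k->entries;	/* save before the union is reused */
		first->next = NULL;
		k->list = first;
		k->linked = 1;
	}
	r->entries = t;
	r->next = k->list;
	k->list = r;
}

/* Unlink one record; fold the list back when a single record remains,
 * the way jump_label_del_module() does. */
static void key_del(struct key *k, struct entry_table *t)
{
	struct mod_rec **prev = &k->list, *r = k->list;

	while (r && r->entries != t) {
		prev = &r->next;
		r = r->next;
	}
	if (!r)
		return;
	*prev = r->next;
	free(r);

	r = k->list;
	if (r && !r->next) {			/* one record left: fold back */
		k->entries = r->entries;
		k->linked = 0;
		free(r);
	}
}

int main(void)
{
	static struct entry_table core = { "vmlinux" }, m1 = { "module" };
	struct key k = { .linked = 0, .entries = &core };

	key_add(&k, &m1);	/* now a two-record list */
	key_del(&k, &m1);	/* folds back to the direct pointer */
	printf("linked=%d owner=%s\n", k.linked, k.entries->owner);
	return 0;
}

Running it prints "linked=0 owner=vmlinux": once the module's record is gone, the key is back to the compact, non-linked representation.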
