Skip to content

Commit

Permalink
Merge tag 'trace-v6.5-rc1-3' of git://git.kernel.org/pub/scm/linux/ke…
Browse files Browse the repository at this point in the history
…rnel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

 - Fix some missing-prototype warnings

 - Fix user events struct args (did not include size of struct)

   When creating a user event, the "struct" keyword is to denote that
   the size of the field will be passed in. But the parsing failed to
   handle this case.

 - Add selftest to struct sizes for user events

 - Fix sample code for direct trampolines.

   The sample code for direct trampolines attached to handle_mm_fault().
   But the prototype changed and the direct trampoline sample code was
   not updated. Direct trampolines needs to have the arguments correct
   otherwise it can fail or crash the system.

 - Remove unused ftrace_regs_caller_ret() prototype.

 - Quiet false positive of FORTIFY_SOURCE

   Due to backward compatibility, the structure used to save stack
   traces in the kernel had a fixed size of 8. This structure is
   exported to user space via the tracing format file. A change was made
   to allow more than 8 functions to be recorded, and user space now
   uses the size field to know how many functions are actually in the
   stack.

   But the structure still has a size of 8 (even though it points into
   the ring buffer, which has the required amount allocated to hold a
   full stack).

   This was fine until the fortifier noticed that the
   memcpy(&entry->caller, stack, size) was greater than the 8 functions
   and would complain at runtime about it.

   Hide this by using a pointer to the stack location on the ring buffer
   instead of using the address of the entry structure caller field.

 - Fix a deadloop in reading trace_pipe that was caused by a mismatch
   between ring_buffer_empty() returning false which then asked to read
   the data, but the read code uses rb_num_of_entries() that returned
   zero, causing an infinite "retry".

 - Fix a warning caused by not using all pages allocated to store ftrace
   functions, which can happen if the linker inserts a bunch of
   "NULL" entries, causing the accounting of how many pages needed to be
   off.

 - Fix histogram synthetic event crashing when the start event is
   removed and the end event is still using a variable from it

 - Fix memory leak in freeing iter->temp in tracing_release_pipe()

* tag 'trace-v6.5-rc1-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: Fix memory leak of iter->temp when reading trace_pipe
  tracing/histograms: Add histograms to hist_vars if they have referenced variables
  tracing: Stop FORTIFY_SOURCE complaining about stack trace caller
  ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()
  ring-buffer: Fix deadloop issue on reading trace_pipe
  tracing: arm64: Avoid missing-prototype warnings
  selftests/user_events: Test struct size match cases
  tracing/user_events: Fix struct arg size match check
  x86/ftrace: Remove unsued extern declaration ftrace_regs_caller_ret()
  arm64: ftrace: Add direct call trampoline samples support
  samples: ftrace: Save required argument registers in sample trampolines
  • Loading branch information
torvalds committed Jul 13, 2023
2 parents 1599932 + d5a8218 commit ebc27aa
Show file tree
Hide file tree
Showing 20 changed files with 268 additions and 40 deletions.
2 changes: 2 additions & 0 deletions arch/arm64/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,8 @@ config ARM64
!CC_OPTIMIZE_FOR_SIZE)
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
if DYNAMIC_FTRACE_WITH_ARGS
select HAVE_SAMPLE_FTRACE_DIRECT
select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
Expand Down
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/ftrace.h
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,10 @@ static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs
{
return ret_regs->fp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer);

#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
#endif

Expand Down
3 changes: 3 additions & 0 deletions arch/arm64/include/asm/syscall.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,4 +85,7 @@ static inline int syscall_get_arch(struct task_struct *task)
return AUDIT_ARCH_AARCH64;
}

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

#endif /* __ASM_SYSCALL_H */
3 changes: 0 additions & 3 deletions arch/arm64/kernel/syscall.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,9 +75,6 @@ static inline bool has_syscall_work(unsigned long flags)
return unlikely(flags & _TIF_SYSCALL_WORK);
}

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
Expand Down
1 change: 0 additions & 1 deletion arch/x86/kernel/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -282,7 +282,6 @@ static inline void tramp_free(void *tramp) { }

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
Expand Down
9 changes: 9 additions & 0 deletions include/linux/ftrace.h
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,15 @@ struct ftrace_ops;
struct ftrace_regs;
struct dyn_ftrace;

char *arch_ftrace_match_adjust(char *str, const char *search);

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL
struct fgraph_ret_regs;
unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs);
#else
unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
#endif

#ifdef CONFIG_FUNCTION_TRACER
/*
* If the arch's mcount caller does not support all of ftrace's
Expand Down
1 change: 1 addition & 0 deletions kernel/trace/fgraph.c
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#include <trace/events/sched.h>

#include "ftrace_internal.h"
#include "trace.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
Expand Down
45 changes: 31 additions & 14 deletions kernel/trace/ftrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -3305,6 +3305,22 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
return cnt;
}

/*
 * Free a linked list of ftrace_page structures.
 *
 * For every page on the list, release the backing record pages (if any)
 * and the ftrace_page itself, keeping the global page/group accounting
 * (ftrace_number_of_pages / ftrace_number_of_groups) in sync.
 */
static void ftrace_free_pages(struct ftrace_page *pages)
{
	struct ftrace_page *cur = pages;
	struct ftrace_page *next;

	while (cur) {
		/* Grab the successor before freeing the current node. */
		next = cur->next;
		if (cur->records) {
			free_pages((unsigned long)cur->records, cur->order);
			ftrace_number_of_pages -= 1 << cur->order;
		}
		kfree(cur);
		ftrace_number_of_groups--;
		cur = next;
	}
}

static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
Expand Down Expand Up @@ -3343,17 +3359,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
return start_pg;

free_pages:
pg = start_pg;
while (pg) {
if (pg->records) {
free_pages((unsigned long)pg->records, pg->order);
ftrace_number_of_pages -= 1 << pg->order;
}
start_pg = pg->next;
kfree(pg);
pg = start_pg;
ftrace_number_of_groups--;
}
ftrace_free_pages(start_pg);
pr_info("ftrace: FAILED to allocate memory for functions\n");
return NULL;
}
Expand Down Expand Up @@ -6471,9 +6477,11 @@ static int ftrace_process_locs(struct module *mod,
unsigned long *start,
unsigned long *end)
{
struct ftrace_page *pg_unuse = NULL;
struct ftrace_page *start_pg;
struct ftrace_page *pg;
struct dyn_ftrace *rec;
unsigned long skipped = 0;
unsigned long count;
unsigned long *p;
unsigned long addr;
Expand Down Expand Up @@ -6536,8 +6544,10 @@ static int ftrace_process_locs(struct module *mod,
* object files to satisfy alignments.
* Skip any NULL pointers.
*/
if (!addr)
if (!addr) {
skipped++;
continue;
}

end_offset = (pg->index+1) * sizeof(pg->records[0]);
if (end_offset > PAGE_SIZE << pg->order) {
Expand All @@ -6551,8 +6561,10 @@ static int ftrace_process_locs(struct module *mod,
rec->ip = addr;
}

/* We should have used all pages */
WARN_ON(pg->next);
if (pg->next) {
pg_unuse = pg->next;
pg->next = NULL;
}

/* Assign the last page to ftrace_pages */
ftrace_pages = pg;
Expand All @@ -6574,6 +6586,11 @@ static int ftrace_process_locs(struct module *mod,
out:
mutex_unlock(&ftrace_lock);

/* We should have used all pages unless we skipped some */
if (pg_unuse) {
WARN_ON(!skipped);
ftrace_free_pages(pg_unuse);
}
return ret;
}

Expand Down
5 changes: 3 additions & 2 deletions kernel/trace/ftrace_internal.h
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,9 @@
#ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H
#define _LINUX_KERNEL_FTRACE_INTERNAL_H

int __register_ftrace_function(struct ftrace_ops *ops);
int __unregister_ftrace_function(struct ftrace_ops *ops);

#ifdef CONFIG_FUNCTION_TRACER

extern struct mutex ftrace_lock;
Expand All @@ -15,8 +18,6 @@ int ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs);

#else /* !CONFIG_DYNAMIC_FTRACE */

int __register_ftrace_function(struct ftrace_ops *ops);
int __unregister_ftrace_function(struct ftrace_ops *ops);
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(ops, command) \
({ \
Expand Down
24 changes: 15 additions & 9 deletions kernel/trace/ring_buffer.c
Original file line number Diff line number Diff line change
Expand Up @@ -5242,28 +5242,34 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

/*
 * Reset the bookkeeping of a single buffer page so it reads as empty:
 * zero the write and entry counters, reinitialize the page payload,
 * and rewind the read offset.
 */
static void rb_clear_buffer_page(struct buffer_page *bpage)
{
	local_set(&bpage->write, 0);
	local_set(&bpage->entries, 0);
	rb_init_page(bpage->page);
	bpage->read = 0;
}

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *page;

rb_head_page_deactivate(cpu_buffer);

cpu_buffer->head_page
= list_entry(cpu_buffer->pages, struct buffer_page, list);
local_set(&cpu_buffer->head_page->write, 0);
local_set(&cpu_buffer->head_page->entries, 0);
local_set(&cpu_buffer->head_page->page->commit, 0);

cpu_buffer->head_page->read = 0;
rb_clear_buffer_page(cpu_buffer->head_page);
list_for_each_entry(page, cpu_buffer->pages, list) {
rb_clear_buffer_page(page);
}

cpu_buffer->tail_page = cpu_buffer->head_page;
cpu_buffer->commit_page = cpu_buffer->head_page;

INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
INIT_LIST_HEAD(&cpu_buffer->new_pages);
local_set(&cpu_buffer->reader_page->write, 0);
local_set(&cpu_buffer->reader_page->entries, 0);
local_set(&cpu_buffer->reader_page->page->commit, 0);
cpu_buffer->reader_page->read = 0;
rb_clear_buffer_page(cpu_buffer->reader_page);

local_set(&cpu_buffer->entries_bytes, 0);
local_set(&cpu_buffer->overrun, 0);
Expand Down
22 changes: 20 additions & 2 deletions kernel/trace/trace.c
Original file line number Diff line number Diff line change
Expand Up @@ -3118,6 +3118,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
struct ftrace_stack *fstack;
struct stack_entry *entry;
int stackidx;
void *ptr;

/*
* Add one, for this function and the call to save_stack_trace()
Expand Down Expand Up @@ -3161,9 +3162,25 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
trace_ctx);
if (!event)
goto out;
entry = ring_buffer_event_data(event);
ptr = ring_buffer_event_data(event);
entry = ptr;

/*
* For backward compatibility reasons, the entry->caller is an
* array of 8 slots to store the stack. This is also exported
* to user space. The amount allocated on the ring buffer actually
* holds enough for the stack specified by nr_entries. This will
* go into the location of entry->caller. Due to string fortifiers
* checking the size of the destination of memcpy() it triggers
* when it detects that size is greater than 8. To hide this from
* the fortifiers, we use "ptr" and pointer arithmetic to assign caller.
*
* The below is really just:
* memcpy(&entry->caller, fstack->calls, size);
*/
ptr += offsetof(typeof(*entry), caller);
memcpy(ptr, fstack->calls, size);

memcpy(&entry->caller, fstack->calls, size);
entry->size = nr_entries;

if (!call_filter_check_discard(call, entry, buffer, event))
Expand Down Expand Up @@ -6764,6 +6781,7 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)

free_cpumask_var(iter->started);
kfree(iter->fmt);
kfree(iter->temp);
mutex_destroy(&iter->mutex);
kfree(iter);

Expand Down
8 changes: 5 additions & 3 deletions kernel/trace/trace_events_hist.c
Original file line number Diff line number Diff line change
Expand Up @@ -6663,13 +6663,15 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops,
if (get_named_trigger_data(trigger_data))
goto enable;

if (has_hist_vars(hist_data))
save_hist_vars(hist_data);

ret = create_actions(hist_data);
if (ret)
goto out_unreg;

if (has_hist_vars(hist_data) || hist_data->n_var_refs) {
if (save_hist_vars(hist_data))
goto out_unreg;
}

ret = tracing_map_init(hist_data->map);
if (ret)
goto out_unreg;
Expand Down
3 changes: 3 additions & 0 deletions kernel/trace/trace_events_user.c
Original file line number Diff line number Diff line change
Expand Up @@ -1317,6 +1317,9 @@ static int user_field_set_string(struct ftrace_event_field *field,
pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

if (str_has_prefix(field->type, "struct "))
pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size);

if (colon)
pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

Expand Down
3 changes: 3 additions & 0 deletions kernel/trace/trace_kprobe_selftest.c
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0

#include "trace_kprobe_selftest.h"

/*
* Function used during the kprobe self test. This function is in a separate
* compile unit so it can be compile with CC_FLAGS_FTRACE to ensure that it
Expand Down
34 changes: 34 additions & 0 deletions samples/ftrace/ftrace-direct-modify.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,9 @@
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/ftrace.h>
#ifndef CONFIG_ARM64
#include <asm/asm-offsets.h>
#endif

extern void my_direct_func1(void);
extern void my_direct_func2(void);
Expand Down Expand Up @@ -96,6 +98,38 @@ asm (

#endif /* CONFIG_S390 */

#ifdef CONFIG_ARM64

/*
 * arm64 direct-call trampolines.
 *
 * NOTE(review): on entry x30 holds the return address back into the
 * traced function, and x9 presumably carries that function's original
 * LR (per the arm64 ftrace patchable-function-entry convention — TODO
 * confirm against arch/arm64/kernel/entry-ftrace.S). The stp/ldp pair
 * below intentionally swaps the two registers: {x9, x30} is stored but
 * reloaded as {x30, x9}, so that "ret x9" branches back into the traced
 * function while x30 ends up holding the original LR it expects. The
 * apparent mismatch is deliberate, not a bug.
 */
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp1, @function\n"
" .globl my_tramp1\n"
" my_tramp1:"
/* Branch-target-identification landing pad. */
" bti c\n"
/* Save x9 and x30 across the call to the C handler. */
" sub sp, sp, #16\n"
" stp x9, x30, [sp]\n"
" bl my_direct_func1\n"
/* Reload swapped: x30 <- saved x9, x9 <- saved x30 (see note above). */
" ldp x30, x9, [sp]\n"
" add sp, sp, #16\n"
" ret x9\n"
" .size my_tramp1, .-my_tramp1\n"

/* Second trampoline: identical save/call/restore shape for func2. */
" .type my_tramp2, @function\n"
" .globl my_tramp2\n"
" my_tramp2:"
" bti c\n"
" sub sp, sp, #16\n"
" stp x9, x30, [sp]\n"
" bl my_direct_func2\n"
" ldp x30, x9, [sp]\n"
" add sp, sp, #16\n"
" ret x9\n"
" .size my_tramp2, .-my_tramp2\n"
" .popsection\n"
);

#endif /* CONFIG_ARM64 */

#ifdef CONFIG_LOONGARCH

asm (
Expand Down
Loading

0 comments on commit ebc27aa

Please sign in to comment.