ftrace: introduce CFG_FTRACE_BUF_WHEN_FULL
Function tracing can become extremely slow when a big buffer size is
used (say, CFG_FTRACE_BUF_SIZE=6000000 instead of the default 2048
bytes). This is because of the "shifting" algorithm used when the buffer
is full, which copies almost the entire buffer before inserting each new
line.

To mitigate this problem, this patch introduces two new methods of
handling the buffer-full condition:

1. Discard existing data and write new lines to the beginning of the
   buffer.
2. Stop adding new lines.

The method can be selected at build time with CFG_FTRACE_BUF_WHEN_FULL.
Supported values are "shift" (the default, which preserves the current
behavior), "wrap" and "stop".

Signed-off-by: Jerome Forissier <[email protected]>
Acked-by: Jens Wiklander <[email protected]>
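
To make the cost difference concrete, here is a rough standalone sketch of the
three policies operating on a plain byte buffer. This is not the OP-TEE
implementation and every name in it is invented for illustration; the point is
only that "shift" copies the remaining buffer contents on every insertion once
the buffer is full, whereas "wrap" and "stop" are constant time.

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

enum when_full { WHEN_FULL_SHIFT, WHEN_FULL_WRAP, WHEN_FULL_STOP };

struct trace_buf {
	char data[2048];
	size_t used;
};

static bool make_room(struct trace_buf *tb, size_t need, enum when_full policy)
{
	switch (policy) {
	case WHEN_FULL_SHIFT:
		/* O(buffer size): drop the oldest data, keep the newest */
		if (need >= tb->used) {
			tb->used = 0;
			return true;
		}
		memmove(tb->data, tb->data + need, tb->used - need);
		tb->used -= need;
		return true;
	case WHEN_FULL_WRAP:
		/* O(1): discard everything recorded so far, restart at offset 0 */
		tb->used = 0;
		return true;
	case WHEN_FULL_STOP:
	default:
		/* O(1): keep what was recorded, refuse new records */
		return false;
	}
}

static bool add_record(struct trace_buf *tb, const char *rec, size_t len,
		       enum when_full policy)
{
	if (len > sizeof(tb->data))
		return false;
	if (tb->used + len > sizeof(tb->data) && !make_room(tb, len, policy))
		return false;
	memcpy(tb->data + tb->used, rec, len);
	tb->used += len;
	return true;
}

With "shift", every add_record() call near capacity costs a memmove() of close
to the whole buffer, which is what makes very large CFG_FTRACE_BUF_SIZE values
so slow; "wrap" trades that cost for losing the older records, and "stop" for
losing the newer ones.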
jforissier committed Feb 12, 2020
1 parent c20f0d1 commit d408db9
Showing 2 changed files with 80 additions and 29 deletions.
99 changes: 70 additions & 29 deletions lib/libutils/ext/ftrace/ftrace.c
@@ -59,11 +59,13 @@ static __noprof struct ftrace_buf *get_fbuf(void)
#endif
}

#if defined(_CFG_FTRACE_BUF_WHEN_FULL_shift)

/*
* This API shifts/moves ftrace buffer to create space for new dump
* in case the buffer size falls short of actual dump.
*/
static void __noprof fbuf_shift(struct ftrace_buf *fbuf, size_t size)
static bool __noprof fbuf_make_room(struct ftrace_buf *fbuf, size_t size)
{
char *dst = (char *)fbuf + fbuf->buf_off;
const char *src = (char *)fbuf + fbuf->buf_off + size;
@@ -73,8 +75,36 @@ static void __noprof fbuf_shift(struct ftrace_buf *fbuf, size_t size)

for (n = 0; n < fbuf->curr_size; n++)
dst[n] = src[n];

return true;
}

#elif defined(_CFG_FTRACE_BUF_WHEN_FULL_wrap)

/* Makes room in the trace buffer by discarding the previously recorded data. */
static bool __noprof fbuf_make_room(struct ftrace_buf *fbuf,
size_t size)
{
if (fbuf->buf_off + size > fbuf->max_size)
return false;

fbuf->curr_size = 0;

return true;
}

#elif defined(_CFG_FTRACE_BUF_WHEN_FULL_stop)

static bool __noprof fbuf_make_room(struct ftrace_buf *fbuf __unused,
size_t size __unused)
{
return false;
}

#else
#error CFG_FTRACE_BUF_WHEN_FULL value not supported
#endif

static size_t __noprof to_func_enter_fmt(char *buf, uint32_t ret_idx,
unsigned long pc)
{
@@ -108,6 +138,7 @@ void __noprof ftrace_enter(unsigned long pc, unsigned long *lr)
{
struct ftrace_buf *fbuf = NULL;
size_t dump_size = 0;
bool full = false;

fbuf = get_fbuf();

@@ -118,16 +149,19 @@ void __noprof ftrace_enter(unsigned long pc, unsigned long *lr)
(2 * sizeof(unsigned long)) + 8;

/*
* Check if we have enough space in ftrace buffer. If not then just
* remove oldest dump under the assumption that its the least
* interesting data.
* Check if we have enough space in ftrace buffer. If not then try to
* make room.
*/
if ((fbuf->curr_size + dump_size) > fbuf->max_size)
fbuf_shift(fbuf, dump_size);
full = (fbuf->curr_size + dump_size) > fbuf->max_size;
if (full)
full = !fbuf_make_room(fbuf, dump_size);

fbuf->curr_size += to_func_enter_fmt((char *)fbuf + fbuf->buf_off +
fbuf->curr_size, fbuf->ret_idx,
pc);
if (!full)
fbuf->curr_size += to_func_enter_fmt((char *)fbuf +
fbuf->buf_off +
fbuf->curr_size,
fbuf->ret_idx,
pc);

if (fbuf->ret_idx < FTRACE_RETFUNC_DEPTH) {
fbuf->ret_stack[fbuf->ret_idx] = *lr;
@@ -237,27 +271,34 @@ unsigned long __noprof ftrace_return(void)
ftrace_duration(dur_loc, fbuf->begin_time[fbuf->ret_idx],
read_cntpct());
} else {
dump_size = DURATION_MAX_LEN + fbuf->ret_idx + 3;
if ((fbuf->curr_size + dump_size) > fbuf->max_size)
fbuf_shift(fbuf, dump_size);

curr_buf = (char *)fbuf + fbuf->buf_off + fbuf->curr_size;

for (i = 0; i < (DURATION_MAX_LEN + fbuf->ret_idx); i++)
if (i == (DURATION_MAX_LEN - 2))
*curr_buf++ = '|';
else
*curr_buf++ = ' ';
bool full = false;

*curr_buf++ = '}';
*curr_buf++ = '\n';
*curr_buf = '\0';

fbuf->curr_size += dump_size - 1;

dur_loc = curr_buf - fbuf->ret_idx - 6;
ftrace_duration(dur_loc, fbuf->begin_time[fbuf->ret_idx],
read_cntpct());
dump_size = DURATION_MAX_LEN + fbuf->ret_idx + 3;
full = (fbuf->curr_size + dump_size) > fbuf->max_size;
if (full)
full = !fbuf_make_room(fbuf, dump_size);

if (!full) {
curr_buf = (char *)fbuf + fbuf->buf_off +
fbuf->curr_size;

for (i = 0; i < (DURATION_MAX_LEN + fbuf->ret_idx); i++)
if (i == (DURATION_MAX_LEN - 2))
*curr_buf++ = '|';
else
*curr_buf++ = ' ';

*curr_buf++ = '}';
*curr_buf++ = '\n';
*curr_buf = '\0';

fbuf->curr_size += dump_size - 1;

dur_loc = curr_buf - fbuf->ret_idx - 6;
ftrace_duration(dur_loc,
fbuf->begin_time[fbuf->ret_idx],
read_cntpct());
}
}

return fbuf->ret_stack[fbuf->ret_idx];
10 changes: 10 additions & 0 deletions mk/config.mk
@@ -373,6 +373,16 @@ CFG_FTRACE_SUPPORT ?= n
# map the TA is increased by this value compared to when ftrace is disabled.
CFG_FTRACE_BUF_SIZE ?= 2048

# How to make room when the function tracing buffer is full?
# 'shift': shift the previously stored data by the amount needed in order
# to always keep the latest logs (slower, especially with big buffer sizes)
# 'wrap': discard the previous data and start at the beginning of the buffer
# again (fast, but can result in a mostly empty buffer)
# 'stop': stop logging new data
CFG_FTRACE_BUF_WHEN_FULL ?= shift
$(call cfg-check-value,FTRACE_BUF_WHEN_FULL,shift stop wrap)
$(call force,_CFG_FTRACE_BUF_WHEN_FULL_$(CFG_FTRACE_BUF_WHEN_FULL),y)

# Function tracing: unit to be used when displaying durations
# 0: always display durations in microseconds
# >0: if duration is greater or equal to the specified value (in microseconds),
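
For reference, the per-value symbol forced above, _CFG_FTRACE_BUF_WHEN_FULL_<value>,
is what the #if/#elif ladder in ftrace.c tests in order to pick one
fbuf_make_room() implementation at compile time. A minimal consumer-side sketch
of that pattern follows; FTRACE_FULL_POLICY is a made-up name used only to show
that exactly one of the three symbols is expected to be defined:

#if defined(_CFG_FTRACE_BUF_WHEN_FULL_shift)
#define FTRACE_FULL_POLICY "shift"	/* keep the newest data, copy on every insert */
#elif defined(_CFG_FTRACE_BUF_WHEN_FULL_wrap)
#define FTRACE_FULL_POLICY "wrap"	/* restart at the beginning of the buffer */
#elif defined(_CFG_FTRACE_BUF_WHEN_FULL_stop)
#define FTRACE_FULL_POLICY "stop"	/* stop recording new data */
#else
#error CFG_FTRACE_BUF_WHEN_FULL value not supported
#endif

Building with, say, CFG_FTRACE_BUF_WHEN_FULL=wrap on the make command line is
then expected to compile only the wrap variant, while a value outside
shift/wrap/stop should be rejected by the cfg-check-value guard at
configuration time.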
