struct exec_domain;
#include <asm/processor.h>
#include <asm/ftrace.h>
+#include <asm/atomic.h>
struct thread_info {
struct task_struct *task; /* main task structure */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack ret_stack[FTRACE_RET_STACK_SIZE];
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
#endif
};
.fn = do_no_restart_syscall, \
}, \
.curr_ret_stack = -1, \
+ .trace_overrun = ATOMIC_INIT(0) \
}
#else
#define INIT_THREAD_INFO(tsk) \
struct thread_info *ti = current_thread_info();
/* The return trace stack is full */
- if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1)
+ if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
+ atomic_inc(&ti->trace_overrun);
return -EBUSY;
+ }
index = ++ti->curr_ret_stack;
barrier();
/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(unsigned long *ret, unsigned long long *time,
- unsigned long *func)
+ unsigned long *func, unsigned long *overrun)
{
int index;
*ret = ti->ret_stack[index].ret;
*func = ti->ret_stack[index].func;
*time = ti->ret_stack[index].calltime;
+ *overrun = atomic_read(&ti->trace_overrun);
ti->curr_ret_stack--;
}
unsigned long ftrace_return_to_handler(void)
{
struct ftrace_retfunc trace;
- pop_return_trace(&trace.ret, &trace.calltime, &trace.func);
+ pop_return_trace(&trace.ret, &trace.calltime, &trace.func,
+ &trace.overrun);
trace.rettime = cpu_clock(raw_smp_processor_id());
ftrace_function_return(&trace);
unsigned long func; /* Current function */
unsigned long long calltime;
unsigned long long rettime;
+ /* Number of functions that overran the depth limit for current task */
+ unsigned long overrun;
};
#ifdef CONFIG_FUNCTION_RET_TRACER
* used.
*/
task_thread_info(p)->curr_ret_stack = -1;
+ atomic_set(&task_thread_info(p)->trace_overrun, 0);
#endif
}
entry->parent_ip = trace->ret;
entry->rettime = trace->rettime;
entry->calltime = trace->calltime;
+ entry->overrun = trace->overrun;
ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags);
}
#endif
unsigned long parent_ip;
unsigned long long calltime;
unsigned long long rettime;
+ unsigned long overrun;
};
extern struct tracer boot_tracer;
#include "trace.h"
+#define TRACE_RETURN_PRINT_OVERRUN 0x1
+static struct tracer_opt trace_opts[] = {
+ /* Display overruns or not */
+ { TRACER_OPT(overrun, TRACE_RETURN_PRINT_OVERRUN) },
+ { } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+ .val = 0, /* Don't display overruns by default */
+ .opts = trace_opts
+};
+
static int return_trace_init(struct trace_array *tr)
{
int cpu;
ret = trace_seq_printf(s, "%pF -> ", (void *)field->parent_ip);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
+
ret = seq_print_ip_sym(s, field->ip,
trace_flags & TRACE_ITER_SYM_MASK);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- ret = trace_seq_printf(s, " (%llu ns)\n",
+
+ ret = trace_seq_printf(s, " (%llu ns)",
field->rettime - field->calltime);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
- else
- return TRACE_TYPE_HANDLED;
+
+ if (tracer_flags.val & TRACE_RETURN_PRINT_OVERRUN) {
+ ret = trace_seq_printf(s, " (Overruns: %lu)",
+ field->overrun);
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+ }
+
+ ret = trace_seq_printf(s, "\n");
+ if (!ret)
+ return TRACE_TYPE_PARTIAL_LINE;
+
+ return TRACE_TYPE_HANDLED;
}
return TRACE_TYPE_UNHANDLED;
}
-static struct tracer return_trace __read_mostly =
-{
+static struct tracer return_trace __read_mostly = {
.name = "return",
.init = return_trace_init,
.reset = return_trace_reset,
- .print_line = print_return_function
+ .print_line = print_return_function,
+ .flags = &tracer_flags,
};
static __init int init_return_trace(void)