CFLAGS_REMOVE_tsc.o = -pg
CFLAGS_REMOVE_rtc.o = -pg
CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
++ifdef CONFIG_FUNCTION_GRAPH_TRACER
++# With the graph tracer configured, don't trace __switch_to() at all;
++# with only the plain function tracer, it stays traceable.
++CFLAGS_REMOVE_process_32.o = -pg
++CFLAGS_REMOVE_process_64.o = -pg
+ endif
+
#
# vsyscalls (which work on the user stack) should have
# no stack-protector checks:
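For context, -pg makes gcc insert a call to the profiling hook (mcount on x86 of this vintage) at every function entry, which is what the function and graph tracers latch onto; CFLAGS_REMOVE_<object>.o strips that flag for just the listed objects, so nothing in those files can be traced. A hand-written sketch of what the instrumentation amounts to (illustrative only; the real call is emitted by the compiler, never written by hand):

extern void mcount(void);	/* tracer entry hook provided by the kernel */

void traced_function(void)
{
	mcount();		/* inserted by gcc when the object keeps -pg */
	/* ... function body ... */
}

void untraced_function(void)	/* object built with -pg removed: no hook */
{
	/* ... function body ... */
}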
#ifdef CONFIG_LATENCYTOP
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
+#endif
+ /*
+ * time slack values; these are used to round up poll() and
+ * select() etc timeout values. These are in nanoseconds.
+ */
+ unsigned long timer_slack_ns;
+ unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
++#ifdef CONFIG_FUNCTION_GRAPH_TRACER
++ /* Index of current stored address in ret_stack */
++ int curr_ret_stack;
++ /* Stack of return addresses for return function tracing */
++ struct ftrace_ret_stack *ret_stack;
++ /*
++ * Number of functions that haven't been traced
++ * because of depth overrun.
++ */
++ atomic_t trace_overrun;
+ #endif
};
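The timer slack pair added above is per task: poll(), select(), and similar timeouts may be rounded up by timer_slack_ns, and default_timer_slack_ns is the value it resets to. Userspace tunes it through prctl(); a small standalone example (the 50000 ns value is arbitrary, chosen for illustration):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK		/* introduced alongside these fields */
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
#endif

int main(void)
{
	/* allow this task's timeouts to be rounded up by 50us;
	 * passing 0 instead restores default_timer_slack_ns */
	if (prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0) != 0)
		perror("prctl");

	printf("timer slack: %ld ns\n", (long)prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0));
	return 0;
}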
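The CONFIG_FUNCTION_GRAPH_TRACER fields give each task its own stack of saved return addresses: ret_stack is the array, curr_ret_stack indexes its top entry, and trace_overrun counts calls that could not be recorded because the stack was already full. A sketch of how an entry might be pushed, assuming a FTRACE_RETFUNC_DEPTH limit and the struct layout shown here (both are illustrative stand-ins, not the tracer's actual code):

#define FTRACE_RETFUNC_DEPTH 50			/* assumed per-task depth limit */

struct ftrace_ret_stack {
	unsigned long		ret;		/* original return address */
	unsigned long		func;		/* traced function's address */
	unsigned long long	calltime;	/* timestamp at entry */
};

static int push_return_trace(struct task_struct *t, unsigned long ret,
			     unsigned long func, unsigned long long time)
{
	/* stack full: drop the entry but remember that we did */
	if (t->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&t->trace_overrun);
		return -EBUSY;
	}

	t->curr_ret_stack++;
	t->ret_stack[t->curr_ret_stack].ret = ret;
	t->ret_stack[t->curr_ret_stack].func = func;
	t->ret_stack[t->curr_ret_stack].calltime = time;

	return 0;
}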
#ifdef CONFIG_SMP
- static inline void profile_nop(void *unused)
-static void __init profile_nop(void *unused)
++static void profile_nop(void *unused)
{
}
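profile_nop() exists only so its address can be passed to on_each_cpu() elsewhere in kernel/profile.c as a do-nothing cross-CPU callback; the merged version presumably drops both qualifiers because a function whose address is taken gains nothing from inline, and __init code is discarded after boot even though a caller may still run. The pattern, with an illustrative call site rather than the file's actual one:

static void profile_nop(void *unused)
{
}

static void sync_with_all_cpus(void)
{
	/* run the no-op on every online CPU and wait for it to finish;
	 * used purely as a synchronization point */
	on_each_cpu(profile_nop, NULL, 1);
}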
static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
++ unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
-- if (rq->nr_running)
-- rq->avg_load_per_task = rq->load.weight / rq->nr_running;
++ if (nr_running)
++ rq->avg_load_per_task = rq->load.weight / nr_running;
+ else
+ rq->avg_load_per_task = 0;
return rq->avg_load_per_task;
}
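The cpu_avg_load_per_task() change closes a divide-by-zero race: rq->nr_running was read twice, once in the test and once as the divisor, and another CPU could drop it to zero between the two reads. Loading it once through ACCESS_ONCE() into a local guarantees the test and the division see the same snapshot. For reference, ACCESS_ONCE() in the compiler.h of this era boils down to a volatile cast that forces a single load:

/* keeps the compiler from re-reading (or caching) the value:
 * every expansion is exactly one volatile access */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))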