Skip to content

Commit 4322708

Browse files
Joel Fernandes authored and pundiramit committed
FROMLIST: tracing: Add support for preempt and irq enable/disable events
Preempt and irq trace events can be used for tracing the start and end of an atomic section which can be used by a trace viewer like systrace to graphically view the start and end of an atomic section and correlate them with latencies and scheduling issues. This also serves as a prelude to using synthetic events or probes to rewrite the preempt and irqsoff tracers, along with numerous benefits of using trace events features for these events.

Change-Id: I718d40f7c3c48579adf9d7121b21495a669c89bd
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zilstra <peterz@infradead.org>
Cc: kernel-team@android.com
Link: https://patchwork.kernel.org/patch/9988157/
Signed-off-by: Joel Fernandes <joelaf@google.com>
1 parent 6ef223a commit 4322708

5 files changed

Lines changed: 118 additions & 2 deletions

File tree

include/linux/ftrace.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -702,7 +702,8 @@ static inline void __ftrace_enabled_restore(int enabled)
702702
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
703703
#endif
704704

705-
#ifdef CONFIG_PREEMPT_TRACER
705+
#if defined(CONFIG_PREEMPT_TRACER) || \
706+
(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
706707
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
707708
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
708709
#else

include/trace/events/preemptirq.h

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
#ifdef CONFIG_PREEMPTIRQ_EVENTS
2+
3+
#undef TRACE_SYSTEM
4+
#define TRACE_SYSTEM preemptirq
5+
6+
#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
7+
#define _TRACE_PREEMPTIRQ_H
8+
9+
#include <linux/ktime.h>
10+
#include <linux/tracepoint.h>
11+
#include <linux/string.h>
12+
#include <asm/sections.h>
13+
14+
DECLARE_EVENT_CLASS(preemptirq_template,
15+
16+
TP_PROTO(unsigned long ip, unsigned long parent_ip),
17+
18+
TP_ARGS(ip, parent_ip),
19+
20+
TP_STRUCT__entry(
21+
__field(u32, caller_offs)
22+
__field(u32, parent_offs)
23+
),
24+
25+
TP_fast_assign(
26+
__entry->caller_offs = (u32)(ip - (unsigned long)_stext);
27+
__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
28+
),
29+
30+
TP_printk("caller=%pF parent=%pF",
31+
(void *)((unsigned long)(_stext) + __entry->caller_offs),
32+
(void *)((unsigned long)(_stext) + __entry->parent_offs))
33+
);
34+
35+
#ifndef CONFIG_PROVE_LOCKING
36+
DEFINE_EVENT(preemptirq_template, irq_disable,
37+
TP_PROTO(unsigned long ip, unsigned long parent_ip),
38+
TP_ARGS(ip, parent_ip));
39+
40+
DEFINE_EVENT(preemptirq_template, irq_enable,
41+
TP_PROTO(unsigned long ip, unsigned long parent_ip),
42+
TP_ARGS(ip, parent_ip));
43+
#endif
44+
45+
#ifdef CONFIG_DEBUG_PREEMPT
46+
DEFINE_EVENT(preemptirq_template, preempt_disable,
47+
TP_PROTO(unsigned long ip, unsigned long parent_ip),
48+
TP_ARGS(ip, parent_ip));
49+
50+
DEFINE_EVENT(preemptirq_template, preempt_enable,
51+
TP_PROTO(unsigned long ip, unsigned long parent_ip),
52+
TP_ARGS(ip, parent_ip));
53+
#endif
54+
55+
#endif /* _TRACE_PREEMPTIRQ_H */
56+
57+
#include <trace/define_trace.h>
58+
59+
#else /* !CONFIG_PREEMPTIRQ_EVENTS */
60+
61+
#define trace_irq_enable(...)
62+
#define trace_irq_disable(...)
63+
#define trace_preempt_enable(...)
64+
#define trace_preempt_disable(...)
65+
#define trace_irq_enable_rcuidle(...)
66+
#define trace_irq_disable_rcuidle(...)
67+
#define trace_preempt_enable_rcuidle(...)
68+
#define trace_preempt_disable_rcuidle(...)
69+
70+
#endif

kernel/trace/Kconfig

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,17 @@ config FUNCTION_GRAPH_TRACER
165165
address on the current task structure into a stack of calls.
166166

167167

168+
config PREEMPTIRQ_EVENTS
169+
bool "Enable trace events for preempt and irq disable/enable"
170+
select TRACE_IRQFLAGS
171+
depends on DEBUG_PREEMPT || !PROVE_LOCKING
172+
default n
173+
help
174+
Enable tracing of disable and enable events for preemption and irqs.
175+
For tracing preempt disable/enable events, DEBUG_PREEMPT must be
176+
enabled. For tracing irq disable/enable events, PROVE_LOCKING must
177+
be disabled.
178+
168179
config IRQSOFF_TRACER
169180
bool "Interrupts-off Latency Tracer"
170181
default n

kernel/trace/Makefile

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,7 @@ obj-$(CONFIG_TRACING) += trace_stat.o
3737
obj-$(CONFIG_TRACING) += trace_printk.o
3838
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
3939
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
40+
obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
4041
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
4142
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
4243
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o

kernel/trace/trace_irqsoff.c

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@
1616

1717
#include "trace.h"
1818

19+
#define CREATE_TRACE_POINTS
20+
#include <trace/events/preemptirq.h>
21+
1922
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
2023
static struct trace_array *irqsoff_trace __read_mostly;
2124
static int tracer_enabled __read_mostly;
@@ -765,27 +768,54 @@ static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
765768
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
766769
#endif
767770

771+
/* Per-cpu variable to prevent redundant calls when IRQs already off */
772+
static DEFINE_PER_CPU(int, tracing_irq_cpu);
773+
768774
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
769775
void trace_hardirqs_on(void)
770776
{
777+
if (!this_cpu_read(tracing_irq_cpu))
778+
return;
779+
780+
trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
771781
tracer_hardirqs_on();
782+
783+
this_cpu_write(tracing_irq_cpu, 0);
772784
}
773785
EXPORT_SYMBOL(trace_hardirqs_on);
774786

775787
void trace_hardirqs_off(void)
776788
{
789+
if (this_cpu_read(tracing_irq_cpu))
790+
return;
791+
792+
this_cpu_write(tracing_irq_cpu, 1);
793+
794+
trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
777795
tracer_hardirqs_off();
778796
}
779797
EXPORT_SYMBOL(trace_hardirqs_off);
780798

781799
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
782800
{
801+
if (!this_cpu_read(tracing_irq_cpu))
802+
return;
803+
804+
trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
783805
tracer_hardirqs_on_caller(caller_addr);
806+
807+
this_cpu_write(tracing_irq_cpu, 0);
784808
}
785809
EXPORT_SYMBOL(trace_hardirqs_on_caller);
786810

787811
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
788812
{
813+
if (this_cpu_read(tracing_irq_cpu))
814+
return;
815+
816+
this_cpu_write(tracing_irq_cpu, 1);
817+
818+
trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
789819
tracer_hardirqs_off_caller(caller_addr);
790820
}
791821
EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -807,14 +837,17 @@ inline void print_irqtrace_events(struct task_struct *curr)
807837
}
808838
#endif
809839

810-
#ifdef CONFIG_PREEMPT_TRACER
840+
#if defined(CONFIG_PREEMPT_TRACER) || \
841+
(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
811842
void trace_preempt_on(unsigned long a0, unsigned long a1)
812843
{
844+
trace_preempt_enable_rcuidle(a0, a1);
813845
tracer_preempt_on(a0, a1);
814846
}
815847

816848
void trace_preempt_off(unsigned long a0, unsigned long a1)
817849
{
850+
trace_preempt_disable_rcuidle(a0, a1);
818851
tracer_preempt_off(a0, a1);
819852
}
820853
#endif

0 commit comments

Comments (0)