|
16 | 16 |
|
17 | 17 | #include "trace.h" |
18 | 18 |
|
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
/*
 * Shared state for the irqsoff/preemptoff latency tracers.
 * NOTE(review): presumably set up when the tracer registers itself —
 * initialization is not visible in this hunk; confirm at the
 * register_tracer() call sites later in this file.
 */
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
21 | 22 |
|
@@ -450,64 +451,44 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) |
450 | 451 |
|
451 | 452 | #else /* !CONFIG_PROVE_LOCKING */ |
452 | 453 |
|
453 | | -/* |
454 | | - * Stubs: |
455 | | - */ |
456 | | - |
457 | | -void trace_softirqs_on(unsigned long ip) |
458 | | -{ |
459 | | -} |
460 | | - |
461 | | -void trace_softirqs_off(unsigned long ip) |
462 | | -{ |
463 | | -} |
464 | | - |
465 | | -inline void print_irqtrace_events(struct task_struct *curr) |
466 | | -{ |
467 | | -} |
468 | | - |
/*
 * We are only interested in hardirq on/off events:
 */
static inline void tracer_hardirqs_on(void)
{
	/*
	 * Stop timing the irqs-off critical section, but only when the
	 * preempt tracer does not own it (preempt_trace() false) and irq
	 * tracing is requested (irq_trace() true); both predicates are
	 * defined elsewhere in this file.  CALLER_ADDR0/1 are the ftrace
	 * return-address macros recording where irqs were re-enabled.
	 */
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
478 | 462 |
|
/*
 * Counterpart of tracer_hardirqs_on(): begin timing the irqs-off
 * critical section, unless the preempt tracer is the one tracing it.
 */
static inline void tracer_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
485 | 468 |
|
/*
 * Like tracer_hardirqs_on(), but the interesting return address is
 * supplied explicitly by the trace_hardirqs_on_caller() entry point
 * instead of being derived via CALLER_ADDR1.
 */
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
492 | 474 |
|
/*
 * Like tracer_hardirqs_off(), with the caller's return address passed
 * in explicitly by the trace_hardirqs_off_caller() entry point.
 */
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
499 | 480 |
|
500 | 481 | #endif /* CONFIG_PROVE_LOCKING */ |
501 | 482 | #endif /* CONFIG_IRQSOFF_TRACER */ |
502 | 483 |
|
#ifdef CONFIG_PREEMPT_TRACER
/*
 * Mirror image of the hardirq helpers: act only when the preempt
 * tracer owns the critical section (preempt_trace() true) and irq
 * tracing does not (irq_trace() false).  a0/a1 are caller-supplied
 * addresses; their exact meaning is not visible in this hunk.
 */
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}
509 | 490 |
|
510 | | -void trace_preempt_off(unsigned long a0, unsigned long a1) |
| 491 | +static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) |
511 | 492 | { |
512 | 493 | if (preempt_trace() && !irq_trace()) |
513 | 494 | start_critical_timing(a0, a1); |
@@ -770,3 +751,70 @@ __init static int init_irqsoff_tracer(void) |
770 | 751 | return 0; |
771 | 752 | } |
772 | 753 | core_initcall(init_irqsoff_tracer); |
| 754 | +#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_PREEMPT_TRACER */ |
| 755 | + |
#ifndef CONFIG_IRQSOFF_TRACER
/*
 * No-op stubs so the trace_hardirqs_*() entry points below still
 * compile when the irqsoff tracer itself is not configured.
 */
static inline void tracer_hardirqs_on(void) { }
static inline void tracer_hardirqs_off(void) { }
static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
#endif
| 762 | + |
#ifndef CONFIG_PREEMPT_TRACER
/*
 * No-op stubs so the trace_preempt_on/off() entry points below still
 * compile when the preempt tracer is not configured.
 */
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif
| 767 | + |
#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
/*
 * With irqflags tracing enabled but lockdep (PROVE_LOCKING) disabled,
 * this file supplies the global trace_hardirqs_*() entry points and
 * forwards them to the tracer_* helpers above (which collapse to
 * no-op stubs when CONFIG_IRQSOFF_TRACER is not set).
 */
void trace_hardirqs_on(void)
{
	tracer_hardirqs_on();
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	tracer_hardirqs_off();
}
EXPORT_SYMBOL(trace_hardirqs_off);

/* Variants that receive the caller's return address explicitly. */
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	tracer_hardirqs_on_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	tracer_hardirqs_off_caller(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

/*
 * Stubs:
 *
 * NOTE(review): intentionally empty softirq/irqtrace-event hooks;
 * the real implementations presumably live in lockdep when
 * PROVE_LOCKING is enabled — confirm against kernel/locking/lockdep.c.
 */
void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif
| 809 | + |
#ifdef CONFIG_PREEMPT_TRACER
/*
 * Global preempt-enable/disable hooks, forwarded to the tracer_*
 * helpers above.  a0/a1 are caller-supplied addresses (their meaning
 * is established at the call sites, not visible in this hunk).
 */
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	tracer_preempt_on(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	tracer_preempt_off(a0, a1);
}
#endif
0 commit comments