// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array *wakeup_trace;
static int __read_mostly tracer_enabled;

static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
static int wakeup_rt;
static int wakeup_dl;
static int tracing_dl = 0;

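/*
 * wakeup_lock serializes updates to the wakeup_* tracking state above;
 * it is always taken with interrupts disabled.
 */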
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif


#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
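/*
 * Switch between plain function tracing and function-graph output
 * when the display-graph trace option is toggled.
 */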
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

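/*
 * Tracepoint probe: keep wakeup_current_cpu in sync when the task we
 * are tracing is migrated to another CPU.
 */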
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = task_state_index(prev);
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = task_state_index(next);
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = task_state_index(curr);
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = task_state_index(wakee);
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

/*
 * Tracepoint probe: the task we have been waiting for is scheduled in.
 * Compute the wakeup latency and, if it is a new maximum, save a
 * snapshot via update_max_tr().
 */
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

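/*
 * Tracepoint probe: a candidate task is waking up. If it passes the
 * class and priority checks below (see the "Semantic" comment), reset
 * the current trace and start timing how long it takes this task to be
 * scheduled in.
 */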
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

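/*
 * Register the scheduler tracepoint probes and start the function
 * tracer for this trace instance.
 */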
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

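/* Only one wakeup tracer instance may be active at a time. */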
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

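/*
 * The three tracer flavours below share the same callbacks; they differ
 * only in which scheduling classes probe_wakeup() considers.
 */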
static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name = "wakeup_dl",
	.init = wakeup_dl_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr = true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);