/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_lat_flag;

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
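/*
 * A sketch of the resulting caller pattern (this is how
 * wakeup_tracer_call() and the graph entry/return hooks below use it):
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	...record the event...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */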
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

static int start_func_tracer(int graph)
{
	int ret;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(!set);

	wakeup_reset(wakeup_trace);
	tracing_max_latency = 0;

	return start_func_tracer(set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}
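/*
 * Whether the call-graph output above is used is controlled by the
 * display-graph tracer option, toggled through the tracing debugfs
 * directory while a wakeup tracer is active, e.g. (a sketch, assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/display-graph
 */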
static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
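/*
 * Example: with tracing_thresh unset, a delta of 30 against a current
 * tracing_max_latency of 50 is ignored, while a delta of 70 is recorded
 * and becomes the new maximum. With tracing_thresh set, every delta at
 * or above the threshold is recorded, regardless of the current maximum.
 */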
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
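/*
 * Putting the two probes together: probe_wakeup() stamps
 * data->preempt_timestamp when a qualifying task is woken, and
 * probe_wakeup_sched_switch() reads it back once that task is finally
 * scheduled in:
 *
 *	T0    = data->preempt_timestamp;  (taken at wakeup)
 *	T1    = ftrace_now(cpu);          (taken at the context switch)
 *	delta = T1 - T0;                  (the wakeup latency)
 */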
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
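/*
 * Two tracers are registered below: "wakeup", which tracks the
 * highest-priority task in the system, and "wakeup_rt", which restricts
 * itself to realtime tasks (see the rt_task() check in probe_wakeup()).
 * A typical session from user space looks like this (a sketch, assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	  ... run the workload ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 */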
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= 1,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= 1,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);