/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
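	 *
	 * (tracing_cpu is set in start_critical_timing() and cleared in
	 * stop_critical_timing(), so the common case below is a cheap
	 * per-cpu test followed by an early return.)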
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	int cpu;

	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tracing_max_latency = 0;
	tracing_reset_online_cpus(irqsoff_trace);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function
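
/*
 * Without the function graph tracer the helpers below collapse to
 * stubs, so the rest of this file can call them unconditionally.
 */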
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
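	/*
	 * "parent_ip ? : ip" below uses the GNU C conditional shorthand:
	 * record the caller's address when we have one, otherwise fall
	 * back to ip itself.
	 */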
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/*
 * start and stop critical timings, used to mark known stoppage (e.g. in
 * the idle loop) so that it is not counted as a latency:
 */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
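
/*
 * Attach/detach the tracing callbacks: either the plain function trace
 * callback above or the graph entry/return pair, depending on the
 * display-graph option.
 */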
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret = 0;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);

	if (start_irqsoff_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr, is_graph());

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
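
/*
 * The combined tracer: records the longest window in which irqs and/or
 * preemption are disabled.
 */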
static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
	.print_header	= irqsoff_print_header,
	.print_line	= irqsoff_print_line,
	.flags		= &tracer_flags,
	.set_flag	= irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.use_max_tr	= 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
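
/*
 * Typical use via the debugfs tracing interface (a sketch; the paths
 * assume debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	# ... run the workload of interest ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * "preemptoff" and "preemptirqsoff" are selected the same way when they
 * are configured in.  A non-zero tracing_thresh makes report_latency()
 * accept any section longer than the threshold rather than only a new
 * maximum.
 */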