/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}
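/*
 * report_latency() above implements two policies (a brief summary,
 * matching the checks in the function): when tracing_thresh is set,
 * every critical section lasting at least that long is recorded,
 * independent of previous maximums; when it is zero, only a section
 * that beats tracing_max_latency is recorded, so the trace always
 * holds the worst case seen so far.
 */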
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);

        latency = nsecs_to_usecs(delta);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(tr, cpu);
        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
}
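/*
 * The two helpers below bracket a critical section:
 * start_critical_timing() stamps data->preempt_timestamp and resets
 * the per-cpu buffer, stop_critical_timing() hands the elapsed time
 * to check_critical_timing() above. The per-cpu tracing_cpu flag
 * keeps nested disables from restarting the measurement: only the
 * outermost off/on pair is timed.
 */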
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;
        tracing_reset(tr, cpu);

        local_save_flags(flags);

        trace_function(tr, data, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        trace_function(tr, data, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/*
 * start_critical_timings()/stop_critical_timings() let code such as
 * the idle loop suspend the measurement: idle sits with irqs disabled
 * by design, and that wait must not be reported as a latency.
 */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
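/*
 * Note the inversion in the hooks above: turning irqs or preemption
 * *off* opens a critical section, so the "off" events call
 * start_critical_timing() and the "on" events call
 * stop_critical_timing().
 */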
/*
 * save_tracer_enabled is used to save the state of the tracer_enabled
 * variable when we disable it upon opening a trace output file.
 */
static int save_tracer_enabled;

static void start_irqsoff_tracer(struct trace_array *tr)
{
        register_ftrace_function(&trace_ops);
        if (tracing_is_enabled()) {
                tracer_enabled = 1;
                save_tracer_enabled = 1;
        } else {
                tracer_enabled = 0;
                save_tracer_enabled = 0;
        }
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        save_tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
        save_tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
        save_tracer_enabled = 0;
}

static void irqsoff_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        tracer_enabled = 0;
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
        /* restart tracing */
        tracer_enabled = save_tracer_enabled;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name = "irqsoff",
        .init = irqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name = "preemptoff",
        .init = preemptoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif
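/*
 * The combined tracer below sets both trace_type bits, so it measures
 * sections where irqs and/or preemption are disabled and can catch a
 * latency that, e.g., starts with irqs off and is prolonged by a
 * preempt_disable() that outlives the irqs-off region.
 */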
#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name = "preemptirqsoff",
        .init = preemptirqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .start = irqsoff_tracer_start,
        .stop = irqsoff_tracer_stop,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);
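/*
 * Usage sketch, not part of the code (mount point and file names may
 * differ between kernel versions; this assumes debugfs mounted at
 * /sys/kernel/debug):
 *
 *	# cd /sys/kernel/debug/tracing
 *	# echo irqsoff > current_tracer
 *	# cat tracing_max_latency		# worst latency seen, in usecs
 *
 * Writing 0 to tracing_max_latency rearms the search for a new maximum.
 */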