/*
 * linux/kernel/profile.c
 * Simple profiling. Manages a direct-mapped profile hit count buffer,
 * with configurable resolution, support for restricting the cpus on
 * which profiling is done, and switching between cpu time and
 * schedule() calls via kernel command line parameters passed at boot.
 *
 * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 * Consolidation of architecture support code for profiling,
 *	Nadia Yvette Chambers, Oracle, July 2004
 * Amortized hit count accounting via per-cpu open-addressed hashtables
 * to resolve timer interrupt livelocks, Nadia Yvette Chambers,
 *	Oracle, 2004
 */

#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);

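/*
 * Allocate the flat hit-count buffer: one atomic_t per
 * (1 << prof_shift)-byte chunk of kernel text. The buffer can be
 * large, so try progressively less demanding allocators: kzalloc,
 * then alloc_pages_exact, then vzalloc as a last resort.
 */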
int __ref profile_init(void)
{
	int buffer_bytes;
	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len*sizeof(atomic_t);

	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(prof_cpu_mask, cpu_possible_mask);

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes,
					GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
	if (prof_buffer)
		return 0;

	prof_buffer = vzalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	free_cpumask_var(prof_cpu_mask);
	return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}

	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);


#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * purpose of the second hashtable is to avoid the cacheline
 * contention that would otherwise occur during flushes of pending
 * profile hits (needed for the accuracy of reported profile hits),
 * which would resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- nyc
 */
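/* IPI handler: toggle which of this cpu's two hashtables is active. */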
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
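	/*
	 * Double-hash probe: 'primary' selects the starting group of
	 * PROFILE_GRPSZ slots, 'secondary' is the group-aligned stride.
	 * ~(pc << 1) is always odd, so the stride is an odd number of
	 * groups; as NR_PROFILE_GRP is a power of two, every group is
	 * visited before the probe wraps back to 'primary'.
	 */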
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}

static int __cpuinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_mem(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return notifier_from_errno(-ENOMEM);
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_exact_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
	out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return notifier_from_errno(-ENOMEM);
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_set_cpu(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		if (prof_cpu_mask != NULL)
			cpumask_clear_cpu(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	if (prof_on != type || !prof_buffer)
		return;
	do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);

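/*
 * Called from the architecture's timer interrupt. For CPU_PROFILING
 * this also feeds the oprofile timer hook; a hit is recorded at the
 * interrupted PC unless the cpu was in user mode or is excluded by
 * prof_cpu_mask.
 */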
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, prof_cpu_mask);
	seq_putc(m, '\n');
	return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (!err) {
		cpumask_copy(prof_cpu_mask, new_value);
		err = count;
	}
	free_cpumask_var(new_value);
	return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
	.open		= prof_cpu_mask_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= prof_cpu_mask_proc_write,
};

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	/* create /proc/irq/prof_cpu_mask */
	proc_create("prof_cpu_mask", 0600, root_irq_dir,
		    &prof_cpu_mask_proc_fops);
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
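/*
 * Byte layout: the first sizeof(unsigned int) bytes hold the sample
 * step (1 << prof_shift); the raw atomic_t hit counters follow, one
 * per profiled text position. Note the offset arithmetic below
 * assumes sizeof(atomic_t) == sizeof(unsigned int).
 */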
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}

/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
	.llseek		= default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_mem(cpu);
		struct page *page;

		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_exact_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1 + prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */