1 /* 2 * linux/mm/vmstat.c 3 * 4 * Manages VM statistics 5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 6 * 7 * zoned VM statistics 8 * Copyright (C) 2006 Silicon Graphics, Inc., 9 * Christoph Lameter <christoph@lameter.com> 10 */ 11 #include <linux/fs.h> 12 #include <linux/mm.h> 13 #include <linux/err.h> 14 #include <linux/module.h> 15 #include <linux/slab.h> 16 #include <linux/cpu.h> 17 #include <linux/vmstat.h> 18 #include <linux/sched.h> 19 #include <linux/math64.h> 20 21 #ifdef CONFIG_VM_EVENT_COUNTERS 22 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}}; 23 EXPORT_PER_CPU_SYMBOL(vm_event_states); 24 25 static void sum_vm_events(unsigned long *ret) 26 { 27 int cpu; 28 int i; 29 30 memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long)); 31 32 for_each_online_cpu(cpu) { 33 struct vm_event_state *this = &per_cpu(vm_event_states, cpu); 34 35 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) 36 ret[i] += this->event[i]; 37 } 38 } 39 40 /* 41 * Accumulate the vm event counters across all CPUs. 42 * The result is unavoidably approximate - it can change 43 * during and after execution of this function. 44 */ 45 void all_vm_events(unsigned long *ret) 46 { 47 get_online_cpus(); 48 sum_vm_events(ret); 49 put_online_cpus(); 50 } 51 EXPORT_SYMBOL_GPL(all_vm_events); 52 53 #ifdef CONFIG_HOTPLUG 54 /* 55 * Fold the foreign cpu events into our own. 56 * 57 * This is adding to the events on one processor 58 * but keeps the global counts constant. 59 */ 60 void vm_events_fold_cpu(int cpu) 61 { 62 struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu); 63 int i; 64 65 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) { 66 count_vm_events(i, fold_state->event[i]); 67 fold_state->event[i] = 0; 68 } 69 } 70 #endif /* CONFIG_HOTPLUG */ 71 72 #endif /* CONFIG_VM_EVENT_COUNTERS */ 73 74 /* 75 * Manage combined zone based / global counters 76 * 77 * vm_stat contains the global counters 78 */ 79 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; 80 EXPORT_SYMBOL(vm_stat); 81 82 #ifdef CONFIG_SMP 83 84 static int calculate_threshold(struct zone *zone) 85 { 86 int threshold; 87 int mem; /* memory in 128 MB units */ 88 89 /* 90 * The threshold scales with the number of processors and the amount 91 * of memory per zone. More memory means that we can defer updates for 92 * longer, more processors could lead to more contention. 93 * fls() is used to have a cheap way of logarithmic scaling. 94 * 95 * Some sample thresholds: 96 * 97 * Threshold Processors (fls) Zonesize fls(mem+1) 98 * ------------------------------------------------------------------ 99 * 8 1 1 0.9-1 GB 4 100 * 16 2 2 0.9-1 GB 4 101 * 20 2 2 1-2 GB 5 102 * 24 2 2 2-4 GB 6 103 * 28 2 2 4-8 GB 7 104 * 32 2 2 8-16 GB 8 105 * 4 2 2 <128M 1 106 * 30 4 3 2-4 GB 5 107 * 48 4 3 8-16 GB 8 108 * 32 8 4 1-2 GB 4 109 * 32 8 4 0.9-1GB 4 110 * 10 16 5 <128M 1 111 * 40 16 5 900M 4 112 * 70 64 7 2-4 GB 5 113 * 84 64 7 4-8 GB 6 114 * 108 512 9 4-8 GB 6 115 * 125 1024 10 8-16 GB 8 116 * 125 1024 10 16-32 GB 9 117 */ 118 119 mem = zone->present_pages >> (27 - PAGE_SHIFT); 120 121 threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem)); 122 123 /* 124 * Maximum threshold is 125 125 */ 126 threshold = min(125, threshold); 127 128 return threshold; 129 } 130 131 /* 132 * Refresh the thresholds for each zone. 133 */ 134 static void refresh_zone_stat_thresholds(void) 135 { 136 struct zone *zone; 137 int cpu; 138 int threshold; 139 140 for_each_populated_zone(zone) { 141 threshold = calculate_threshold(zone); 142 143 for_each_online_cpu(cpu) 144 per_cpu_ptr(zone->pageset, cpu)->stat_threshold 145 = threshold; 146 } 147 } 148 149 /* 150 * For use when we know that interrupts are disabled. 151 */ 152 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 153 int delta) 154 { 155 struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); 156 157 s8 *p = pcp->vm_stat_diff + item; 158 long x; 159 160 x = delta + *p; 161 162 if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) { 163 zone_page_state_add(x, zone, item); 164 x = 0; 165 } 166 *p = x; 167 } 168 EXPORT_SYMBOL(__mod_zone_page_state); 169 170 /* 171 * For an unknown interrupt state 172 */ 173 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item, 174 int delta) 175 { 176 unsigned long flags; 177 178 local_irq_save(flags); 179 __mod_zone_page_state(zone, item, delta); 180 local_irq_restore(flags); 181 } 182 EXPORT_SYMBOL(mod_zone_page_state); 183 184 /* 185 * Optimized increment and decrement functions. 186 * 187 * These are only for a single page and therefore can take a struct page * 188 * argument instead of struct zone *. This allows the inclusion of the code 189 * generated for page_zone(page) into the optimized functions. 190 * 191 * No overflow check is necessary and therefore the differential can be 192 * incremented or decremented in place which may allow the compilers to 193 * generate better code. 194 * The increment or decrement is known and therefore one boundary check can 195 * be omitted. 196 * 197 * NOTE: These functions are very performance sensitive. Change only 198 * with care. 199 * 200 * Some processors have inc/dec instructions that are atomic vs an interrupt. 201 * However, the code must first determine the differential location in a zone 202 * based on the processor number and then inc/dec the counter. There is no 203 * guarantee without disabling preemption that the processor will not change 204 * in between and therefore the atomicity vs. interrupt cannot be exploited 205 * in a useful way here. 206 */ 207 void __inc_zone_state(struct zone *zone, enum zone_stat_item item) 208 { 209 struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); 210 s8 *p = pcp->vm_stat_diff + item; 211 212 (*p)++; 213 214 if (unlikely(*p > pcp->stat_threshold)) { 215 int overstep = pcp->stat_threshold / 2; 216 217 zone_page_state_add(*p + overstep, zone, item); 218 *p = -overstep; 219 } 220 } 221 222 void __inc_zone_page_state(struct page *page, enum zone_stat_item item) 223 { 224 __inc_zone_state(page_zone(page), item); 225 } 226 EXPORT_SYMBOL(__inc_zone_page_state); 227 228 void __dec_zone_state(struct zone *zone, enum zone_stat_item item) 229 { 230 struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset); 231 s8 *p = pcp->vm_stat_diff + item; 232 233 (*p)--; 234 235 if (unlikely(*p < - pcp->stat_threshold)) { 236 int overstep = pcp->stat_threshold / 2; 237 238 zone_page_state_add(*p - overstep, zone, item); 239 *p = overstep; 240 } 241 } 242 243 void __dec_zone_page_state(struct page *page, enum zone_stat_item item) 244 { 245 __dec_zone_state(page_zone(page), item); 246 } 247 EXPORT_SYMBOL(__dec_zone_page_state); 248 249 void inc_zone_state(struct zone *zone, enum zone_stat_item item) 250 { 251 unsigned long flags; 252 253 local_irq_save(flags); 254 __inc_zone_state(zone, item); 255 local_irq_restore(flags); 256 } 257 258 void inc_zone_page_state(struct page *page, enum zone_stat_item item) 259 { 260 unsigned long flags; 261 struct zone *zone; 262 263 zone = page_zone(page); 264 local_irq_save(flags); 265 __inc_zone_state(zone, item); 266 local_irq_restore(flags); 267 } 268 EXPORT_SYMBOL(inc_zone_page_state); 269 270 void dec_zone_page_state(struct page *page, enum zone_stat_item item) 271 { 272 unsigned long flags; 273 274 local_irq_save(flags); 275 __dec_zone_page_state(page, item); 276 local_irq_restore(flags); 277 } 278 EXPORT_SYMBOL(dec_zone_page_state); 279 280 /* 281 * Update the zone counters for one cpu. 282 * 283 * The cpu specified must be either the current cpu or a processor that 284 * is not online. If it is the current cpu then the execution thread must 285 * be pinned to the current cpu. 286 * 287 * Note that refresh_cpu_vm_stats strives to only access 288 * node local memory. The per cpu pagesets on remote zones are placed 289 * in the memory local to the processor using that pageset. So the 290 * loop over all zones will access a series of cachelines local to 291 * the processor. 292 * 293 * The call to zone_page_state_add updates the cachelines with the 294 * statistics in the remote zone struct as well as the global cachelines 295 * with the global counters. These could cause remote node cache line 296 * bouncing and will have to be only done when necessary. 297 */ 298 void refresh_cpu_vm_stats(int cpu) 299 { 300 struct zone *zone; 301 int i; 302 int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, }; 303 304 for_each_populated_zone(zone) { 305 struct per_cpu_pageset *p; 306 307 p = per_cpu_ptr(zone->pageset, cpu); 308 309 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 310 if (p->vm_stat_diff[i]) { 311 unsigned long flags; 312 int v; 313 314 local_irq_save(flags); 315 v = p->vm_stat_diff[i]; 316 p->vm_stat_diff[i] = 0; 317 local_irq_restore(flags); 318 atomic_long_add(v, &zone->vm_stat[i]); 319 global_diff[i] += v; 320 #ifdef CONFIG_NUMA 321 /* 3 seconds idle till flush */ 322 p->expire = 3; 323 #endif 324 } 325 cond_resched(); 326 #ifdef CONFIG_NUMA 327 /* 328 * Deal with draining the remote pageset of this 329 * processor 330 * 331 * Check if there are pages remaining in this pageset 332 * if not then there is nothing to expire. 333 */ 334 if (!p->expire || !p->pcp.count) 335 continue; 336 337 /* 338 * We never drain zones local to this processor. 339 */ 340 if (zone_to_nid(zone) == numa_node_id()) { 341 p->expire = 0; 342 continue; 343 } 344 345 p->expire--; 346 if (p->expire) 347 continue; 348 349 if (p->pcp.count) 350 drain_zone_pages(zone, &p->pcp); 351 #endif 352 } 353 354 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 355 if (global_diff[i]) 356 atomic_long_add(global_diff[i], &vm_stat[i]); 357 } 358 359 #endif 360 361 #ifdef CONFIG_NUMA 362 /* 363 * zonelist = the list of zones passed to the allocator 364 * z = the zone from which the allocation occurred. 365 * 366 * Must be called with interrupts disabled. 367 */ 368 void zone_statistics(struct zone *preferred_zone, struct zone *z) 369 { 370 if (z->zone_pgdat == preferred_zone->zone_pgdat) { 371 __inc_zone_state(z, NUMA_HIT); 372 } else { 373 __inc_zone_state(z, NUMA_MISS); 374 __inc_zone_state(preferred_zone, NUMA_FOREIGN); 375 } 376 if (z->node == numa_node_id()) 377 __inc_zone_state(z, NUMA_LOCAL); 378 else 379 __inc_zone_state(z, NUMA_OTHER); 380 } 381 #endif 382 383 #ifdef CONFIG_COMPACTION 384 struct contig_page_info { 385 unsigned long free_pages; 386 unsigned long free_blocks_total; 387 unsigned long free_blocks_suitable; 388 }; 389 390 /* 391 * Calculate the number of free pages in a zone, how many contiguous 392 * pages are free and how many are large enough to satisfy an allocation of 393 * the target size. Note that this function makes no attempt to estimate 394 * how many suitable free blocks there *might* be if MOVABLE pages were 395 * migrated. Calculating that is possible, but expensive and can be 396 * figured out from userspace 397 */ 398 static void fill_contig_page_info(struct zone *zone, 399 unsigned int suitable_order, 400 struct contig_page_info *info) 401 { 402 unsigned int order; 403 404 info->free_pages = 0; 405 info->free_blocks_total = 0; 406 info->free_blocks_suitable = 0; 407 408 for (order = 0; order < MAX_ORDER; order++) { 409 unsigned long blocks; 410 411 /* Count number of free blocks */ 412 blocks = zone->free_area[order].nr_free; 413 info->free_blocks_total += blocks; 414 415 /* Count free base pages */ 416 info->free_pages += blocks << order; 417 418 /* Count the suitable free blocks */ 419 if (order >= suitable_order) 420 info->free_blocks_suitable += blocks << 421 (order - suitable_order); 422 } 423 } 424 425 /* 426 * A fragmentation index only makes sense if an allocation of a requested 427 * size would fail. If that is true, the fragmentation index indicates 428 * whether external fragmentation or a lack of memory was the problem. 429 * The value can be used to determine if page reclaim or compaction 430 * should be used 431 */ 432 static int __fragmentation_index(unsigned int order, struct contig_page_info *info) 433 { 434 unsigned long requested = 1UL << order; 435 436 if (!info->free_blocks_total) 437 return 0; 438 439 /* Fragmentation index only makes sense when a request would fail */ 440 if (info->free_blocks_suitable) 441 return -1000; 442 443 /* 444 * Index is between 0 and 1 so return within 3 decimal places 445 * 446 * 0 => allocation would fail due to lack of memory 447 * 1 => allocation would fail due to fragmentation 448 */ 449 return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total); 450 } 451 452 /* Same as __fragmentation index but allocs contig_page_info on stack */ 453 int fragmentation_index(struct zone *zone, unsigned int order) 454 { 455 struct contig_page_info info; 456 457 fill_contig_page_info(zone, order, &info); 458 return __fragmentation_index(order, &info); 459 } 460 #endif 461 462 #if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION) 463 #include <linux/proc_fs.h> 464 #include <linux/seq_file.h> 465 466 static char * const migratetype_names[MIGRATE_TYPES] = { 467 "Unmovable", 468 "Reclaimable", 469 "Movable", 470 "Reserve", 471 "Isolate", 472 }; 473 474 static void *frag_start(struct seq_file *m, loff_t *pos) 475 { 476 pg_data_t *pgdat; 477 loff_t node = *pos; 478 for (pgdat = first_online_pgdat(); 479 pgdat && node; 480 pgdat = next_online_pgdat(pgdat)) 481 --node; 482 483 return pgdat; 484 } 485 486 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos) 487 { 488 pg_data_t *pgdat = (pg_data_t *)arg; 489 490 (*pos)++; 491 return next_online_pgdat(pgdat); 492 } 493 494 static void frag_stop(struct seq_file *m, void *arg) 495 { 496 } 497 498 /* Walk all the zones in a node and print using a callback */ 499 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, 500 void (*print)(struct seq_file *m, pg_data_t *, struct zone *)) 501 { 502 struct zone *zone; 503 struct zone *node_zones = pgdat->node_zones; 504 unsigned long flags; 505 506 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { 507 if (!populated_zone(zone)) 508 continue; 509 510 spin_lock_irqsave(&zone->lock, flags); 511 print(m, pgdat, zone); 512 spin_unlock_irqrestore(&zone->lock, flags); 513 } 514 } 515 #endif 516 517 #ifdef CONFIG_PROC_FS 518 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat, 519 struct zone *zone) 520 { 521 int order; 522 523 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 524 for (order = 0; order < MAX_ORDER; ++order) 525 seq_printf(m, "%6lu ", zone->free_area[order].nr_free); 526 seq_putc(m, '\n'); 527 } 528 529 /* 530 * This walks the free areas for each zone. 531 */ 532 static int frag_show(struct seq_file *m, void *arg) 533 { 534 pg_data_t *pgdat = (pg_data_t *)arg; 535 walk_zones_in_node(m, pgdat, frag_show_print); 536 return 0; 537 } 538 539 static void pagetypeinfo_showfree_print(struct seq_file *m, 540 pg_data_t *pgdat, struct zone *zone) 541 { 542 int order, mtype; 543 544 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) { 545 seq_printf(m, "Node %4d, zone %8s, type %12s ", 546 pgdat->node_id, 547 zone->name, 548 migratetype_names[mtype]); 549 for (order = 0; order < MAX_ORDER; ++order) { 550 unsigned long freecount = 0; 551 struct free_area *area; 552 struct list_head *curr; 553 554 area = &(zone->free_area[order]); 555 556 list_for_each(curr, &area->free_list[mtype]) 557 freecount++; 558 seq_printf(m, "%6lu ", freecount); 559 } 560 seq_putc(m, '\n'); 561 } 562 } 563 564 /* Print out the free pages at each order for each migatetype */ 565 static int pagetypeinfo_showfree(struct seq_file *m, void *arg) 566 { 567 int order; 568 pg_data_t *pgdat = (pg_data_t *)arg; 569 570 /* Print header */ 571 seq_printf(m, "%-43s ", "Free pages count per migrate type at order"); 572 for (order = 0; order < MAX_ORDER; ++order) 573 seq_printf(m, "%6d ", order); 574 seq_putc(m, '\n'); 575 576 walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print); 577 578 return 0; 579 } 580 581 static void pagetypeinfo_showblockcount_print(struct seq_file *m, 582 pg_data_t *pgdat, struct zone *zone) 583 { 584 int mtype; 585 unsigned long pfn; 586 unsigned long start_pfn = zone->zone_start_pfn; 587 unsigned long end_pfn = start_pfn + zone->spanned_pages; 588 unsigned long count[MIGRATE_TYPES] = { 0, }; 589 590 for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 591 struct page *page; 592 593 if (!pfn_valid(pfn)) 594 continue; 595 596 page = pfn_to_page(pfn); 597 598 /* Watch for unexpected holes punched in the memmap */ 599 if (!memmap_valid_within(pfn, page, zone)) 600 continue; 601 602 mtype = get_pageblock_migratetype(page); 603 604 if (mtype < MIGRATE_TYPES) 605 count[mtype]++; 606 } 607 608 /* Print counts */ 609 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); 610 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 611 seq_printf(m, "%12lu ", count[mtype]); 612 seq_putc(m, '\n'); 613 } 614 615 /* Print out the free pages at each order for each migratetype */ 616 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg) 617 { 618 int mtype; 619 pg_data_t *pgdat = (pg_data_t *)arg; 620 621 seq_printf(m, "\n%-23s", "Number of blocks type "); 622 for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) 623 seq_printf(m, "%12s ", migratetype_names[mtype]); 624 seq_putc(m, '\n'); 625 walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print); 626 627 return 0; 628 } 629 630 /* 631 * This prints out statistics in relation to grouping pages by mobility. 632 * It is expensive to collect so do not constantly read the file. 633 */ 634 static int pagetypeinfo_show(struct seq_file *m, void *arg) 635 { 636 pg_data_t *pgdat = (pg_data_t *)arg; 637 638 /* check memoryless node */ 639 if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) 640 return 0; 641 642 seq_printf(m, "Page block order: %d\n", pageblock_order); 643 seq_printf(m, "Pages per block: %lu\n", pageblock_nr_pages); 644 seq_putc(m, '\n'); 645 pagetypeinfo_showfree(m, pgdat); 646 pagetypeinfo_showblockcount(m, pgdat); 647 648 return 0; 649 } 650 651 static const struct seq_operations fragmentation_op = { 652 .start = frag_start, 653 .next = frag_next, 654 .stop = frag_stop, 655 .show = frag_show, 656 }; 657 658 static int fragmentation_open(struct inode *inode, struct file *file) 659 { 660 return seq_open(file, &fragmentation_op); 661 } 662 663 static const struct file_operations fragmentation_file_operations = { 664 .open = fragmentation_open, 665 .read = seq_read, 666 .llseek = seq_lseek, 667 .release = seq_release, 668 }; 669 670 static const struct seq_operations pagetypeinfo_op = { 671 .start = frag_start, 672 .next = frag_next, 673 .stop = frag_stop, 674 .show = pagetypeinfo_show, 675 }; 676 677 static int pagetypeinfo_open(struct inode *inode, struct file *file) 678 { 679 return seq_open(file, &pagetypeinfo_op); 680 } 681 682 static const struct file_operations pagetypeinfo_file_ops = { 683 .open = pagetypeinfo_open, 684 .read = seq_read, 685 .llseek = seq_lseek, 686 .release = seq_release, 687 }; 688 689 #ifdef CONFIG_ZONE_DMA 690 #define TEXT_FOR_DMA(xx) xx "_dma", 691 #else 692 #define TEXT_FOR_DMA(xx) 693 #endif 694 695 #ifdef CONFIG_ZONE_DMA32 696 #define TEXT_FOR_DMA32(xx) xx "_dma32", 697 #else 698 #define TEXT_FOR_DMA32(xx) 699 #endif 700 701 #ifdef CONFIG_HIGHMEM 702 #define TEXT_FOR_HIGHMEM(xx) xx "_high", 703 #else 704 #define TEXT_FOR_HIGHMEM(xx) 705 #endif 706 707 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \ 708 TEXT_FOR_HIGHMEM(xx) xx "_movable", 709 710 static const char * const vmstat_text[] = { 711 /* Zoned VM counters */ 712 "nr_free_pages", 713 "nr_inactive_anon", 714 "nr_active_anon", 715 "nr_inactive_file", 716 "nr_active_file", 717 "nr_unevictable", 718 "nr_mlock", 719 "nr_anon_pages", 720 "nr_mapped", 721 "nr_file_pages", 722 "nr_dirty", 723 "nr_writeback", 724 "nr_slab_reclaimable", 725 "nr_slab_unreclaimable", 726 "nr_page_table_pages", 727 "nr_kernel_stack", 728 "nr_unstable", 729 "nr_bounce", 730 "nr_vmscan_write", 731 "nr_writeback_temp", 732 "nr_isolated_anon", 733 "nr_isolated_file", 734 "nr_shmem", 735 #ifdef CONFIG_NUMA 736 "numa_hit", 737 "numa_miss", 738 "numa_foreign", 739 "numa_interleave", 740 "numa_local", 741 "numa_other", 742 #endif 743 744 #ifdef CONFIG_VM_EVENT_COUNTERS 745 "pgpgin", 746 "pgpgout", 747 "pswpin", 748 "pswpout", 749 750 TEXTS_FOR_ZONES("pgalloc") 751 752 "pgfree", 753 "pgactivate", 754 "pgdeactivate", 755 756 "pgfault", 757 "pgmajfault", 758 759 TEXTS_FOR_ZONES("pgrefill") 760 TEXTS_FOR_ZONES("pgsteal") 761 TEXTS_FOR_ZONES("pgscan_kswapd") 762 TEXTS_FOR_ZONES("pgscan_direct") 763 764 #ifdef CONFIG_NUMA 765 "zone_reclaim_failed", 766 #endif 767 "pginodesteal", 768 "slabs_scanned", 769 "kswapd_steal", 770 "kswapd_inodesteal", 771 "kswapd_low_wmark_hit_quickly", 772 "kswapd_high_wmark_hit_quickly", 773 "kswapd_skip_congestion_wait", 774 "pageoutrun", 775 "allocstall", 776 777 "pgrotated", 778 779 #ifdef CONFIG_COMPACTION 780 "compact_blocks_moved", 781 "compact_pages_moved", 782 "compact_pagemigrate_failed", 783 "compact_stall", 784 "compact_fail", 785 "compact_success", 786 #endif 787 788 #ifdef CONFIG_HUGETLB_PAGE 789 "htlb_buddy_alloc_success", 790 "htlb_buddy_alloc_fail", 791 #endif 792 "unevictable_pgs_culled", 793 "unevictable_pgs_scanned", 794 "unevictable_pgs_rescued", 795 "unevictable_pgs_mlocked", 796 "unevictable_pgs_munlocked", 797 "unevictable_pgs_cleared", 798 "unevictable_pgs_stranded", 799 "unevictable_pgs_mlockfreed", 800 #endif 801 }; 802 803 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, 804 struct zone *zone) 805 { 806 int i; 807 seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name); 808 seq_printf(m, 809 "\n pages free %lu" 810 "\n min %lu" 811 "\n low %lu" 812 "\n high %lu" 813 "\n scanned %lu" 814 "\n spanned %lu" 815 "\n present %lu", 816 zone_page_state(zone, NR_FREE_PAGES), 817 min_wmark_pages(zone), 818 low_wmark_pages(zone), 819 high_wmark_pages(zone), 820 zone->pages_scanned, 821 zone->spanned_pages, 822 zone->present_pages); 823 824 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 825 seq_printf(m, "\n %-12s %lu", vmstat_text[i], 826 zone_page_state(zone, i)); 827 828 seq_printf(m, 829 "\n protection: (%lu", 830 zone->lowmem_reserve[0]); 831 for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++) 832 seq_printf(m, ", %lu", zone->lowmem_reserve[i]); 833 seq_printf(m, 834 ")" 835 "\n pagesets"); 836 for_each_online_cpu(i) { 837 struct per_cpu_pageset *pageset; 838 839 pageset = per_cpu_ptr(zone->pageset, i); 840 seq_printf(m, 841 "\n cpu: %i" 842 "\n count: %i" 843 "\n high: %i" 844 "\n batch: %i", 845 i, 846 pageset->pcp.count, 847 pageset->pcp.high, 848 pageset->pcp.batch); 849 #ifdef CONFIG_SMP 850 seq_printf(m, "\n vm stats threshold: %d", 851 pageset->stat_threshold); 852 #endif 853 } 854 seq_printf(m, 855 "\n all_unreclaimable: %u" 856 "\n start_pfn: %lu" 857 "\n inactive_ratio: %u", 858 zone->all_unreclaimable, 859 zone->zone_start_pfn, 860 zone->inactive_ratio); 861 seq_putc(m, '\n'); 862 } 863 864 /* 865 * Output information about zones in @pgdat. 866 */ 867 static int zoneinfo_show(struct seq_file *m, void *arg) 868 { 869 pg_data_t *pgdat = (pg_data_t *)arg; 870 walk_zones_in_node(m, pgdat, zoneinfo_show_print); 871 return 0; 872 } 873 874 static const struct seq_operations zoneinfo_op = { 875 .start = frag_start, /* iterate over all zones. The same as in 876 * fragmentation. */ 877 .next = frag_next, 878 .stop = frag_stop, 879 .show = zoneinfo_show, 880 }; 881 882 static int zoneinfo_open(struct inode *inode, struct file *file) 883 { 884 return seq_open(file, &zoneinfo_op); 885 } 886 887 static const struct file_operations proc_zoneinfo_file_operations = { 888 .open = zoneinfo_open, 889 .read = seq_read, 890 .llseek = seq_lseek, 891 .release = seq_release, 892 }; 893 894 static void *vmstat_start(struct seq_file *m, loff_t *pos) 895 { 896 unsigned long *v; 897 #ifdef CONFIG_VM_EVENT_COUNTERS 898 unsigned long *e; 899 #endif 900 int i; 901 902 if (*pos >= ARRAY_SIZE(vmstat_text)) 903 return NULL; 904 905 #ifdef CONFIG_VM_EVENT_COUNTERS 906 v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) 907 + sizeof(struct vm_event_state), GFP_KERNEL); 908 #else 909 v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long), 910 GFP_KERNEL); 911 #endif 912 m->private = v; 913 if (!v) 914 return ERR_PTR(-ENOMEM); 915 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) 916 v[i] = global_page_state(i); 917 #ifdef CONFIG_VM_EVENT_COUNTERS 918 e = v + NR_VM_ZONE_STAT_ITEMS; 919 all_vm_events(e); 920 e[PGPGIN] /= 2; /* sectors -> kbytes */ 921 e[PGPGOUT] /= 2; 922 #endif 923 return v + *pos; 924 } 925 926 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos) 927 { 928 (*pos)++; 929 if (*pos >= ARRAY_SIZE(vmstat_text)) 930 return NULL; 931 return (unsigned long *)m->private + *pos; 932 } 933 934 static int vmstat_show(struct seq_file *m, void *arg) 935 { 936 unsigned long *l = arg; 937 unsigned long off = l - (unsigned long *)m->private; 938 939 seq_printf(m, "%s %lu\n", vmstat_text[off], *l); 940 return 0; 941 } 942 943 static void vmstat_stop(struct seq_file *m, void *arg) 944 { 945 kfree(m->private); 946 m->private = NULL; 947 } 948 949 static const struct seq_operations vmstat_op = { 950 .start = vmstat_start, 951 .next = vmstat_next, 952 .stop = vmstat_stop, 953 .show = vmstat_show, 954 }; 955 956 static int vmstat_open(struct inode *inode, struct file *file) 957 { 958 return seq_open(file, &vmstat_op); 959 } 960 961 static const struct file_operations proc_vmstat_file_operations = { 962 .open = vmstat_open, 963 .read = seq_read, 964 .llseek = seq_lseek, 965 .release = seq_release, 966 }; 967 #endif /* CONFIG_PROC_FS */ 968 969 #ifdef CONFIG_SMP 970 static DEFINE_PER_CPU(struct delayed_work, vmstat_work); 971 int sysctl_stat_interval __read_mostly = HZ; 972 973 static void vmstat_update(struct work_struct *w) 974 { 975 refresh_cpu_vm_stats(smp_processor_id()); 976 schedule_delayed_work(&__get_cpu_var(vmstat_work), 977 round_jiffies_relative(sysctl_stat_interval)); 978 } 979 980 static void __cpuinit start_cpu_timer(int cpu) 981 { 982 struct delayed_work *work = &per_cpu(vmstat_work, cpu); 983 984 INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update); 985 schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu)); 986 } 987 988 /* 989 * Use the cpu notifier to insure that the thresholds are recalculated 990 * when necessary. 991 */ 992 static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb, 993 unsigned long action, 994 void *hcpu) 995 { 996 long cpu = (long)hcpu; 997 998 switch (action) { 999 case CPU_ONLINE: 1000 case CPU_ONLINE_FROZEN: 1001 start_cpu_timer(cpu); 1002 node_set_state(cpu_to_node(cpu), N_CPU); 1003 break; 1004 case CPU_DOWN_PREPARE: 1005 case CPU_DOWN_PREPARE_FROZEN: 1006 cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu)); 1007 per_cpu(vmstat_work, cpu).work.func = NULL; 1008 break; 1009 case CPU_DOWN_FAILED: 1010 case CPU_DOWN_FAILED_FROZEN: 1011 start_cpu_timer(cpu); 1012 break; 1013 case CPU_DEAD: 1014 case CPU_DEAD_FROZEN: 1015 refresh_zone_stat_thresholds(); 1016 break; 1017 default: 1018 break; 1019 } 1020 return NOTIFY_OK; 1021 } 1022 1023 static struct notifier_block __cpuinitdata vmstat_notifier = 1024 { &vmstat_cpuup_callback, NULL, 0 }; 1025 #endif 1026 1027 static int __init setup_vmstat(void) 1028 { 1029 #ifdef CONFIG_SMP 1030 int cpu; 1031 1032 refresh_zone_stat_thresholds(); 1033 register_cpu_notifier(&vmstat_notifier); 1034 1035 for_each_online_cpu(cpu) 1036 start_cpu_timer(cpu); 1037 #endif 1038 #ifdef CONFIG_PROC_FS 1039 proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); 1040 proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); 1041 proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); 1042 proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); 1043 #endif 1044 return 0; 1045 } 1046 module_init(setup_vmstat) 1047 1048 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1049 #include <linux/debugfs.h> 1050 1051 static struct dentry *extfrag_debug_root; 1052 1053 /* 1054 * Return an index indicating how much of the available free memory is 1055 * unusable for an allocation of the requested size. 1056 */ 1057 static int unusable_free_index(unsigned int order, 1058 struct contig_page_info *info) 1059 { 1060 /* No free memory is interpreted as all free memory is unusable */ 1061 if (info->free_pages == 0) 1062 return 1000; 1063 1064 /* 1065 * Index should be a value between 0 and 1. Return a value to 3 1066 * decimal places. 1067 * 1068 * 0 => no fragmentation 1069 * 1 => high fragmentation 1070 */ 1071 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); 1072 1073 } 1074 1075 static void unusable_show_print(struct seq_file *m, 1076 pg_data_t *pgdat, struct zone *zone) 1077 { 1078 unsigned int order; 1079 int index; 1080 struct contig_page_info info; 1081 1082 seq_printf(m, "Node %d, zone %8s ", 1083 pgdat->node_id, 1084 zone->name); 1085 for (order = 0; order < MAX_ORDER; ++order) { 1086 fill_contig_page_info(zone, order, &info); 1087 index = unusable_free_index(order, &info); 1088 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); 1089 } 1090 1091 seq_putc(m, '\n'); 1092 } 1093 1094 /* 1095 * Display unusable free space index 1096 * 1097 * The unusable free space index measures how much of the available free 1098 * memory cannot be used to satisfy an allocation of a given size and is a 1099 * value between 0 and 1. The higher the value, the more of free memory is 1100 * unusable and by implication, the worse the external fragmentation is. This 1101 * can be expressed as a percentage by multiplying by 100. 1102 */ 1103 static int unusable_show(struct seq_file *m, void *arg) 1104 { 1105 pg_data_t *pgdat = (pg_data_t *)arg; 1106 1107 /* check memoryless node */ 1108 if (!node_state(pgdat->node_id, N_HIGH_MEMORY)) 1109 return 0; 1110 1111 walk_zones_in_node(m, pgdat, unusable_show_print); 1112 1113 return 0; 1114 } 1115 1116 static const struct seq_operations unusable_op = { 1117 .start = frag_start, 1118 .next = frag_next, 1119 .stop = frag_stop, 1120 .show = unusable_show, 1121 }; 1122 1123 static int unusable_open(struct inode *inode, struct file *file) 1124 { 1125 return seq_open(file, &unusable_op); 1126 } 1127 1128 static const struct file_operations unusable_file_ops = { 1129 .open = unusable_open, 1130 .read = seq_read, 1131 .llseek = seq_lseek, 1132 .release = seq_release, 1133 }; 1134 1135 static void extfrag_show_print(struct seq_file *m, 1136 pg_data_t *pgdat, struct zone *zone) 1137 { 1138 unsigned int order; 1139 int index; 1140 1141 /* Alloc on stack as interrupts are disabled for zone walk */ 1142 struct contig_page_info info; 1143 1144 seq_printf(m, "Node %d, zone %8s ", 1145 pgdat->node_id, 1146 zone->name); 1147 for (order = 0; order < MAX_ORDER; ++order) { 1148 fill_contig_page_info(zone, order, &info); 1149 index = __fragmentation_index(order, &info); 1150 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); 1151 } 1152 1153 seq_putc(m, '\n'); 1154 } 1155 1156 /* 1157 * Display fragmentation index for orders that allocations would fail for 1158 */ 1159 static int extfrag_show(struct seq_file *m, void *arg) 1160 { 1161 pg_data_t *pgdat = (pg_data_t *)arg; 1162 1163 walk_zones_in_node(m, pgdat, extfrag_show_print); 1164 1165 return 0; 1166 } 1167 1168 static const struct seq_operations extfrag_op = { 1169 .start = frag_start, 1170 .next = frag_next, 1171 .stop = frag_stop, 1172 .show = extfrag_show, 1173 }; 1174 1175 static int extfrag_open(struct inode *inode, struct file *file) 1176 { 1177 return seq_open(file, &extfrag_op); 1178 } 1179 1180 static const struct file_operations extfrag_file_ops = { 1181 .open = extfrag_open, 1182 .read = seq_read, 1183 .llseek = seq_lseek, 1184 .release = seq_release, 1185 }; 1186 1187 static int __init extfrag_debug_init(void) 1188 { 1189 extfrag_debug_root = debugfs_create_dir("extfrag", NULL); 1190 if (!extfrag_debug_root) 1191 return -ENOMEM; 1192 1193 if (!debugfs_create_file("unusable_index", 0444, 1194 extfrag_debug_root, NULL, &unusable_file_ops)) 1195 return -ENOMEM; 1196 1197 if (!debugfs_create_file("extfrag_index", 0444, 1198 extfrag_debug_root, NULL, &extfrag_file_ops)) 1199 return -ENOMEM; 1200 1201 return 0; 1202 } 1203 1204 module_init(extfrag_debug_init); 1205 #endif 1206