/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
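
/*
 * Illustrative example (hypothetical numbers, not from the source): with a
 * low-to-min watermark gap of 1024 pages and 16 online CPUs,
 * calculate_pressure_threshold() above returns max(1, 1024 / 16) = 64,
 * well below the 125 cap. Each per-cpu differential may then drift by at
 * most 64 pages, so the worst-case total drift (16 * 64 = 1024 pages)
 * cannot exceed the watermark gap.
 */
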
int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

/*
 * Re-set the per-cpu stat threshold of every zone in this node that has a
 * percpu_drift_mark, using the value produced by the supplied callback
 * (calculate_pressure_threshold() or calculate_normal_threshold() above).
 */
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_possible_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
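
/*
 * Illustrative example (hypothetical numbers): with stat_threshold == 32,
 * __mod_zone_page_state() above only touches the cpu-local s8 differential
 * while the running sum stays within [-32, 32]. Once a delta pushes it
 * outside that window, the whole sum is folded into the zone and global
 * atomic counters and the differential is reset to 0, so the expensive
 * shared-cacheline updates happen roughly once per threshold's worth of
 * single-page updates.
 */
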
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

#ifdef CONFIG_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *	0	No overstepping
 *	1	Overstepping half of threshold
 *	-1	Overstepping minus half of threshold
 */
static inline void mod_state(struct zone *zone,
	enum zone_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the following
		 * will apply the threshold again and therefore bring the
		 * counter under the threshold.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	mod_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	mod_state(zone, item, 1, 1);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);
#endif

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor
		 *
		 * Check if there are pages remaining in this pageset
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}

#endif

#ifdef CONFIG_NUMA
/*
 * zonelist = the list of zones passed to the allocator
 * z	    = the zone from which the allocation occurred.
 *
 * Must be called with interrupts disabled.
 *
 * When __GFP_OTHER_NODE is set assume the node of the preferred
 * zone is the local node. This is useful for daemons who allocate
 * memory on behalf of other processes.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == ((flags & __GFP_OTHER_NODE) ?
			preferred_zone->node : numa_node_id()))
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated.
 * Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
}

/* Same as __fragmentation_index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_COMPACTION)
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_mlock",
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",
	"nr_isolated_anon",
	"nr_isolated_file",
	"nr_shmem",
	"nr_dirtied",
	"nr_written",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif
	"nr_anon_transparent_hugepages",
	"nr_dirty_threshold",
	"nr_dirty_background_threshold",

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

#ifdef CONFIG_NUMA
	"zone_reclaim_failed",
#endif
	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"kswapd_low_wmark_hit_quickly",
	"kswapd_high_wmark_hit_quickly",
	"kswapd_skip_congestion_wait",
	"pageoutrun",
	"allocstall",

	"pgrotated",

#ifdef CONFIG_COMPACTION
	"compact_blocks_moved",
	"compact_pages_moved",
	"compact_pagemigrate_failed",
	"compact_stall",
	"compact_fail",
	"compact_success",
#endif

#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
};

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->pages_scanned,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone->all_unreclaimable,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
}
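
/*
 * Note on the update cadence (descriptive note, not from the original
 * source): each online cpu re-runs vmstat_update() above roughly every
 * sysctl_stat_interval (HZ, i.e. about one second) to fold its per-cpu
 * differentials back into the global counters. The work item is
 * deferrable, so an idle cpu is not woken up just to refresh statistics.
 */
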
/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		refresh_zone_stat_thresholds();
		start_cpu_timer(cpu);
		node_set_state(cpu_to_node(cpu), N_CPU);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
#include <linux/debugfs.h>

static struct dentry *extfrag_debug_root;

/*
 * Return an index indicating how much of the available free memory is
 * unusable for an allocation of the requested size.
 */
static int unusable_free_index(unsigned int order,
				struct contig_page_info *info)
{
	/* No free memory is interpreted as all free memory is unusable */
	if (info->free_pages == 0)
		return 1000;

	/*
	 * Index should be a value between 0 and 1. Return a value to 3
	 * decimal places.
	 *
	 * 0 => no fragmentation
	 * 1 => high fragmentation
	 */
	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);

}

static void unusable_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = unusable_free_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}
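
/*
 * Worked example (hypothetical numbers) for the values printed above:
 * a zone with 1000 free pages, of which only 200 sit in blocks of at
 * least the requested order, gives unusable_free_index() of
 * (1000 - 200) * 1000 / 1000 = 800, shown as "0.800", i.e. 80% of the
 * free memory cannot back that allocation.
 */
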
/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void extfrag_show_print(struct seq_file *m,
				pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init extfrag_debug_init(void)
{
	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		return -ENOMEM;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		return -ENOMEM;

	return 0;
}

module_init(extfrag_debug_init);
#endif