/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
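/*
 * Illustrative example (editorial note, not part of the original source):
 * with a low-to-min watermark distance of 512 pages and 16 online CPUs,
 * calculate_pressure_threshold() above yields max(1, 512 / 16) = 32, so the
 * worst-case combined per-cpu drift (16 * 32 = 512 pages) cannot exceed the
 * gap between the low and min watermarks.
 */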
int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->managed_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			   long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(x > t || x < -t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);
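/*
 * Illustrative example (editorial note, not part of the original source):
 * assume a per-cpu stat_threshold of 32.  Two calls to
 * __mod_zone_page_state(zone, NR_FREE_PAGES, 20) on the same CPU first
 * store a differential of 20, then see 20 + 20 = 40 exceed the threshold,
 * fold the full 40 into the zone and global counters and reset the per-cpu
 * differential to 0.  The global counters therefore lag by at most
 * "threshold" pages per CPU per item.
 */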
/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *      0       No overstepping
 *      1       Overstepping half of threshold
 *      -1      Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
	enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
	enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyways
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (n > t || n < -t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif
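/*
 * Illustrative note (editorial addition): the overstep bias halves the rate
 * of folds for counters moving steadily in one direction.  With
 * stat_threshold t = 32, __inc_zone_state() folds once the per-cpu
 * differential reaches 33, adds 33 + 16 to the zone counter and leaves the
 * differential at -16, so another 49 increments fit before the next fold.
 * The overstep_mode argument of mod_zone_state() and mod_node_state()
 * applies the same bias for the inc/dec wrappers.
 */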
/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and will have to be only done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {

				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA
		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * processor
			 *
			 * Check if there are pages remaining in this pageset
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(p->expire) ||
			    !__this_cpu_read(p->pcp.count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(p->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(p->expire))
				continue;

			if (__this_cpu_read(p->pcp.count)) {
				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
				changes++;
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

	changes += fold_diff(global_zone_diff, global_node_diff);
	return changes;
}
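/*
 * Editorial note (illustrative, not part of the original source): the
 * p->expire countdown above means a pageset belonging to a remote NUMA
 * node is drained after roughly three idle stat intervals (about 3 seconds
 * with the default sysctl_stat_interval of HZ) during which the local CPU
 * made no further counter updates against that zone.
 */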
/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

	fold_diff(global_zone_diff, global_node_diff);
}

/*
 * This is only called if !populated_zone(zone), which implies no other users of
 * pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];
			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_zone_stat[i]);
		}
}
#endif

#ifdef CONFIG_NUMA
/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				       enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};
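/*
 * Illustrative example for the helpers below (editorial addition): a zone
 * holding ten free order-0 pages and one free order-3 block has
 * free_pages = 18 and free_blocks_total = 11; for suitable_order = 2 the
 * order-3 block counts as two suitable blocks, so free_blocks_suitable = 2
 * and __fragmentation_index() reports -1000 (the request could be met).
 * If instead all 18 pages were isolated order-0 blocks, an order-2 request
 * would fail and the index becomes
 * 1000 - (1000 + 18 * 1000 / 4) / 18 = 695, i.e. 0.695, pointing at
 * external fragmentation rather than a lack of memory.
 */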
/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}

/* Same as __fragmentation index but allocs contig_page_info on stack */
int fragmentation_index(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	return __fragmentation_index(order, &info);
}
#endif

#if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

const char * const vmstat_text[] = {
	/* enum zone_stat_item counters */
	"nr_free_pages",
	"nr_zone_inactive_anon",
	"nr_zone_active_anon",
	"nr_zone_inactive_file",
	"nr_zone_active_file",
	"nr_zone_unevictable",
	"nr_zone_write_pending",
	"nr_mlock",
	"nr_page_table_pages",
	"nr_kernel_stack",
	"nr_bounce",
#if IS_ENABLED(CONFIG_ZSMALLOC)
	"nr_zspages",
#endif
#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif
	"nr_free_cma",

	/* Node-based counters */
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
	"nr_unevictable",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_isolated_anon",
	"nr_isolated_file",
	"workingset_refault",
	"workingset_activate",
"workingset_nodereclaim", 960 "nr_anon_pages", 961 "nr_mapped", 962 "nr_file_pages", 963 "nr_dirty", 964 "nr_writeback", 965 "nr_writeback_temp", 966 "nr_shmem", 967 "nr_shmem_hugepages", 968 "nr_shmem_pmdmapped", 969 "nr_anon_transparent_hugepages", 970 "nr_unstable", 971 "nr_vmscan_write", 972 "nr_vmscan_immediate_reclaim", 973 "nr_dirtied", 974 "nr_written", 975 976 /* enum writeback_stat_item counters */ 977 "nr_dirty_threshold", 978 "nr_dirty_background_threshold", 979 980 #ifdef CONFIG_VM_EVENT_COUNTERS 981 /* enum vm_event_item counters */ 982 "pgpgin", 983 "pgpgout", 984 "pswpin", 985 "pswpout", 986 987 TEXTS_FOR_ZONES("pgalloc") 988 TEXTS_FOR_ZONES("allocstall") 989 TEXTS_FOR_ZONES("pgskip") 990 991 "pgfree", 992 "pgactivate", 993 "pgdeactivate", 994 "pglazyfree", 995 996 "pgfault", 997 "pgmajfault", 998 "pglazyfreed", 999 1000 "pgrefill", 1001 "pgsteal_kswapd", 1002 "pgsteal_direct", 1003 "pgscan_kswapd", 1004 "pgscan_direct", 1005 "pgscan_direct_throttle", 1006 1007 #ifdef CONFIG_NUMA 1008 "zone_reclaim_failed", 1009 #endif 1010 "pginodesteal", 1011 "slabs_scanned", 1012 "kswapd_inodesteal", 1013 "kswapd_low_wmark_hit_quickly", 1014 "kswapd_high_wmark_hit_quickly", 1015 "pageoutrun", 1016 1017 "pgrotated", 1018 1019 "drop_pagecache", 1020 "drop_slab", 1021 "oom_kill", 1022 1023 #ifdef CONFIG_NUMA_BALANCING 1024 "numa_pte_updates", 1025 "numa_huge_pte_updates", 1026 "numa_hint_faults", 1027 "numa_hint_faults_local", 1028 "numa_pages_migrated", 1029 #endif 1030 #ifdef CONFIG_MIGRATION 1031 "pgmigrate_success", 1032 "pgmigrate_fail", 1033 #endif 1034 #ifdef CONFIG_COMPACTION 1035 "compact_migrate_scanned", 1036 "compact_free_scanned", 1037 "compact_isolated", 1038 "compact_stall", 1039 "compact_fail", 1040 "compact_success", 1041 "compact_daemon_wake", 1042 "compact_daemon_migrate_scanned", 1043 "compact_daemon_free_scanned", 1044 #endif 1045 1046 #ifdef CONFIG_HUGETLB_PAGE 1047 "htlb_buddy_alloc_success", 1048 "htlb_buddy_alloc_fail", 1049 #endif 1050 "unevictable_pgs_culled", 1051 "unevictable_pgs_scanned", 1052 "unevictable_pgs_rescued", 1053 "unevictable_pgs_mlocked", 1054 "unevictable_pgs_munlocked", 1055 "unevictable_pgs_cleared", 1056 "unevictable_pgs_stranded", 1057 1058 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1059 "thp_fault_alloc", 1060 "thp_fault_fallback", 1061 "thp_collapse_alloc", 1062 "thp_collapse_alloc_failed", 1063 "thp_file_alloc", 1064 "thp_file_mapped", 1065 "thp_split_page", 1066 "thp_split_page_failed", 1067 "thp_deferred_split_page", 1068 "thp_split_pmd", 1069 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD 1070 "thp_split_pud", 1071 #endif 1072 "thp_zero_page_alloc", 1073 "thp_zero_page_alloc_failed", 1074 #endif 1075 #ifdef CONFIG_MEMORY_BALLOON 1076 "balloon_inflate", 1077 "balloon_deflate", 1078 #ifdef CONFIG_BALLOON_COMPACTION 1079 "balloon_migrate", 1080 #endif 1081 #endif /* CONFIG_MEMORY_BALLOON */ 1082 #ifdef CONFIG_DEBUG_TLBFLUSH 1083 #ifdef CONFIG_SMP 1084 "nr_tlb_remote_flush", 1085 "nr_tlb_remote_flush_received", 1086 #endif /* CONFIG_SMP */ 1087 "nr_tlb_local_flush_all", 1088 "nr_tlb_local_flush_one", 1089 #endif /* CONFIG_DEBUG_TLBFLUSH */ 1090 1091 #ifdef CONFIG_DEBUG_VM_VMACACHE 1092 "vmacache_find_calls", 1093 "vmacache_find_hits", 1094 "vmacache_full_flushes", 1095 #endif 1096 #endif /* CONFIG_VM_EVENTS_COUNTERS */ 1097 }; 1098 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */ 1099 1100 1101 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \ 1102 defined(CONFIG_PROC_FS) 1103 static void *frag_start(struct seq_file *m, 
{
	pg_data_t *pgdat;
	loff_t node = *pos;

	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/*
 * Walk zones in a node and print using a callback.
 * If @assert_populated is true, only use callback for zones that are populated.
 */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		bool assert_populated, bool nolock,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (assert_populated && !populated_zone(zone))
			continue;

		if (!nolock)
			spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		if (!nolock)
			spin_unlock_irqrestore(&zone->lock, flags);
	}
}
#endif

#ifdef CONFIG_PROC_FS
static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
	return 0;
}

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}

/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		page = pfn_to_online_page(pfn);
		if (!page)
			continue;

		/* Watch for unexpected holes punched in the memmap */
		if (!memmap_valid_within(pfn, page, zone))
			continue;

		if (page_zone(page) != zone)
			continue;

		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, true, false,
		pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * Print out the number of pageblocks for each migratetype that contain pages
 * of other types. This gives an indication of how well fallbacks are being
 * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
 * to determine what is going on
 */
static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
{
#ifdef CONFIG_PAGE_OWNER
	int mtype;

	if (!static_branch_unlikely(&page_owner_inited))
		return;

	drain_all_pages(NULL);

	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, true, true,
		pagetypeinfo_showmixedcount_print);
#endif /* CONFIG_PAGE_OWNER */
}
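/*
 * Illustrative /proc/pagetypeinfo excerpt (editorial addition; the exact
 * values are machine dependent and only the format strings above are
 * authoritative):
 *
 *   Free pages count per migrate type at order       0      1      2 ...
 *   Node    0, zone   Normal, type    Unmovable    135     13      0 ...
 *
 *   Number of blocks type     Unmovable      Movable  Reclaimable ...
 *   Node 0, zone   Normal           123         3456           12 ...
 */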
/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);
	pagetypeinfo_showmixedcount(m, pgdat);

	return 0;
}

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations buddyinfo_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_operations = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
{
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *compare = &pgdat->node_zones[zid];

		if (populated_zone(compare))
			return zone == compare;
	}

	return false;
}

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	if (is_zone_first_populated(pgdat, zone)) {
		seq_printf(m, "\n  per-node stats");
		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			seq_printf(m, "\n      %-12s %lu",
				vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
				node_page_state(pgdat, i));
		}
	}
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        spanned  %lu"
		   "\n        present  %lu"
		   "\n        managed  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   min_wmark_pages(zone),
		   low_wmark_pages(zone),
		   high_wmark_pages(zone),
		   zone->spanned_pages,
		   zone->present_pages,
		   zone->managed_pages);

	seq_printf(m,
		   "\n        protection: (%ld",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
	seq_putc(m, ')');

	/* If unpopulated, no other information is useful */
	if (!populated_zone(zone)) {
		seq_putc(m, '\n');
		return;
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n      %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m, "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = per_cpu_ptr(zone->pageset, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  node_unreclaimable:  %u"
		   "\n  start_pfn:           %lu"
		   "\n  node_inactive_ratio: %u",
		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
		   zone->zone_start_pfn,
		   zone->zone_pgdat->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.  All zones are printed regardless
 * of whether they are populated or not: lowmem_reserve_ratio operates on the
 * set of all zones and userspace would not be aware of such zones if they are
 * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
	int i, stat_items_size;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_NODE_STAT_ITEMS * sizeof(unsigned long) +
			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);

#ifdef CONFIG_VM_EVENT_COUNTERS
	stat_items_size += sizeof(struct vm_event_state);
#endif

	v = kmalloc(stat_items_size, GFP_KERNEL);
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
	v += NR_VM_ZONE_STAT_ITEMS;

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		v[i] = global_node_page_state(i);
	v += NR_VM_NODE_STAT_ITEMS;

	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
			    v + NR_DIRTY_THRESHOLD);
	v += NR_VM_WRITEBACK_STAT_ITEMS;

#ifdef CONFIG_VM_EVENT_COUNTERS
	all_vm_events(v);
	v[PGPGIN] /= 2;		/* sectors -> kbytes */
	v[PGPGOUT] /= 2;
#endif
	return (unsigned long *)m->private + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_puts(m, vmstat_text[off]);
	seq_put_decimal_ull(m, " ", *l);
	seq_putc(m, '\n');
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

#ifdef CONFIG_PROC_FS
static void refresh_vm_stats(struct work_struct *work)
{
	refresh_cpu_vm_stats(true);
}

int vmstat_refresh(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	long val;
	int err;
	int i;

	/*
	 * The regular update, every sysctl_stat_interval, may come later
	 * than expected: leaving a significant amount in per_cpu buckets.
	 * This is particularly misleading when checking a quantity of HUGE
	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
	 * which can equally be echo'ed to or cat'ted from (by root),
	 * can be used to update the stats just before reading them.
	 *
	 * Oh, and since global_page_state() etc. are so careful to hide
	 * transiently negative values, report an error here if any of
	 * the stats is negative, so we know to go looking for imbalance.
	 */
	err = schedule_on_each_cpu(refresh_vm_stats);
	if (err)
		return err;
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
		val = atomic_long_read(&vm_zone_stat[i]);
		if (val < 0) {
			pr_warn("%s: %s %ld\n",
				__func__, vmstat_text[i], val);
			err = -EINVAL;
		}
	}
	if (err)
		return err;
	if (write)
		*ppos += *lenp;
	else
		*lenp = 0;
	return 0;
}
#endif /* CONFIG_PROC_FS */

static void vmstat_update(struct work_struct *w)
{
	if (refresh_cpu_vm_stats(true)) {
		/*
		 * Counters were updated so we expect more updates
		 * to occur in the future. Keep on running the
		 * update worker thread.
		 */
		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
				this_cpu_ptr(&vmstat_work),
				round_jiffies_relative(sysctl_stat_interval));
	}
}

/*
 * Check if the diffs for a certain cpu indicate that
 * an update is needed.
 */
static bool need_update(int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);

		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
		/*
		 * The fast way of checking if there are any vmstat diffs.
		 * This works because the diffs are byte sized items.
		 */
		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
			return true;

	}
	return false;
}

/*
 * Switch off vmstat processing and then fold all the remaining differentials
 * until the diffs stay at zero. The function is used by NOHZ and can only be
 * invoked when tick processing is not active.
 */
void quiet_vmstat(void)
{
	if (system_state != SYSTEM_RUNNING)
		return;

	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
		return;

	if (!need_update(smp_processor_id()))
		return;

	/*
	 * Just refresh counters and do not care about the pending delayed
	 * vmstat_update. It doesn't fire that often to matter and canceling
	 * it would be too expensive from this path.
	 * vmstat_shepherd will take care about that for us.
	 */
	refresh_cpu_vm_stats(false);
}

/*
 * Shepherd worker thread that checks the
 * differentials of processors that have their worker
 * threads for vm statistics updates disabled because of
 * inactivity.
 */
static void vmstat_shepherd(struct work_struct *w);

static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);

static void vmstat_shepherd(struct work_struct *w)
{
	int cpu;

	get_online_cpus();
	/* Check processors whose vmstat worker threads have been disabled */
	for_each_online_cpu(cpu) {
		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);

		if (!delayed_work_pending(dw) && need_update(cpu))
			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
	}
	put_online_cpus();

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init start_shepherd_timer(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
			vmstat_update);

	schedule_delayed_work(&shepherd,
		round_jiffies_relative(sysctl_stat_interval));
}

static void __init init_cpu_node_state(void)
{
	int node;

	for_each_online_node(node) {
		if (cpumask_weight(cpumask_of_node(node)) > 0)
			node_set_state(node, N_CPU);
	}
}

static int vmstat_cpu_online(unsigned int cpu)
{
	refresh_zone_stat_thresholds();
	node_set_state(cpu_to_node(cpu), N_CPU);
	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}

static int vmstat_cpu_dead(unsigned int cpu)
{
	const struct cpumask *node_cpus;
	int node;

	node = cpu_to_node(cpu);

	refresh_zone_stat_thresholds();
	node_cpus = cpumask_of_node(node);
	if (cpumask_weight(node_cpus) > 0)
		return 0;

	node_clear_state(node, N_CPU);
	return 0;
}

#endif

struct workqueue_struct *mm_percpu_wq;

void __init init_mm_internals(void)
{
	int ret __maybe_unused;

	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);

#ifdef CONFIG_SMP
	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
					NULL, vmstat_cpu_dead);
	if (ret < 0)
		pr_err("vmstat: failed to register 'dead' hotplug state\n");

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
					vmstat_cpu_online,
					vmstat_cpu_down_prep);
	if (ret < 0)
		pr_err("vmstat: failed to register 'online' hotplug state\n");

	get_online_cpus();
	init_cpu_node_state();
	put_online_cpus();

	start_shepherd_timer();
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", 0444, NULL, &buddyinfo_file_operations);
	proc_create("pagetypeinfo", 0444, NULL, &pagetypeinfo_file_operations);
proc_create("vmstat", 0444, NULL, &vmstat_file_operations); 1795 proc_create("zoneinfo", 0444, NULL, &zoneinfo_file_operations); 1796 #endif 1797 } 1798 1799 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1800 1801 /* 1802 * Return an index indicating how much of the available free memory is 1803 * unusable for an allocation of the requested size. 1804 */ 1805 static int unusable_free_index(unsigned int order, 1806 struct contig_page_info *info) 1807 { 1808 /* No free memory is interpreted as all free memory is unusable */ 1809 if (info->free_pages == 0) 1810 return 1000; 1811 1812 /* 1813 * Index should be a value between 0 and 1. Return a value to 3 1814 * decimal places. 1815 * 1816 * 0 => no fragmentation 1817 * 1 => high fragmentation 1818 */ 1819 return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages); 1820 1821 } 1822 1823 static void unusable_show_print(struct seq_file *m, 1824 pg_data_t *pgdat, struct zone *zone) 1825 { 1826 unsigned int order; 1827 int index; 1828 struct contig_page_info info; 1829 1830 seq_printf(m, "Node %d, zone %8s ", 1831 pgdat->node_id, 1832 zone->name); 1833 for (order = 0; order < MAX_ORDER; ++order) { 1834 fill_contig_page_info(zone, order, &info); 1835 index = unusable_free_index(order, &info); 1836 seq_printf(m, "%d.%03d ", index / 1000, index % 1000); 1837 } 1838 1839 seq_putc(m, '\n'); 1840 } 1841 1842 /* 1843 * Display unusable free space index 1844 * 1845 * The unusable free space index measures how much of the available free 1846 * memory cannot be used to satisfy an allocation of a given size and is a 1847 * value between 0 and 1. The higher the value, the more of free memory is 1848 * unusable and by implication, the worse the external fragmentation is. This 1849 * can be expressed as a percentage by multiplying by 100. 
/*
 * Display unusable free space index
 *
 * The unusable free space index measures how much of the available free
 * memory cannot be used to satisfy an allocation of a given size and is a
 * value between 0 and 1. The higher the value, the more of free memory is
 * unusable and by implication, the worse the external fragmentation is. This
 * can be expressed as a percentage by multiplying by 100.
 */
static int unusable_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_MEMORY))
		return 0;

	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);

	return 0;
}

static const struct seq_operations unusable_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= unusable_show,
};

static int unusable_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &unusable_op);
}

static const struct file_operations unusable_file_ops = {
	.open		= unusable_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void extfrag_show_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	unsigned int order;
	int index;

	/* Alloc on stack as interrupts are disabled for zone walk */
	struct contig_page_info info;

	seq_printf(m, "Node %d, zone %8s ",
				pgdat->node_id,
				zone->name);
	for (order = 0; order < MAX_ORDER; ++order) {
		fill_contig_page_info(zone, order, &info);
		index = __fragmentation_index(order, &info);
		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
	}

	seq_putc(m, '\n');
}

/*
 * Display fragmentation index for orders that allocations would fail for
 */
static int extfrag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);

	return 0;
}

static const struct seq_operations extfrag_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= extfrag_show,
};

static int extfrag_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &extfrag_op);
}

static const struct file_operations extfrag_file_ops = {
	.open		= extfrag_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init extfrag_debug_init(void)
{
	struct dentry *extfrag_debug_root;

	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
	if (!extfrag_debug_root)
		return -ENOMEM;

	if (!debugfs_create_file("unusable_index", 0444,
			extfrag_debug_root, NULL, &unusable_file_ops))
		goto fail;

	if (!debugfs_create_file("extfrag_index", 0444,
			extfrag_debug_root, NULL, &extfrag_file_ops))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(extfrag_debug_root);
	return -ENOMEM;
}

module_init(extfrag_debug_init);
#endif