xref: /openbmc/linux/mm/vmstat.c (revision 3805e6a1)
1 /*
2  *  linux/mm/vmstat.c
3  *
4  *  Manages VM statistics
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  zoned VM statistics
8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
9  *		Christoph Lameter <christoph@lameter.com>
10  *  Copyright (C) 2008-2014 Christoph Lameter
11  */
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/err.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/cpu.h>
18 #include <linux/cpumask.h>
19 #include <linux/vmstat.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/debugfs.h>
23 #include <linux/sched.h>
24 #include <linux/math64.h>
25 #include <linux/writeback.h>
26 #include <linux/compaction.h>
27 #include <linux/mm_inline.h>
28 #include <linux/page_ext.h>
29 #include <linux/page_owner.h>
30 
31 #include "internal.h"
32 
33 #ifdef CONFIG_VM_EVENT_COUNTERS
34 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35 EXPORT_PER_CPU_SYMBOL(vm_event_states);
36 
37 static void sum_vm_events(unsigned long *ret)
38 {
39 	int cpu;
40 	int i;
41 
42 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43 
44 	for_each_online_cpu(cpu) {
45 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46 
47 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48 			ret[i] += this->event[i];
49 	}
50 }
51 
52 /*
53  * Accumulate the vm event counters across all CPUs.
54  * The result is unavoidably approximate - it can change
55  * during and after execution of this function.
56  */
57 void all_vm_events(unsigned long *ret)
58 {
59 	get_online_cpus();
60 	sum_vm_events(ret);
61 	put_online_cpus();
62 }
63 EXPORT_SYMBOL_GPL(all_vm_events);
64 
65 /*
66  * Fold the foreign cpu events into our own.
67  *
68  * This is adding to the events on one processor
69  * but keeps the global counts constant.
70  */
71 void vm_events_fold_cpu(int cpu)
72 {
73 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74 	int i;
75 
76 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77 		count_vm_events(i, fold_state->event[i]);
78 		fold_state->event[i] = 0;
79 	}
80 }
81 
82 #endif /* CONFIG_VM_EVENT_COUNTERS */
83 
84 /*
85  * Manage combined zone based / global counters
86  *
87  * vm_stat contains the global counters
88  */
89 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90 EXPORT_SYMBOL(vm_stat);
91 
92 #ifdef CONFIG_SMP
93 
94 int calculate_pressure_threshold(struct zone *zone)
95 {
96 	int threshold;
97 	int watermark_distance;
98 
99 	/*
100 	 * As vmstats are not up to date, there is drift between the estimated
101 	 * and real values. For high thresholds and a high number of CPUs, it
102 	 * is possible for the min watermark to be breached while the estimated
103 	 * value looks fine. The pressure threshold is a reduced value such
104 	 * that even the maximum amount of drift will not accidentally breach
105 	 * the min watermark
106 	 */
107 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
108 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
109 
110 	/*
111 	 * Maximum threshold is 125
112 	 */
113 	threshold = min(125, threshold);
114 
115 	return threshold;
116 }
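
/*
 * A worked example of the pressure threshold above, with assumed numbers:
 * if low_wmark_pages() - min_wmark_pages() is 256 pages and there are 16
 * online CPUs, then
 *
 *	watermark_distance = 256
 *	threshold = max(1, 256 / 16) = 16
 *	threshold = min(125, 16)     = 16
 *
 * so each CPU may hold at most 16 pages of NR_FREE_PAGES drift and the total
 * drift (16 * 16 = 256 pages) cannot silently push the zone below the min
 * watermark while the estimate still looks above the low watermark.
 */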
117 
118 int calculate_normal_threshold(struct zone *zone)
119 {
120 	int threshold;
121 	int mem;	/* memory in 128 MB units */
122 
123 	/*
124 	 * The threshold scales with the number of processors and the amount
125 	 * of memory per zone. More memory means that we can defer updates for
126 	 * longer; more processors could lead to more contention.
127  	 * fls() is used to have a cheap way of logarithmic scaling.
128 	 *
129 	 * Some sample thresholds:
130 	 *
131 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
132 	 * ------------------------------------------------------------------
133 	 * 8		1		1	0.9-1 GB	4
134 	 * 16		2		2	0.9-1 GB	4
135 	 * 20 		2		2	1-2 GB		5
136 	 * 24		2		2	2-4 GB		6
137 	 * 28		2		2	4-8 GB		7
138 	 * 32		2		2	8-16 GB		8
139 	 * 4		2		2	<128M		1
140 	 * 30		4		3	2-4 GB		5
141 	 * 48		4		3	8-16 GB		8
142 	 * 32		8		4	1-2 GB		4
143 	 * 32		8		4	0.9-1GB		4
144 	 * 10		16		5	<128M		1
145 	 * 40		16		5	900M		4
146 	 * 70		64		7	2-4 GB		5
147 	 * 84		64		7	4-8 GB		6
148 	 * 108		512		9	4-8 GB		6
149 	 * 125		1024		10	8-16 GB		8
150 	 * 125		1024		10	16-32 GB	9
151 	 */
152 
153 	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
154 
155 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
156 
157 	/*
158 	 * Maximum threshold is 125
159 	 */
160 	threshold = min(125, threshold);
161 
162 	return threshold;
163 }
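
/*
 * A worked example of the formula above, matching the first row of the
 * sample table (all values assumed): one online CPU and a ~0.9 GB zone.
 *
 *	mem = managed_pages >> (27 - PAGE_SHIFT)  = ~7  (128 MB units)
 *	threshold = 2 * fls(1) * (1 + fls(7))     = 2 * 1 * 4 = 8
 *	threshold = min(125, 8)                   = 8
 */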
164 
165 /*
166  * Refresh the thresholds for each zone.
167  */
168 void refresh_zone_stat_thresholds(void)
169 {
170 	struct zone *zone;
171 	int cpu;
172 	int threshold;
173 
174 	for_each_populated_zone(zone) {
175 		unsigned long max_drift, tolerate_drift;
176 
177 		threshold = calculate_normal_threshold(zone);
178 
179 		for_each_online_cpu(cpu)
180 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
181 							= threshold;
182 
183 		/*
184 		 * Only set percpu_drift_mark if there is a danger that
185 		 * NR_FREE_PAGES reports the low watermark is ok when in fact
186 		 * the min watermark could be breached by an allocation
187 		 */
188 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
189 		max_drift = num_online_cpus() * threshold;
190 		if (max_drift > tolerate_drift)
191 			zone->percpu_drift_mark = high_wmark_pages(zone) +
192 					max_drift;
193 	}
194 }
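
/*
 * An illustrative calculation for percpu_drift_mark, with assumed numbers:
 * if calculate_normal_threshold() returned 32 and there are 64 online CPUs,
 * max_drift = 64 * 32 = 2048 pages. If the low and min watermarks are only
 * 512 pages apart, 2048 > 512, so percpu_drift_mark is set to
 * high_wmark_pages(zone) + 2048; below that mark the watermark checks can
 * fall back to the more expensive zone_page_state_snapshot() rather than
 * trusting the possibly stale NR_FREE_PAGES estimate.
 */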
195 
196 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
197 				int (*calculate_pressure)(struct zone *))
198 {
199 	struct zone *zone;
200 	int cpu;
201 	int threshold;
202 	int i;
203 
204 	for (i = 0; i < pgdat->nr_zones; i++) {
205 		zone = &pgdat->node_zones[i];
206 		if (!zone->percpu_drift_mark)
207 			continue;
208 
209 		threshold = (*calculate_pressure)(zone);
210 		for_each_online_cpu(cpu)
211 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
212 							= threshold;
213 	}
214 }
215 
216 /*
217  * For use when we know that interrupts are disabled,
218  * or when we know that preemption is disabled and that
219  * particular counter cannot be updated from interrupt context.
220  */
221 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
222 			   long delta)
223 {
224 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
225 	s8 __percpu *p = pcp->vm_stat_diff + item;
226 	long x;
227 	long t;
228 
229 	x = delta + __this_cpu_read(*p);
230 
231 	t = __this_cpu_read(pcp->stat_threshold);
232 
233 	if (unlikely(x > t || x < -t)) {
234 		zone_page_state_add(x, zone, item);
235 		x = 0;
236 	}
237 	__this_cpu_write(*p, x);
238 }
239 EXPORT_SYMBOL(__mod_zone_page_state);
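
/*
 * An illustrative (hypothetical) call site for __mod_zone_page_state().
 * Because it uses the non-atomic __this_cpu ops, the caller must have
 * interrupts disabled (or preemption disabled, for counters that are never
 * modified from interrupt context), e.g.:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	__mod_zone_page_state(zone, NR_FILE_PAGES, 1);
 *	local_irq_restore(flags);
 *
 * The interrupt-safe wrappers below (mod_zone_page_state() and friends) do
 * exactly this, or use this_cpu_cmpxchg() where the architecture allows it.
 */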
240 
241 /*
242  * Optimized increment and decrement functions.
243  *
244  * These are only for a single page and therefore can take a struct page *
245  * argument instead of struct zone *. This allows the inclusion of the code
246  * generated for page_zone(page) into the optimized functions.
247  *
248  * No overflow check is necessary and therefore the differential can be
249  * incremented or decremented in place which may allow the compilers to
250  * generate better code.
251  * The increment or decrement is known and therefore one boundary check can
252  * be omitted.
253  *
254  * NOTE: These functions are very performance sensitive. Change only
255  * with care.
256  *
257  * Some processors have inc/dec instructions that are atomic vs an interrupt.
258  * However, the code must first determine the differential location in a zone
259  * based on the processor number and then inc/dec the counter. There is no
260  * guarantee without disabling preemption that the processor will not change
261  * in between and therefore the atomicity vs. interrupt cannot be exploited
262  * in a useful way here.
263  */
264 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
265 {
266 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
267 	s8 __percpu *p = pcp->vm_stat_diff + item;
268 	s8 v, t;
269 
270 	v = __this_cpu_inc_return(*p);
271 	t = __this_cpu_read(pcp->stat_threshold);
272 	if (unlikely(v > t)) {
273 		s8 overstep = t >> 1;
274 
275 		zone_page_state_add(v + overstep, zone, item);
276 		__this_cpu_write(*p, -overstep);
277 	}
278 }
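
/*
 * A worked example of the overstep logic above, with assumed numbers: if
 * stat_threshold is 32 and the per-cpu diff has just been incremented to 33
 * (v = 33 > t = 32), then
 *
 *	overstep = 32 >> 1 = 16
 *	zone_page_state_add(33 + 16, ...)	folds 49 into the zone and
 *						global counters
 *	__this_cpu_write(*p, -16)		restarts the per-cpu diff at -16
 *
 * so the next 48 increments on this CPU stay purely per-cpu, reducing the
 * number of expensive global updates for a counter that keeps growing.
 */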
279 
280 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
281 {
282 	__inc_zone_state(page_zone(page), item);
283 }
284 EXPORT_SYMBOL(__inc_zone_page_state);
285 
286 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
287 {
288 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
289 	s8 __percpu *p = pcp->vm_stat_diff + item;
290 	s8 v, t;
291 
292 	v = __this_cpu_dec_return(*p);
293 	t = __this_cpu_read(pcp->stat_threshold);
294 	if (unlikely(v < -t)) {
295 		s8 overstep = t >> 1;
296 
297 		zone_page_state_add(v - overstep, zone, item);
298 		__this_cpu_write(*p, overstep);
299 	}
300 }
301 
302 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303 {
304 	__dec_zone_state(page_zone(page), item);
305 }
306 EXPORT_SYMBOL(__dec_zone_page_state);
307 
308 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
309 /*
310  * If we have cmpxchg_local support then we do not need to incur the overhead
311  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
312  *
313  * mod_state() modifies the zone counter state through atomic per cpu
314  * operations.
315  *
316  * Overstep mode specifies how overstep should be handled:
317  *     0       No overstepping
318  *     1       Overstepping half of threshold
319  *     -1      Overstepping minus half of threshold
320  */
321 static inline void mod_state(struct zone *zone, enum zone_stat_item item,
322 			     long delta, int overstep_mode)
323 {
324 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
325 	s8 __percpu *p = pcp->vm_stat_diff + item;
326 	long o, n, t, z;
327 
328 	do {
329 		z = 0;  /* overflow to zone counters */
330 
331 		/*
332 		 * The fetching of the stat_threshold is racy. We may apply
333 		 * a counter threshold to the wrong cpu if we get
334 		 * rescheduled while executing here. However, the next
335 		 * counter update will apply the threshold again and
336 		 * therefore bring the counter under the threshold again.
337 		 *
338 		 * Most of the time the thresholds are the same anyways
339 		 * for all cpus in a zone.
340 		 */
341 		t = this_cpu_read(pcp->stat_threshold);
342 
343 		o = this_cpu_read(*p);
344 		n = delta + o;
345 
346 		if (n > t || n < -t) {
347 			int os = overstep_mode * (t >> 1);
348 
349 			/* Overflow must be added to zone counters */
350 			z = n + os;
351 			n = -os;
352 		}
353 	} while (this_cpu_cmpxchg(*p, o, n) != o);
354 
355 	if (z)
356 		zone_page_state_add(z, zone, item);
357 }
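
/*
 * A sketch of one pass through the cmpxchg loop above, with assumed values:
 * threshold t = 32, old per-cpu diff o = 30, delta = 5, overstep_mode = 1.
 *
 *	n = 5 + 30 = 35			exceeds t
 *	os = 1 * (32 >> 1) = 16
 *	z = 35 + 16 = 51		folded into the zone counter afterwards
 *	n = -16				new per-cpu diff
 *
 * this_cpu_cmpxchg(*p, 30, -16) only succeeds if *p is still 30; if an
 * interrupt or a migration changed it meanwhile, the loop simply retries
 * with the fresh value, which is why no irq disabling is needed here.
 */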
358 
359 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
360 			 long delta)
361 {
362 	mod_state(zone, item, delta, 0);
363 }
364 EXPORT_SYMBOL(mod_zone_page_state);
365 
366 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
367 {
368 	mod_state(zone, item, 1, 1);
369 }
370 
371 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
372 {
373 	mod_state(page_zone(page), item, 1, 1);
374 }
375 EXPORT_SYMBOL(inc_zone_page_state);
376 
377 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
378 {
379 	mod_state(page_zone(page), item, -1, -1);
380 }
381 EXPORT_SYMBOL(dec_zone_page_state);
382 #else
383 /*
384  * Use interrupt disable to serialize counter updates
385  */
386 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
387 			 long delta)
388 {
389 	unsigned long flags;
390 
391 	local_irq_save(flags);
392 	__mod_zone_page_state(zone, item, delta);
393 	local_irq_restore(flags);
394 }
395 EXPORT_SYMBOL(mod_zone_page_state);
396 
397 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
398 {
399 	unsigned long flags;
400 
401 	local_irq_save(flags);
402 	__inc_zone_state(zone, item);
403 	local_irq_restore(flags);
404 }
405 
406 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
407 {
408 	unsigned long flags;
409 	struct zone *zone;
410 
411 	zone = page_zone(page);
412 	local_irq_save(flags);
413 	__inc_zone_state(zone, item);
414 	local_irq_restore(flags);
415 }
416 EXPORT_SYMBOL(inc_zone_page_state);
417 
418 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
419 {
420 	unsigned long flags;
421 
422 	local_irq_save(flags);
423 	__dec_zone_page_state(page, item);
424 	local_irq_restore(flags);
425 }
426 EXPORT_SYMBOL(dec_zone_page_state);
427 #endif
428 
429 
430 /*
431  * Fold a differential into the global counters.
432  * Returns the number of counters updated.
433  */
434 static int fold_diff(int *diff)
435 {
436 	int i;
437 	int changes = 0;
438 
439 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
440 		if (diff[i]) {
441 			atomic_long_add(diff[i], &vm_stat[i]);
442 			changes++;
443 	}
444 	return changes;
445 }
446 
447 /*
448  * Update the zone counters for the current cpu.
449  *
450  * Note that refresh_cpu_vm_stats strives to only access
451  * node local memory. The per cpu pagesets on remote zones are placed
452  * in the memory local to the processor using that pageset. So the
453  * loop over all zones will access a series of cachelines local to
454  * the processor.
455  *
456  * The call to zone_page_state_add updates the cachelines with the
457  * statistics in the remote zone struct as well as the global cachelines
458  * with the global counters. These could cause remote node cache line
459  * bouncing and will have to be only done when necessary.
460  *
461  * The function returns the number of global counters updated.
462  */
463 static int refresh_cpu_vm_stats(bool do_pagesets)
464 {
465 	struct zone *zone;
466 	int i;
467 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
468 	int changes = 0;
469 
470 	for_each_populated_zone(zone) {
471 		struct per_cpu_pageset __percpu *p = zone->pageset;
472 
473 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
474 			int v;
475 
476 			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
477 			if (v) {
478 
479 				atomic_long_add(v, &zone->vm_stat[i]);
480 				global_diff[i] += v;
481 #ifdef CONFIG_NUMA
482 				/* 3 seconds idle till flush */
483 				__this_cpu_write(p->expire, 3);
484 #endif
485 			}
486 		}
487 #ifdef CONFIG_NUMA
488 		if (do_pagesets) {
489 			cond_resched();
490 			/*
491 			 * Deal with draining the remote pageset of this
492 			 * processor
493 			 *
494 			 * Check if there are pages remaining in this pageset;
495 			 * if not, there is nothing to expire.
496 			 */
497 			if (!__this_cpu_read(p->expire) ||
498 			       !__this_cpu_read(p->pcp.count))
499 				continue;
500 
501 			/*
502 			 * We never drain zones local to this processor.
503 			 */
504 			if (zone_to_nid(zone) == numa_node_id()) {
505 				__this_cpu_write(p->expire, 0);
506 				continue;
507 			}
508 
509 			if (__this_cpu_dec_return(p->expire))
510 				continue;
511 
512 			if (__this_cpu_read(p->pcp.count)) {
513 				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
514 				changes++;
515 			}
516 		}
517 #endif
518 	}
519 	changes += fold_diff(global_diff);
520 	return changes;
521 }
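
/*
 * An illustrative timeline for the NUMA "expire" handling above, assuming
 * the default sysctl_stat_interval of one second: when a remote zone's
 * per-cpu diff is folded, p->expire is set to 3. Each later pass over this
 * zone decrements it (new activity re-arms it to 3), and once it reaches
 * zero any pages still cached in the remote pcp list are returned via
 * drain_zone_pages() - hence "3 seconds idle till flush".
 */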
522 
523 /*
524  * Fold the data for an offline cpu into the global array.
525  * There cannot be any access by the offline cpu and therefore
526  * synchronization is simplified.
527  */
528 void cpu_vm_stats_fold(int cpu)
529 {
530 	struct zone *zone;
531 	int i;
532 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
533 
534 	for_each_populated_zone(zone) {
535 		struct per_cpu_pageset *p;
536 
537 		p = per_cpu_ptr(zone->pageset, cpu);
538 
539 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
540 			if (p->vm_stat_diff[i]) {
541 				int v;
542 
543 				v = p->vm_stat_diff[i];
544 				p->vm_stat_diff[i] = 0;
545 				atomic_long_add(v, &zone->vm_stat[i]);
546 				global_diff[i] += v;
547 			}
548 	}
549 
550 	fold_diff(global_diff);
551 }
552 
553 /*
554  * This is only called if !populated_zone(zone), which implies no other users of
555  * pset->vm_stat_diff[] exist.
556  */
557 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
558 {
559 	int i;
560 
561 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
562 		if (pset->vm_stat_diff[i]) {
563 			int v = pset->vm_stat_diff[i];
564 			pset->vm_stat_diff[i] = 0;
565 			atomic_long_add(v, &zone->vm_stat[i]);
566 			atomic_long_add(v, &vm_stat[i]);
567 		}
568 }
569 #endif
570 
571 #ifdef CONFIG_NUMA
572 /*
573  * Determine the per node value of a stat item.
574  */
575 unsigned long node_page_state(int node, enum zone_stat_item item)
576 {
577 	struct zone *zones = NODE_DATA(node)->node_zones;
578 	int i;
579 	unsigned long count = 0;
580 
581 	for (i = 0; i < MAX_NR_ZONES; i++)
582 		count += zone_page_state(zones + i, item);
583 
584 	return count;
585 }
586 
587 #endif
588 
589 #ifdef CONFIG_COMPACTION
590 
591 struct contig_page_info {
592 	unsigned long free_pages;
593 	unsigned long free_blocks_total;
594 	unsigned long free_blocks_suitable;
595 };
596 
597 /*
598  * Calculate the number of free pages in a zone, how many contiguous
599  * pages are free and how many are large enough to satisfy an allocation of
600  * the target size. Note that this function makes no attempt to estimate
601  * how many suitable free blocks there *might* be if MOVABLE pages were
602  * migrated. Calculating that is possible, but expensive and can be
603  * figured out from userspace
604  */
605 static void fill_contig_page_info(struct zone *zone,
606 				unsigned int suitable_order,
607 				struct contig_page_info *info)
608 {
609 	unsigned int order;
610 
611 	info->free_pages = 0;
612 	info->free_blocks_total = 0;
613 	info->free_blocks_suitable = 0;
614 
615 	for (order = 0; order < MAX_ORDER; order++) {
616 		unsigned long blocks;
617 
618 		/* Count number of free blocks */
619 		blocks = zone->free_area[order].nr_free;
620 		info->free_blocks_total += blocks;
621 
622 		/* Count free base pages */
623 		info->free_pages += blocks << order;
624 
625 		/* Count the suitable free blocks */
626 		if (order >= suitable_order)
627 			info->free_blocks_suitable += blocks <<
628 						(order - suitable_order);
629 	}
630 }
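
/*
 * A worked example with assumed free lists: suppose a zone has nr_free = 10
 * at order 0, 4 at order 1, 2 at order 2 and nothing above, and
 * suitable_order = 1. Then
 *
 *	free_pages           = 10*1 + 4*2 + 2*4   = 26
 *	free_blocks_total    = 10 + 4 + 2         = 16
 *	free_blocks_suitable = 4 + (2 << (2 - 1)) = 8
 *
 * i.e. eight order-1 chunks could be cut from free blocks that are already
 * large enough for the request.
 */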
631 
632 /*
633  * A fragmentation index only makes sense if an allocation of a requested
634  * size would fail. If that is true, the fragmentation index indicates
635  * whether external fragmentation or a lack of memory was the problem.
636  * The value can be used to determine if page reclaim or compaction
637  * should be used
638  */
639 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
640 {
641 	unsigned long requested = 1UL << order;
642 
643 	if (!info->free_blocks_total)
644 		return 0;
645 
646 	/* Fragmentation index only makes sense when a request would fail */
647 	if (info->free_blocks_suitable)
648 		return -1000;
649 
650 	/*
651 	 * Index is between 0 and 1 so return within 3 decimal places
652 	 *
653 	 * 0 => allocation would fail due to lack of memory
654 	 * 1 => allocation would fail due to fragmentation
655 	 */
656 	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
657 }
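
/*
 * A worked example of the index above, with assumed numbers: an order-2
 * request (requested = 4) that would fail, free_pages = 100 and
 * free_blocks_total = 50 gives
 *
 *	1000 - (1000 + 100 * 1000 / 4) / 50 = 1000 - 26000 / 50 = 480
 *
 * i.e. an index of 0.480. Values close to 1.000 (plenty of free pages but in
 * blocks far smaller than the request) suggest compaction; values close to
 * 0.000 suggest page reclaim.
 */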
658 
659 /* Same as __fragmentation index but allocs contig_page_info on stack */
660 int fragmentation_index(struct zone *zone, unsigned int order)
661 {
662 	struct contig_page_info info;
663 
664 	fill_contig_page_info(zone, order, &info);
665 	return __fragmentation_index(order, &info);
666 }
667 #endif
668 
669 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
670 #ifdef CONFIG_ZONE_DMA
671 #define TEXT_FOR_DMA(xx) xx "_dma",
672 #else
673 #define TEXT_FOR_DMA(xx)
674 #endif
675 
676 #ifdef CONFIG_ZONE_DMA32
677 #define TEXT_FOR_DMA32(xx) xx "_dma32",
678 #else
679 #define TEXT_FOR_DMA32(xx)
680 #endif
681 
682 #ifdef CONFIG_HIGHMEM
683 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
684 #else
685 #define TEXT_FOR_HIGHMEM(xx)
686 #endif
687 
688 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
689 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
690 
691 const char * const vmstat_text[] = {
692 	/* enum zone_stat_item counters */
693 	"nr_free_pages",
694 	"nr_alloc_batch",
695 	"nr_inactive_anon",
696 	"nr_active_anon",
697 	"nr_inactive_file",
698 	"nr_active_file",
699 	"nr_unevictable",
700 	"nr_mlock",
701 	"nr_anon_pages",
702 	"nr_mapped",
703 	"nr_file_pages",
704 	"nr_dirty",
705 	"nr_writeback",
706 	"nr_slab_reclaimable",
707 	"nr_slab_unreclaimable",
708 	"nr_page_table_pages",
709 	"nr_kernel_stack",
710 	"nr_unstable",
711 	"nr_bounce",
712 	"nr_vmscan_write",
713 	"nr_vmscan_immediate_reclaim",
714 	"nr_writeback_temp",
715 	"nr_isolated_anon",
716 	"nr_isolated_file",
717 	"nr_shmem",
718 	"nr_dirtied",
719 	"nr_written",
720 	"nr_pages_scanned",
721 
722 #ifdef CONFIG_NUMA
723 	"numa_hit",
724 	"numa_miss",
725 	"numa_foreign",
726 	"numa_interleave",
727 	"numa_local",
728 	"numa_other",
729 #endif
730 	"workingset_refault",
731 	"workingset_activate",
732 	"workingset_nodereclaim",
733 	"nr_anon_transparent_hugepages",
734 	"nr_free_cma",
735 
736 	/* enum writeback_stat_item counters */
737 	"nr_dirty_threshold",
738 	"nr_dirty_background_threshold",
739 
740 #ifdef CONFIG_VM_EVENT_COUNTERS
741 	/* enum vm_event_item counters */
742 	"pgpgin",
743 	"pgpgout",
744 	"pswpin",
745 	"pswpout",
746 
747 	TEXTS_FOR_ZONES("pgalloc")
748 
749 	"pgfree",
750 	"pgactivate",
751 	"pgdeactivate",
752 
753 	"pgfault",
754 	"pgmajfault",
755 	"pglazyfreed",
756 
757 	TEXTS_FOR_ZONES("pgrefill")
758 	TEXTS_FOR_ZONES("pgsteal_kswapd")
759 	TEXTS_FOR_ZONES("pgsteal_direct")
760 	TEXTS_FOR_ZONES("pgscan_kswapd")
761 	TEXTS_FOR_ZONES("pgscan_direct")
762 	"pgscan_direct_throttle",
763 
764 #ifdef CONFIG_NUMA
765 	"zone_reclaim_failed",
766 #endif
767 	"pginodesteal",
768 	"slabs_scanned",
769 	"kswapd_inodesteal",
770 	"kswapd_low_wmark_hit_quickly",
771 	"kswapd_high_wmark_hit_quickly",
772 	"pageoutrun",
773 	"allocstall",
774 
775 	"pgrotated",
776 
777 	"drop_pagecache",
778 	"drop_slab",
779 
780 #ifdef CONFIG_NUMA_BALANCING
781 	"numa_pte_updates",
782 	"numa_huge_pte_updates",
783 	"numa_hint_faults",
784 	"numa_hint_faults_local",
785 	"numa_pages_migrated",
786 #endif
787 #ifdef CONFIG_MIGRATION
788 	"pgmigrate_success",
789 	"pgmigrate_fail",
790 #endif
791 #ifdef CONFIG_COMPACTION
792 	"compact_migrate_scanned",
793 	"compact_free_scanned",
794 	"compact_isolated",
795 	"compact_stall",
796 	"compact_fail",
797 	"compact_success",
798 	"compact_daemon_wake",
799 #endif
800 
801 #ifdef CONFIG_HUGETLB_PAGE
802 	"htlb_buddy_alloc_success",
803 	"htlb_buddy_alloc_fail",
804 #endif
805 	"unevictable_pgs_culled",
806 	"unevictable_pgs_scanned",
807 	"unevictable_pgs_rescued",
808 	"unevictable_pgs_mlocked",
809 	"unevictable_pgs_munlocked",
810 	"unevictable_pgs_cleared",
811 	"unevictable_pgs_stranded",
812 
813 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
814 	"thp_fault_alloc",
815 	"thp_fault_fallback",
816 	"thp_collapse_alloc",
817 	"thp_collapse_alloc_failed",
818 	"thp_split_page",
819 	"thp_split_page_failed",
820 	"thp_deferred_split_page",
821 	"thp_split_pmd",
822 	"thp_zero_page_alloc",
823 	"thp_zero_page_alloc_failed",
824 #endif
825 #ifdef CONFIG_MEMORY_BALLOON
826 	"balloon_inflate",
827 	"balloon_deflate",
828 #ifdef CONFIG_BALLOON_COMPACTION
829 	"balloon_migrate",
830 #endif
831 #endif /* CONFIG_MEMORY_BALLOON */
832 #ifdef CONFIG_DEBUG_TLBFLUSH
833 #ifdef CONFIG_SMP
834 	"nr_tlb_remote_flush",
835 	"nr_tlb_remote_flush_received",
836 #endif /* CONFIG_SMP */
837 	"nr_tlb_local_flush_all",
838 	"nr_tlb_local_flush_one",
839 #endif /* CONFIG_DEBUG_TLBFLUSH */
840 
841 #ifdef CONFIG_DEBUG_VM_VMACACHE
842 	"vmacache_find_calls",
843 	"vmacache_find_hits",
844 	"vmacache_full_flushes",
845 #endif
846 #endif /* CONFIG_VM_EVENT_COUNTERS */
847 };
848 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
849 
850 
851 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
852      defined(CONFIG_PROC_FS)
853 static void *frag_start(struct seq_file *m, loff_t *pos)
854 {
855 	pg_data_t *pgdat;
856 	loff_t node = *pos;
857 
858 	for (pgdat = first_online_pgdat();
859 	     pgdat && node;
860 	     pgdat = next_online_pgdat(pgdat))
861 		--node;
862 
863 	return pgdat;
864 }
865 
866 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
867 {
868 	pg_data_t *pgdat = (pg_data_t *)arg;
869 
870 	(*pos)++;
871 	return next_online_pgdat(pgdat);
872 }
873 
874 static void frag_stop(struct seq_file *m, void *arg)
875 {
876 }
877 
878 /* Walk all the zones in a node and print using a callback */
879 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
880 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
881 {
882 	struct zone *zone;
883 	struct zone *node_zones = pgdat->node_zones;
884 	unsigned long flags;
885 
886 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
887 		if (!populated_zone(zone))
888 			continue;
889 
890 		spin_lock_irqsave(&zone->lock, flags);
891 		print(m, pgdat, zone);
892 		spin_unlock_irqrestore(&zone->lock, flags);
893 	}
894 }
895 #endif
896 
897 #ifdef CONFIG_PROC_FS
898 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
899 						struct zone *zone)
900 {
901 	int order;
902 
903 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
904 	for (order = 0; order < MAX_ORDER; ++order)
905 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
906 	seq_putc(m, '\n');
907 }
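
/*
 * For illustration, the seq_printf() calls above produce one line per zone
 * in /proc/buddyinfo of the (made up) form:
 *
 *	Node 0, zone   Normal    145     87     32     10      4      2      1      0      0      0      0
 *
 * with one free-block count per order, 0 .. MAX_ORDER-1.
 */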
908 
909 /*
910  * This walks the free areas for each zone.
911  */
912 static int frag_show(struct seq_file *m, void *arg)
913 {
914 	pg_data_t *pgdat = (pg_data_t *)arg;
915 	walk_zones_in_node(m, pgdat, frag_show_print);
916 	return 0;
917 }
918 
919 static void pagetypeinfo_showfree_print(struct seq_file *m,
920 					pg_data_t *pgdat, struct zone *zone)
921 {
922 	int order, mtype;
923 
924 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
925 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
926 					pgdat->node_id,
927 					zone->name,
928 					migratetype_names[mtype]);
929 		for (order = 0; order < MAX_ORDER; ++order) {
930 			unsigned long freecount = 0;
931 			struct free_area *area;
932 			struct list_head *curr;
933 
934 			area = &(zone->free_area[order]);
935 
936 			list_for_each(curr, &area->free_list[mtype])
937 				freecount++;
938 			seq_printf(m, "%6lu ", freecount);
939 		}
940 		seq_putc(m, '\n');
941 	}
942 }
943 
944 /* Print out the free pages at each order for each migratetype */
945 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
946 {
947 	int order;
948 	pg_data_t *pgdat = (pg_data_t *)arg;
949 
950 	/* Print header */
951 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
952 	for (order = 0; order < MAX_ORDER; ++order)
953 		seq_printf(m, "%6d ", order);
954 	seq_putc(m, '\n');
955 
956 	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
957 
958 	return 0;
959 }
960 
961 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
962 					pg_data_t *pgdat, struct zone *zone)
963 {
964 	int mtype;
965 	unsigned long pfn;
966 	unsigned long start_pfn = zone->zone_start_pfn;
967 	unsigned long end_pfn = zone_end_pfn(zone);
968 	unsigned long count[MIGRATE_TYPES] = { 0, };
969 
970 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
971 		struct page *page;
972 
973 		if (!pfn_valid(pfn))
974 			continue;
975 
976 		page = pfn_to_page(pfn);
977 
978 		/* Watch for unexpected holes punched in the memmap */
979 		if (!memmap_valid_within(pfn, page, zone))
980 			continue;
981 
982 		if (page_zone(page) != zone)
983 			continue;
984 
985 		mtype = get_pageblock_migratetype(page);
986 
987 		if (mtype < MIGRATE_TYPES)
988 			count[mtype]++;
989 	}
990 
991 	/* Print counts */
992 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
993 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
994 		seq_printf(m, "%12lu ", count[mtype]);
995 	seq_putc(m, '\n');
996 }
997 
998 /* Print out the number of pageblocks for each migratetype */
999 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1000 {
1001 	int mtype;
1002 	pg_data_t *pgdat = (pg_data_t *)arg;
1003 
1004 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1005 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1006 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1007 	seq_putc(m, '\n');
1008 	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1009 
1010 	return 0;
1011 }
1012 
1013 #ifdef CONFIG_PAGE_OWNER
1014 static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1015 							pg_data_t *pgdat,
1016 							struct zone *zone)
1017 {
1018 	struct page *page;
1019 	struct page_ext *page_ext;
1020 	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1021 	unsigned long end_pfn = pfn + zone->spanned_pages;
1022 	unsigned long count[MIGRATE_TYPES] = { 0, };
1023 	int pageblock_mt, page_mt;
1024 	int i;
1025 
1026 	/* Scan block by block. First and last block may be incomplete */
1027 	pfn = zone->zone_start_pfn;
1028 
1029 	/*
1030 	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
1031 	 * a zone boundary, it will be double counted between zones. This does
1032 	 * not matter as the mixed block count will still be correct
1033 	 */
1034 	for (; pfn < end_pfn; ) {
1035 		if (!pfn_valid(pfn)) {
1036 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1037 			continue;
1038 		}
1039 
1040 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1041 		block_end_pfn = min(block_end_pfn, end_pfn);
1042 
1043 		page = pfn_to_page(pfn);
1044 		pageblock_mt = get_pageblock_migratetype(page);
1045 
1046 		for (; pfn < block_end_pfn; pfn++) {
1047 			if (!pfn_valid_within(pfn))
1048 				continue;
1049 
1050 			page = pfn_to_page(pfn);
1051 
1052 			if (page_zone(page) != zone)
1053 				continue;
1054 
1055 			if (PageBuddy(page)) {
1056 				pfn += (1UL << page_order(page)) - 1;
1057 				continue;
1058 			}
1059 
1060 			if (PageReserved(page))
1061 				continue;
1062 
1063 			page_ext = lookup_page_ext(page);
1064 
1065 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1066 				continue;
1067 
1068 			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1069 			if (pageblock_mt != page_mt) {
1070 				if (is_migrate_cma(pageblock_mt))
1071 					count[MIGRATE_MOVABLE]++;
1072 				else
1073 					count[pageblock_mt]++;
1074 
1075 				pfn = block_end_pfn;
1076 				break;
1077 			}
1078 			pfn += (1UL << page_ext->order) - 1;
1079 		}
1080 	}
1081 
1082 	/* Print counts */
1083 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1084 	for (i = 0; i < MIGRATE_TYPES; i++)
1085 		seq_printf(m, "%12lu ", count[i]);
1086 	seq_putc(m, '\n');
1087 }
1088 #endif /* CONFIG_PAGE_OWNER */
1089 
1090 /*
1091  * Print out the number of pageblocks for each migratetype that contain pages
1092  * of other types. This gives an indication of how well fallbacks are being
1093  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1094  * to determine what is going on
1095  */
1096 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1097 {
1098 #ifdef CONFIG_PAGE_OWNER
1099 	int mtype;
1100 
1101 	if (!static_branch_unlikely(&page_owner_inited))
1102 		return;
1103 
1104 	drain_all_pages(NULL);
1105 
1106 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1107 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1108 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1109 	seq_putc(m, '\n');
1110 
1111 	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1112 #endif /* CONFIG_PAGE_OWNER */
1113 }
1114 
1115 /*
1116  * This prints out statistics in relation to grouping pages by mobility.
1117  * It is expensive to collect so do not constantly read the file.
1118  */
1119 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1120 {
1121 	pg_data_t *pgdat = (pg_data_t *)arg;
1122 
1123 	/* check memoryless node */
1124 	if (!node_state(pgdat->node_id, N_MEMORY))
1125 		return 0;
1126 
1127 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1128 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1129 	seq_putc(m, '\n');
1130 	pagetypeinfo_showfree(m, pgdat);
1131 	pagetypeinfo_showblockcount(m, pgdat);
1132 	pagetypeinfo_showmixedcount(m, pgdat);
1133 
1134 	return 0;
1135 }
1136 
1137 static const struct seq_operations fragmentation_op = {
1138 	.start	= frag_start,
1139 	.next	= frag_next,
1140 	.stop	= frag_stop,
1141 	.show	= frag_show,
1142 };
1143 
1144 static int fragmentation_open(struct inode *inode, struct file *file)
1145 {
1146 	return seq_open(file, &fragmentation_op);
1147 }
1148 
1149 static const struct file_operations fragmentation_file_operations = {
1150 	.open		= fragmentation_open,
1151 	.read		= seq_read,
1152 	.llseek		= seq_lseek,
1153 	.release	= seq_release,
1154 };
1155 
1156 static const struct seq_operations pagetypeinfo_op = {
1157 	.start	= frag_start,
1158 	.next	= frag_next,
1159 	.stop	= frag_stop,
1160 	.show	= pagetypeinfo_show,
1161 };
1162 
1163 static int pagetypeinfo_open(struct inode *inode, struct file *file)
1164 {
1165 	return seq_open(file, &pagetypeinfo_op);
1166 }
1167 
1168 static const struct file_operations pagetypeinfo_file_ops = {
1169 	.open		= pagetypeinfo_open,
1170 	.read		= seq_read,
1171 	.llseek		= seq_lseek,
1172 	.release	= seq_release,
1173 };
1174 
1175 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1176 							struct zone *zone)
1177 {
1178 	int i;
1179 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1180 	seq_printf(m,
1181 		   "\n  pages free     %lu"
1182 		   "\n        min      %lu"
1183 		   "\n        low      %lu"
1184 		   "\n        high     %lu"
1185 		   "\n        scanned  %lu"
1186 		   "\n        spanned  %lu"
1187 		   "\n        present  %lu"
1188 		   "\n        managed  %lu",
1189 		   zone_page_state(zone, NR_FREE_PAGES),
1190 		   min_wmark_pages(zone),
1191 		   low_wmark_pages(zone),
1192 		   high_wmark_pages(zone),
1193 		   zone_page_state(zone, NR_PAGES_SCANNED),
1194 		   zone->spanned_pages,
1195 		   zone->present_pages,
1196 		   zone->managed_pages);
1197 
1198 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1199 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
1200 				zone_page_state(zone, i));
1201 
1202 	seq_printf(m,
1203 		   "\n        protection: (%ld",
1204 		   zone->lowmem_reserve[0]);
1205 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1206 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1207 	seq_printf(m,
1208 		   ")"
1209 		   "\n  pagesets");
1210 	for_each_online_cpu(i) {
1211 		struct per_cpu_pageset *pageset;
1212 
1213 		pageset = per_cpu_ptr(zone->pageset, i);
1214 		seq_printf(m,
1215 			   "\n    cpu: %i"
1216 			   "\n              count: %i"
1217 			   "\n              high:  %i"
1218 			   "\n              batch: %i",
1219 			   i,
1220 			   pageset->pcp.count,
1221 			   pageset->pcp.high,
1222 			   pageset->pcp.batch);
1223 #ifdef CONFIG_SMP
1224 		seq_printf(m, "\n  vm stats threshold: %d",
1225 				pageset->stat_threshold);
1226 #endif
1227 	}
1228 	seq_printf(m,
1229 		   "\n  all_unreclaimable: %u"
1230 		   "\n  start_pfn:         %lu"
1231 		   "\n  inactive_ratio:    %u",
1232 		   !zone_reclaimable(zone),
1233 		   zone->zone_start_pfn,
1234 		   zone->inactive_ratio);
1235 	seq_putc(m, '\n');
1236 }
1237 
1238 /*
1239  * Output information about zones in @pgdat.
1240  */
1241 static int zoneinfo_show(struct seq_file *m, void *arg)
1242 {
1243 	pg_data_t *pgdat = (pg_data_t *)arg;
1244 	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1245 	return 0;
1246 }
1247 
1248 static const struct seq_operations zoneinfo_op = {
1249 	.start	= frag_start, /* iterate over all zones. The same as in
1250 			       * fragmentation. */
1251 	.next	= frag_next,
1252 	.stop	= frag_stop,
1253 	.show	= zoneinfo_show,
1254 };
1255 
1256 static int zoneinfo_open(struct inode *inode, struct file *file)
1257 {
1258 	return seq_open(file, &zoneinfo_op);
1259 }
1260 
1261 static const struct file_operations proc_zoneinfo_file_operations = {
1262 	.open		= zoneinfo_open,
1263 	.read		= seq_read,
1264 	.llseek		= seq_lseek,
1265 	.release	= seq_release,
1266 };
1267 
1268 enum writeback_stat_item {
1269 	NR_DIRTY_THRESHOLD,
1270 	NR_DIRTY_BG_THRESHOLD,
1271 	NR_VM_WRITEBACK_STAT_ITEMS,
1272 };
1273 
1274 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1275 {
1276 	unsigned long *v;
1277 	int i, stat_items_size;
1278 
1279 	if (*pos >= ARRAY_SIZE(vmstat_text))
1280 		return NULL;
1281 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1282 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1283 
1284 #ifdef CONFIG_VM_EVENT_COUNTERS
1285 	stat_items_size += sizeof(struct vm_event_state);
1286 #endif
1287 
1288 	v = kmalloc(stat_items_size, GFP_KERNEL);
1289 	m->private = v;
1290 	if (!v)
1291 		return ERR_PTR(-ENOMEM);
1292 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1293 		v[i] = global_page_state(i);
1294 	v += NR_VM_ZONE_STAT_ITEMS;
1295 
1296 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1297 			    v + NR_DIRTY_THRESHOLD);
1298 	v += NR_VM_WRITEBACK_STAT_ITEMS;
1299 
1300 #ifdef CONFIG_VM_EVENT_COUNTERS
1301 	all_vm_events(v);
1302 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1303 	v[PGPGOUT] /= 2;
1304 #endif
1305 	return (unsigned long *)m->private + *pos;
1306 }
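
/*
 * The private buffer allocated above is laid out, in order, as:
 *
 *	[0 .. NR_VM_ZONE_STAT_ITEMS-1]			zone counters
 *	[.. NR_VM_WRITEBACK_STAT_ITEMS more]		dirty thresholds
 *	[.. NR_VM_EVENT_ITEMS more, if configured]	vm event counters
 *
 * mirroring the ordering of vmstat_text[], so that vmstat_show() can index
 * the flat array and the name table with the same offset.
 */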
1307 
1308 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1309 {
1310 	(*pos)++;
1311 	if (*pos >= ARRAY_SIZE(vmstat_text))
1312 		return NULL;
1313 	return (unsigned long *)m->private + *pos;
1314 }
1315 
1316 static int vmstat_show(struct seq_file *m, void *arg)
1317 {
1318 	unsigned long *l = arg;
1319 	unsigned long off = l - (unsigned long *)m->private;
1320 
1321 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1322 	return 0;
1323 }
1324 
1325 static void vmstat_stop(struct seq_file *m, void *arg)
1326 {
1327 	kfree(m->private);
1328 	m->private = NULL;
1329 }
1330 
1331 static const struct seq_operations vmstat_op = {
1332 	.start	= vmstat_start,
1333 	.next	= vmstat_next,
1334 	.stop	= vmstat_stop,
1335 	.show	= vmstat_show,
1336 };
1337 
1338 static int vmstat_open(struct inode *inode, struct file *file)
1339 {
1340 	return seq_open(file, &vmstat_op);
1341 }
1342 
1343 static const struct file_operations proc_vmstat_file_operations = {
1344 	.open		= vmstat_open,
1345 	.read		= seq_read,
1346 	.llseek		= seq_lseek,
1347 	.release	= seq_release,
1348 };
1349 #endif /* CONFIG_PROC_FS */
1350 
1351 #ifdef CONFIG_SMP
1352 static struct workqueue_struct *vmstat_wq;
1353 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1354 int sysctl_stat_interval __read_mostly = HZ;
1355 
1356 #ifdef CONFIG_PROC_FS
1357 static void refresh_vm_stats(struct work_struct *work)
1358 {
1359 	refresh_cpu_vm_stats(true);
1360 }
1361 
1362 int vmstat_refresh(struct ctl_table *table, int write,
1363 		   void __user *buffer, size_t *lenp, loff_t *ppos)
1364 {
1365 	long val;
1366 	int err;
1367 	int i;
1368 
1369 	/*
1370 	 * The regular update, every sysctl_stat_interval, may come later
1371 	 * than expected: leaving a significant amount in per_cpu buckets.
1372 	 * This is particularly misleading when checking a quantity of HUGE
1373 	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1374 	 * which can equally be echo'ed to or cat'ted from (by root),
1375 	 * can be used to update the stats just before reading them.
1376 	 *
1377 	 * Oh, and since global_page_state() etc. are so careful to hide
1378 	 * transiently negative values, report an error here if any of
1379 	 * the stats is negative, so we know to go looking for imbalance.
1380 	 */
1381 	err = schedule_on_each_cpu(refresh_vm_stats);
1382 	if (err)
1383 		return err;
1384 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1385 		val = atomic_long_read(&vm_stat[i]);
1386 		if (val < 0) {
1387 			switch (i) {
1388 			case NR_ALLOC_BATCH:
1389 			case NR_PAGES_SCANNED:
1390 				/*
1391 				 * These are often seen to go negative in
1392 				 * recent kernels, but not to go permanently
1393 				 * negative.  Whilst it would be nicer not to
1394 				 * have exceptions, rooting them out would be
1395 				 * another task, of rather low priority.
1396 				 */
1397 				break;
1398 			default:
1399 				pr_warn("%s: %s %ld\n",
1400 					__func__, vmstat_text[i], val);
1401 				err = -EINVAL;
1402 				break;
1403 			}
1404 		}
1405 	}
1406 	if (err)
1407 		return err;
1408 	if (write)
1409 		*ppos += *lenp;
1410 	else
1411 		*lenp = 0;
1412 	return 0;
1413 }
1414 #endif /* CONFIG_PROC_FS */
1415 
1416 static void vmstat_update(struct work_struct *w)
1417 {
1418 	if (refresh_cpu_vm_stats(true)) {
1419 		/*
1420 		 * Counters were updated so we expect more updates
1421 		 * to occur in the future. Keep on running the
1422 		 * update worker thread.
1423 		 */
1424 		queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1425 				this_cpu_ptr(&vmstat_work),
1426 				round_jiffies_relative(sysctl_stat_interval));
1427 	}
1428 }
1429 
1430 /*
1431  * Switch off vmstat processing and then fold all the remaining differentials
1432  * until the diffs stay at zero. The function is used by NOHZ and can only be
1433  * invoked when tick processing is not active.
1434  */
1435 /*
1436  * Check if the diffs for a certain cpu indicate that
1437  * an update is needed.
1438  */
1439 static bool need_update(int cpu)
1440 {
1441 	struct zone *zone;
1442 
1443 	for_each_populated_zone(zone) {
1444 		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1445 
1446 		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1447 		/*
1448 		 * The fast way of checking if there are any vmstat diffs.
1449 		 * This works because the diffs are byte sized items.
1450 		 */
1451 		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1452 			return true;
1453 
1454 	}
1455 	return false;
1456 }
1457 
1458 /*
1459  * Switch off vmstat processing and then fold all the remaining differentials
1460  * until the diffs stay at zero. The function is used by NOHZ and can only be
1461  * invoked when tick processing is not active.
1462  */
1463 void quiet_vmstat(void)
1464 {
1465 	if (system_state != SYSTEM_RUNNING)
1466 		return;
1467 
1468 	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1469 		return;
1470 
1471 	if (!need_update(smp_processor_id()))
1472 		return;
1473 
1474 	/*
1475 	 * Just refresh counters and do not care about the pending delayed
1476 	 * vmstat_update. It doesn't fire often enough to matter and canceling
1477 	 * it would be too expensive from this path.
1478 	 * vmstat_shepherd will take care about that for us.
1479 	 */
1480 	refresh_cpu_vm_stats(false);
1481 }
1482 
1483 /*
1484  * Shepherd worker thread that checks the
1485  * differentials of processors that have their worker
1486  * threads for vm statistics updates disabled because of
1487  * inactivity.
1488  */
1489 static void vmstat_shepherd(struct work_struct *w);
1490 
1491 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1492 
1493 static void vmstat_shepherd(struct work_struct *w)
1494 {
1495 	int cpu;
1496 
1497 	get_online_cpus();
1498 	/* Check processors whose vmstat worker threads have been disabled */
1499 	for_each_online_cpu(cpu) {
1500 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1501 
1502 		if (!delayed_work_pending(dw) && need_update(cpu))
1503 			queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
1504 	}
1505 	put_online_cpus();
1506 
1507 	schedule_delayed_work(&shepherd,
1508 		round_jiffies_relative(sysctl_stat_interval));
1509 }
1510 
1511 static void __init start_shepherd_timer(void)
1512 {
1513 	int cpu;
1514 
1515 	for_each_possible_cpu(cpu)
1516 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1517 			vmstat_update);
1518 
1519 	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1520 	schedule_delayed_work(&shepherd,
1521 		round_jiffies_relative(sysctl_stat_interval));
1522 }
1523 
1524 static void vmstat_cpu_dead(int node)
1525 {
1526 	int cpu;
1527 
1528 	get_online_cpus();
1529 	for_each_online_cpu(cpu)
1530 		if (cpu_to_node(cpu) == node)
1531 			goto end;
1532 
1533 	node_clear_state(node, N_CPU);
1534 end:
1535 	put_online_cpus();
1536 }
1537 
1538 /*
1539  * Use the cpu notifier to ensure that the thresholds are recalculated
1540  * when necessary.
1541  */
1542 static int vmstat_cpuup_callback(struct notifier_block *nfb,
1543 		unsigned long action,
1544 		void *hcpu)
1545 {
1546 	long cpu = (long)hcpu;
1547 
1548 	switch (action) {
1549 	case CPU_ONLINE:
1550 	case CPU_ONLINE_FROZEN:
1551 		refresh_zone_stat_thresholds();
1552 		node_set_state(cpu_to_node(cpu), N_CPU);
1553 		break;
1554 	case CPU_DOWN_PREPARE:
1555 	case CPU_DOWN_PREPARE_FROZEN:
1556 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1557 		break;
1558 	case CPU_DOWN_FAILED:
1559 	case CPU_DOWN_FAILED_FROZEN:
1560 		break;
1561 	case CPU_DEAD:
1562 	case CPU_DEAD_FROZEN:
1563 		refresh_zone_stat_thresholds();
1564 		vmstat_cpu_dead(cpu_to_node(cpu));
1565 		break;
1566 	default:
1567 		break;
1568 	}
1569 	return NOTIFY_OK;
1570 }
1571 
1572 static struct notifier_block vmstat_notifier =
1573 	{ &vmstat_cpuup_callback, NULL, 0 };
1574 #endif
1575 
1576 static int __init setup_vmstat(void)
1577 {
1578 #ifdef CONFIG_SMP
1579 	cpu_notifier_register_begin();
1580 	__register_cpu_notifier(&vmstat_notifier);
1581 
1582 	start_shepherd_timer();
1583 	cpu_notifier_register_done();
1584 #endif
1585 #ifdef CONFIG_PROC_FS
1586 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1587 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1588 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1589 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1590 #endif
1591 	return 0;
1592 }
1593 module_init(setup_vmstat)
1594 
1595 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1596 
1597 /*
1598  * Return an index indicating how much of the available free memory is
1599  * unusable for an allocation of the requested size.
1600  */
1601 static int unusable_free_index(unsigned int order,
1602 				struct contig_page_info *info)
1603 {
1604 	/* No free memory is interpreted as all free memory is unusable */
1605 	if (info->free_pages == 0)
1606 		return 1000;
1607 
1608 	/*
1609 	 * Index should be a value between 0 and 1. Return a value to 3
1610 	 * decimal places.
1611 	 *
1612 	 * 0 => no fragmentation
1613 	 * 1 => high fragmentation
1614 	 */
1615 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1616 
1617 }
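
/*
 * A worked example with assumed numbers, reusing the fill_contig_page_info()
 * example above (free_pages = 26, two free order-2 blocks) for an order-2
 * request:
 *
 *	(26 - (2 << 2)) * 1000 / 26 = 18000 / 26 = 692
 *
 * i.e. roughly 69% of the free memory currently sits in blocks too small to
 * satisfy an order-2 allocation.
 */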
1618 
1619 static void unusable_show_print(struct seq_file *m,
1620 					pg_data_t *pgdat, struct zone *zone)
1621 {
1622 	unsigned int order;
1623 	int index;
1624 	struct contig_page_info info;
1625 
1626 	seq_printf(m, "Node %d, zone %8s ",
1627 				pgdat->node_id,
1628 				zone->name);
1629 	for (order = 0; order < MAX_ORDER; ++order) {
1630 		fill_contig_page_info(zone, order, &info);
1631 		index = unusable_free_index(order, &info);
1632 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1633 	}
1634 
1635 	seq_putc(m, '\n');
1636 }
1637 
1638 /*
1639  * Display unusable free space index
1640  *
1641  * The unusable free space index measures how much of the available free
1642  * memory cannot be used to satisfy an allocation of a given size and is a
1643  * value between 0 and 1. The higher the value, the more of free memory is
1644  * unusable and by implication, the worse the external fragmentation is. This
1645  * can be expressed as a percentage by multiplying by 100.
1646  */
1647 static int unusable_show(struct seq_file *m, void *arg)
1648 {
1649 	pg_data_t *pgdat = (pg_data_t *)arg;
1650 
1651 	/* check memoryless node */
1652 	if (!node_state(pgdat->node_id, N_MEMORY))
1653 		return 0;
1654 
1655 	walk_zones_in_node(m, pgdat, unusable_show_print);
1656 
1657 	return 0;
1658 }
1659 
1660 static const struct seq_operations unusable_op = {
1661 	.start	= frag_start,
1662 	.next	= frag_next,
1663 	.stop	= frag_stop,
1664 	.show	= unusable_show,
1665 };
1666 
1667 static int unusable_open(struct inode *inode, struct file *file)
1668 {
1669 	return seq_open(file, &unusable_op);
1670 }
1671 
1672 static const struct file_operations unusable_file_ops = {
1673 	.open		= unusable_open,
1674 	.read		= seq_read,
1675 	.llseek		= seq_lseek,
1676 	.release	= seq_release,
1677 };
1678 
1679 static void extfrag_show_print(struct seq_file *m,
1680 					pg_data_t *pgdat, struct zone *zone)
1681 {
1682 	unsigned int order;
1683 	int index;
1684 
1685 	/* Alloc on stack as interrupts are disabled for zone walk */
1686 	struct contig_page_info info;
1687 
1688 	seq_printf(m, "Node %d, zone %8s ",
1689 				pgdat->node_id,
1690 				zone->name);
1691 	for (order = 0; order < MAX_ORDER; ++order) {
1692 		fill_contig_page_info(zone, order, &info);
1693 		index = __fragmentation_index(order, &info);
1694 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1695 	}
1696 
1697 	seq_putc(m, '\n');
1698 }
1699 
1700 /*
1701  * Display fragmentation index for orders that allocations would fail for
1702  */
1703 static int extfrag_show(struct seq_file *m, void *arg)
1704 {
1705 	pg_data_t *pgdat = (pg_data_t *)arg;
1706 
1707 	walk_zones_in_node(m, pgdat, extfrag_show_print);
1708 
1709 	return 0;
1710 }
1711 
1712 static const struct seq_operations extfrag_op = {
1713 	.start	= frag_start,
1714 	.next	= frag_next,
1715 	.stop	= frag_stop,
1716 	.show	= extfrag_show,
1717 };
1718 
1719 static int extfrag_open(struct inode *inode, struct file *file)
1720 {
1721 	return seq_open(file, &extfrag_op);
1722 }
1723 
1724 static const struct file_operations extfrag_file_ops = {
1725 	.open		= extfrag_open,
1726 	.read		= seq_read,
1727 	.llseek		= seq_lseek,
1728 	.release	= seq_release,
1729 };
1730 
1731 static int __init extfrag_debug_init(void)
1732 {
1733 	struct dentry *extfrag_debug_root;
1734 
1735 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1736 	if (!extfrag_debug_root)
1737 		return -ENOMEM;
1738 
1739 	if (!debugfs_create_file("unusable_index", 0444,
1740 			extfrag_debug_root, NULL, &unusable_file_ops))
1741 		goto fail;
1742 
1743 	if (!debugfs_create_file("extfrag_index", 0444,
1744 			extfrag_debug_root, NULL, &extfrag_file_ops))
1745 		goto fail;
1746 
1747 	return 0;
1748 fail:
1749 	debugfs_remove_recursive(extfrag_debug_root);
1750 	return -ENOMEM;
1751 }
1752 
1753 module_init(extfrag_debug_init);
1754 #endif
1755