xref: /openbmc/linux/mm/vmstat.c (revision 110e6f26)
1 /*
2  *  linux/mm/vmstat.c
3  *
4  *  Manages VM statistics
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  zoned VM statistics
8  *  Copyright (C) 2006 Silicon Graphics, Inc.,
9  *		Christoph Lameter <christoph@lameter.com>
10  *  Copyright (C) 2008-2014 Christoph Lameter
11  */
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/err.h>
15 #include <linux/module.h>
16 #include <linux/slab.h>
17 #include <linux/cpu.h>
18 #include <linux/cpumask.h>
19 #include <linux/vmstat.h>
20 #include <linux/proc_fs.h>
21 #include <linux/seq_file.h>
22 #include <linux/debugfs.h>
23 #include <linux/sched.h>
24 #include <linux/math64.h>
25 #include <linux/writeback.h>
26 #include <linux/compaction.h>
27 #include <linux/mm_inline.h>
28 #include <linux/page_ext.h>
29 #include <linux/page_owner.h>
30 
31 #include "internal.h"
32 
33 #ifdef CONFIG_VM_EVENT_COUNTERS
34 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
35 EXPORT_PER_CPU_SYMBOL(vm_event_states);
36 
37 static void sum_vm_events(unsigned long *ret)
38 {
39 	int cpu;
40 	int i;
41 
42 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
43 
44 	for_each_online_cpu(cpu) {
45 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
46 
47 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
48 			ret[i] += this->event[i];
49 	}
50 }
51 
52 /*
53  * Accumulate the vm event counters across all CPUs.
54  * The result is unavoidably approximate - it can change
55  * during and after execution of this function.
56 */
57 void all_vm_events(unsigned long *ret)
58 {
59 	get_online_cpus();
60 	sum_vm_events(ret);
61 	put_online_cpus();
62 }
63 EXPORT_SYMBOL_GPL(all_vm_events);
64 
65 /*
66  * Fold the foreign cpu events into our own.
67  *
68  * This is adding to the events on one processor
69  * but keeps the global counts constant.
70  */
71 void vm_events_fold_cpu(int cpu)
72 {
73 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
74 	int i;
75 
76 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
77 		count_vm_events(i, fold_state->event[i]);
78 		fold_state->event[i] = 0;
79 	}
80 }
81 
82 #endif /* CONFIG_VM_EVENT_COUNTERS */
83 
84 /*
85  * Manage combined zone based / global counters
86  *
87  * vm_stat contains the global counters
88  */
89 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
90 EXPORT_SYMBOL(vm_stat);
91 
92 #ifdef CONFIG_SMP
93 
94 int calculate_pressure_threshold(struct zone *zone)
95 {
96 	int threshold;
97 	int watermark_distance;
98 
99 	/*
100 	 * As vmstats are not up to date, there is drift between the estimated
101 	 * and real values. For high thresholds and a high number of CPUs, it
102 	 * is possible for the min watermark to be breached while the estimated
103 	 * value looks fine. The pressure threshold is a reduced value such
104 	 * that even the maximum amount of drift will not accidentally breach
105 	 * the min watermark
106 	 */
107 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
108 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
109 
110 	/*
111 	 * Maximum threshold is 125
112 	 */
113 	threshold = min(125, threshold);
114 
115 	return threshold;
116 }
117 
118 int calculate_normal_threshold(struct zone *zone)
119 {
120 	int threshold;
121 	int mem;	/* memory in 128 MB units */
122 
123 	/*
124 	 * The threshold scales with the number of processors and the amount
125 	 * of memory per zone. More memory means that we can defer updates for
126 	 * longer, more processors could lead to more contention.
127  	 * fls() is used to have a cheap way of logarithmic scaling.
128 	 *
129 	 * Some sample thresholds:
130 	 *
131 	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
132 	 * ------------------------------------------------------------------
133 	 * 8		1		1	0.9-1 GB	4
134 	 * 16		2		2	0.9-1 GB	4
135 	 * 20 		2		2	1-2 GB		5
136 	 * 24		2		2	2-4 GB		6
137 	 * 28		2		2	4-8 GB		7
138 	 * 32		2		2	8-16 GB		8
139 	 * 4		2		2	<128M		1
140 	 * 30		4		3	2-4 GB		5
141 	 * 48		4		3	8-16 GB		8
142 	 * 32		8		4	1-2 GB		4
143 	 * 32		8		4	0.9-1GB		4
144 	 * 10		16		5	<128M		1
145 	 * 40		16		5	900M		4
146 	 * 70		64		7	2-4 GB		5
147 	 * 84		64		7	4-8 GB		6
148 	 * 108		512		9	4-8 GB		6
149 	 * 125		1024		10	8-16 GB		8
150 	 * 125		1024		10	16-32 GB	9
151 	 */
152 
153 	mem = zone->managed_pages >> (27 - PAGE_SHIFT);
154 
155 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
156 
157 	/*
158 	 * Maximum threshold is 125
159 	 */
160 	threshold = min(125, threshold);
161 
162 	return threshold;
163 }
164 
165 /*
166  * Refresh the thresholds for each zone.
167  */
168 void refresh_zone_stat_thresholds(void)
169 {
170 	struct zone *zone;
171 	int cpu;
172 	int threshold;
173 
174 	for_each_populated_zone(zone) {
175 		unsigned long max_drift, tolerate_drift;
176 
177 		threshold = calculate_normal_threshold(zone);
178 
179 		for_each_online_cpu(cpu)
180 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
181 							= threshold;
182 
183 		/*
184 		 * Only set percpu_drift_mark if there is a danger that
185 		 * NR_FREE_PAGES reports the low watermark is ok when in fact
186 		 * the min watermark could be breached by an allocation
187 		 */
188 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
189 		max_drift = num_online_cpus() * threshold;
190 		if (max_drift > tolerate_drift)
191 			zone->percpu_drift_mark = high_wmark_pages(zone) +
192 					max_drift;
193 	}
194 }
195 
196 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
197 				int (*calculate_pressure)(struct zone *))
198 {
199 	struct zone *zone;
200 	int cpu;
201 	int threshold;
202 	int i;
203 
204 	for (i = 0; i < pgdat->nr_zones; i++) {
205 		zone = &pgdat->node_zones[i];
206 		if (!zone->percpu_drift_mark)
207 			continue;
208 
209 		threshold = (*calculate_pressure)(zone);
210 		for_each_online_cpu(cpu)
211 			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
212 							= threshold;
213 	}
214 }
215 
216 /*
217  * For use when we know that interrupts are disabled,
218  * or when we know that preemption is disabled and that
219  * particular counter cannot be updated from interrupt context.
220  */
221 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
222 			   long delta)
223 {
224 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
225 	s8 __percpu *p = pcp->vm_stat_diff + item;
226 	long x;
227 	long t;
228 
229 	x = delta + __this_cpu_read(*p);
230 
231 	t = __this_cpu_read(pcp->stat_threshold);
232 
233 	if (unlikely(x > t || x < -t)) {
234 		zone_page_state_add(x, zone, item);
235 		x = 0;
236 	}
237 	__this_cpu_write(*p, x);
238 }
239 EXPORT_SYMBOL(__mod_zone_page_state);
240 
241 /*
242  * Optimized increment and decrement functions.
243  *
244  * These are only for a single page and therefore can take a struct page *
245  * argument instead of struct zone *. This allows the inclusion of the code
246  * generated for page_zone(page) into the optimized functions.
247  *
248  * No overflow check is necessary and therefore the differential can be
249  * incremented or decremented in place which may allow the compilers to
250  * generate better code.
251  * The increment or decrement is known and therefore one boundary check can
252  * be omitted.
253  *
254  * NOTE: These functions are very performance sensitive. Change only
255  * with care.
256  *
257  * Some processors have inc/dec instructions that are atomic vs an interrupt.
258  * However, the code must first determine the differential location in a zone
259  * based on the processor number and then inc/dec the counter. There is no
260  * guarantee without disabling preemption that the processor will not change
261  * in between and therefore the atomicity vs. interrupt cannot be exploited
262  * in a useful way here.
263  */
264 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
265 {
266 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
267 	s8 __percpu *p = pcp->vm_stat_diff + item;
268 	s8 v, t;
269 
270 	v = __this_cpu_inc_return(*p);
271 	t = __this_cpu_read(pcp->stat_threshold);
272 	if (unlikely(v > t)) {
273 		s8 overstep = t >> 1;
274 
275 		zone_page_state_add(v + overstep, zone, item);
276 		__this_cpu_write(*p, -overstep);
277 	}
278 }
279 
280 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
281 {
282 	__inc_zone_state(page_zone(page), item);
283 }
284 EXPORT_SYMBOL(__inc_zone_page_state);
285 
286 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
287 {
288 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
289 	s8 __percpu *p = pcp->vm_stat_diff + item;
290 	s8 v, t;
291 
292 	v = __this_cpu_dec_return(*p);
293 	t = __this_cpu_read(pcp->stat_threshold);
294 	if (unlikely(v < - t)) {
295 		s8 overstep = t >> 1;
296 
297 		zone_page_state_add(v - overstep, zone, item);
298 		__this_cpu_write(*p, overstep);
299 	}
300 }
301 
302 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
303 {
304 	__dec_zone_state(page_zone(page), item);
305 }
306 EXPORT_SYMBOL(__dec_zone_page_state);
307 
308 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
309 /*
310  * If we have cmpxchg_local support then we do not need to incur the overhead
311  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
312  *
313  * mod_state() modifies the zone counter state through atomic per cpu
314  * operations.
315  *
316  * Overstep mode specifies how overstep should handled:
317  *     0       No overstepping
318  *     1       Overstepping half of threshold
319  *     -1      Overstepping minus half of threshold
320 */
321 static inline void mod_state(struct zone *zone, enum zone_stat_item item,
322 			     long delta, int overstep_mode)
323 {
324 	struct per_cpu_pageset __percpu *pcp = zone->pageset;
325 	s8 __percpu *p = pcp->vm_stat_diff + item;
326 	long o, n, t, z;
327 
328 	do {
329 		z = 0;  /* overflow to zone counters */
330 
331 		/*
332 		 * The fetching of the stat_threshold is racy. We may apply
333 		 * a counter threshold to the wrong the cpu if we get
334 		 * rescheduled while executing here. However, the next
335 		 * counter update will apply the threshold again and
336 		 * therefore bring the counter under the threshold again.
337 		 *
338 		 * Most of the time the thresholds are the same anyways
339 		 * for all cpus in a zone.
340 		 */
341 		t = this_cpu_read(pcp->stat_threshold);
342 
343 		o = this_cpu_read(*p);
344 		n = delta + o;
345 
346 		if (n > t || n < -t) {
347 			int os = overstep_mode * (t >> 1) ;
348 
349 			/* Overflow must be added to zone counters */
350 			z = n + os;
351 			n = -os;
352 		}
353 	} while (this_cpu_cmpxchg(*p, o, n) != o);
354 
355 	if (z)
356 		zone_page_state_add(z, zone, item);
357 }
358 
359 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
360 			 long delta)
361 {
362 	mod_state(zone, item, delta, 0);
363 }
364 EXPORT_SYMBOL(mod_zone_page_state);
365 
366 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
367 {
368 	mod_state(zone, item, 1, 1);
369 }
370 
371 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
372 {
373 	mod_state(page_zone(page), item, 1, 1);
374 }
375 EXPORT_SYMBOL(inc_zone_page_state);
376 
377 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
378 {
379 	mod_state(page_zone(page), item, -1, -1);
380 }
381 EXPORT_SYMBOL(dec_zone_page_state);
382 #else
383 /*
384  * Use interrupt disable to serialize counter updates
385  */
386 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
387 			 long delta)
388 {
389 	unsigned long flags;
390 
391 	local_irq_save(flags);
392 	__mod_zone_page_state(zone, item, delta);
393 	local_irq_restore(flags);
394 }
395 EXPORT_SYMBOL(mod_zone_page_state);
396 
397 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
398 {
399 	unsigned long flags;
400 
401 	local_irq_save(flags);
402 	__inc_zone_state(zone, item);
403 	local_irq_restore(flags);
404 }
405 
406 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
407 {
408 	unsigned long flags;
409 	struct zone *zone;
410 
411 	zone = page_zone(page);
412 	local_irq_save(flags);
413 	__inc_zone_state(zone, item);
414 	local_irq_restore(flags);
415 }
416 EXPORT_SYMBOL(inc_zone_page_state);
417 
418 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
419 {
420 	unsigned long flags;
421 
422 	local_irq_save(flags);
423 	__dec_zone_page_state(page, item);
424 	local_irq_restore(flags);
425 }
426 EXPORT_SYMBOL(dec_zone_page_state);
427 #endif
428 
429 
430 /*
431  * Fold a differential into the global counters.
432  * Returns the number of counters updated.
433  */
434 static int fold_diff(int *diff)
435 {
436 	int i;
437 	int changes = 0;
438 
439 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
440 		if (diff[i]) {
441 			atomic_long_add(diff[i], &vm_stat[i]);
442 			changes++;
443 	}
444 	return changes;
445 }
446 
447 /*
448  * Update the zone counters for the current cpu.
449  *
450  * Note that refresh_cpu_vm_stats strives to only access
451  * node local memory. The per cpu pagesets on remote zones are placed
452  * in the memory local to the processor using that pageset. So the
453  * loop over all zones will access a series of cachelines local to
454  * the processor.
455  *
456  * The call to zone_page_state_add updates the cachelines with the
457  * statistics in the remote zone struct as well as the global cachelines
458  * with the global counters. These could cause remote node cache line
459  * bouncing and will have to be only done when necessary.
460  *
461  * The function returns the number of global counters updated.
462  */
463 static int refresh_cpu_vm_stats(bool do_pagesets)
464 {
465 	struct zone *zone;
466 	int i;
467 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
468 	int changes = 0;
469 
470 	for_each_populated_zone(zone) {
471 		struct per_cpu_pageset __percpu *p = zone->pageset;
472 
473 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
474 			int v;
475 
476 			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
477 			if (v) {
478 
479 				atomic_long_add(v, &zone->vm_stat[i]);
480 				global_diff[i] += v;
481 #ifdef CONFIG_NUMA
482 				/* 3 seconds idle till flush */
483 				__this_cpu_write(p->expire, 3);
484 #endif
485 			}
486 		}
487 #ifdef CONFIG_NUMA
488 		if (do_pagesets) {
489 			cond_resched();
490 			/*
491 			 * Deal with draining the remote pageset of this
492 			 * processor
493 			 *
494 			 * Check if there are pages remaining in this pageset
495 			 * if not then there is nothing to expire.
496 			 */
497 			if (!__this_cpu_read(p->expire) ||
498 			       !__this_cpu_read(p->pcp.count))
499 				continue;
500 
501 			/*
502 			 * We never drain zones local to this processor.
503 			 */
504 			if (zone_to_nid(zone) == numa_node_id()) {
505 				__this_cpu_write(p->expire, 0);
506 				continue;
507 			}
508 
509 			if (__this_cpu_dec_return(p->expire))
510 				continue;
511 
512 			if (__this_cpu_read(p->pcp.count)) {
513 				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
514 				changes++;
515 			}
516 		}
517 #endif
518 	}
519 	changes += fold_diff(global_diff);
520 	return changes;
521 }
522 
523 /*
524  * Fold the data for an offline cpu into the global array.
525  * There cannot be any access by the offline cpu and therefore
526  * synchronization is simplified.
527  */
528 void cpu_vm_stats_fold(int cpu)
529 {
530 	struct zone *zone;
531 	int i;
532 	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
533 
534 	for_each_populated_zone(zone) {
535 		struct per_cpu_pageset *p;
536 
537 		p = per_cpu_ptr(zone->pageset, cpu);
538 
539 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
540 			if (p->vm_stat_diff[i]) {
541 				int v;
542 
543 				v = p->vm_stat_diff[i];
544 				p->vm_stat_diff[i] = 0;
545 				atomic_long_add(v, &zone->vm_stat[i]);
546 				global_diff[i] += v;
547 			}
548 	}
549 
550 	fold_diff(global_diff);
551 }
552 
553 /*
554  * this is only called if !populated_zone(zone), which implies no other users of
555  * pset->vm_stat_diff[] exsist.
556  */
557 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
558 {
559 	int i;
560 
561 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
562 		if (pset->vm_stat_diff[i]) {
563 			int v = pset->vm_stat_diff[i];
564 			pset->vm_stat_diff[i] = 0;
565 			atomic_long_add(v, &zone->vm_stat[i]);
566 			atomic_long_add(v, &vm_stat[i]);
567 		}
568 }
569 #endif
570 
571 #ifdef CONFIG_NUMA
572 /*
573  * zonelist = the list of zones passed to the allocator
574  * z 	    = the zone from which the allocation occurred.
575  *
576  * Must be called with interrupts disabled.
577  *
578  * When __GFP_OTHER_NODE is set assume the node of the preferred
579  * zone is the local node. This is useful for daemons who allocate
580  * memory on behalf of other processes.
581  */
582 void zone_statistics(struct zone *preferred_zone, struct zone *z, gfp_t flags)
583 {
584 	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
585 		__inc_zone_state(z, NUMA_HIT);
586 	} else {
587 		__inc_zone_state(z, NUMA_MISS);
588 		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
589 	}
590 	if (z->node == ((flags & __GFP_OTHER_NODE) ?
591 			preferred_zone->node : numa_node_id()))
592 		__inc_zone_state(z, NUMA_LOCAL);
593 	else
594 		__inc_zone_state(z, NUMA_OTHER);
595 }
596 
597 /*
598  * Determine the per node value of a stat item.
599  */
600 unsigned long node_page_state(int node, enum zone_stat_item item)
601 {
602 	struct zone *zones = NODE_DATA(node)->node_zones;
603 
604 	return
605 #ifdef CONFIG_ZONE_DMA
606 		zone_page_state(&zones[ZONE_DMA], item) +
607 #endif
608 #ifdef CONFIG_ZONE_DMA32
609 		zone_page_state(&zones[ZONE_DMA32], item) +
610 #endif
611 #ifdef CONFIG_HIGHMEM
612 		zone_page_state(&zones[ZONE_HIGHMEM], item) +
613 #endif
614 		zone_page_state(&zones[ZONE_NORMAL], item) +
615 		zone_page_state(&zones[ZONE_MOVABLE], item);
616 }
617 
618 #endif
619 
620 #ifdef CONFIG_COMPACTION
621 
622 struct contig_page_info {
623 	unsigned long free_pages;
624 	unsigned long free_blocks_total;
625 	unsigned long free_blocks_suitable;
626 };
627 
628 /*
629  * Calculate the number of free pages in a zone, how many contiguous
630  * pages are free and how many are large enough to satisfy an allocation of
631  * the target size. Note that this function makes no attempt to estimate
632  * how many suitable free blocks there *might* be if MOVABLE pages were
633  * migrated. Calculating that is possible, but expensive and can be
634  * figured out from userspace
635  */
636 static void fill_contig_page_info(struct zone *zone,
637 				unsigned int suitable_order,
638 				struct contig_page_info *info)
639 {
640 	unsigned int order;
641 
642 	info->free_pages = 0;
643 	info->free_blocks_total = 0;
644 	info->free_blocks_suitable = 0;
645 
646 	for (order = 0; order < MAX_ORDER; order++) {
647 		unsigned long blocks;
648 
649 		/* Count number of free blocks */
650 		blocks = zone->free_area[order].nr_free;
651 		info->free_blocks_total += blocks;
652 
653 		/* Count free base pages */
654 		info->free_pages += blocks << order;
655 
656 		/* Count the suitable free blocks */
657 		if (order >= suitable_order)
658 			info->free_blocks_suitable += blocks <<
659 						(order - suitable_order);
660 	}
661 }
662 
663 /*
664  * A fragmentation index only makes sense if an allocation of a requested
665  * size would fail. If that is true, the fragmentation index indicates
666  * whether external fragmentation or a lack of memory was the problem.
667  * The value can be used to determine if page reclaim or compaction
668  * should be used
669  */
670 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
671 {
672 	unsigned long requested = 1UL << order;
673 
674 	if (!info->free_blocks_total)
675 		return 0;
676 
677 	/* Fragmentation index only makes sense when a request would fail */
678 	if (info->free_blocks_suitable)
679 		return -1000;
680 
681 	/*
682 	 * Index is between 0 and 1 so return within 3 decimal places
683 	 *
684 	 * 0 => allocation would fail due to lack of memory
685 	 * 1 => allocation would fail due to fragmentation
686 	 */
687 	return 1000 - div_u64( (1000+(div_u64(info->free_pages * 1000ULL, requested))), info->free_blocks_total);
688 }
689 
690 /* Same as __fragmentation index but allocs contig_page_info on stack */
691 int fragmentation_index(struct zone *zone, unsigned int order)
692 {
693 	struct contig_page_info info;
694 
695 	fill_contig_page_info(zone, order, &info);
696 	return __fragmentation_index(order, &info);
697 }
698 #endif
699 
700 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || defined(CONFIG_NUMA)
701 #ifdef CONFIG_ZONE_DMA
702 #define TEXT_FOR_DMA(xx) xx "_dma",
703 #else
704 #define TEXT_FOR_DMA(xx)
705 #endif
706 
707 #ifdef CONFIG_ZONE_DMA32
708 #define TEXT_FOR_DMA32(xx) xx "_dma32",
709 #else
710 #define TEXT_FOR_DMA32(xx)
711 #endif
712 
713 #ifdef CONFIG_HIGHMEM
714 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
715 #else
716 #define TEXT_FOR_HIGHMEM(xx)
717 #endif
718 
719 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
720 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
721 
722 const char * const vmstat_text[] = {
723 	/* enum zone_stat_item countes */
724 	"nr_free_pages",
725 	"nr_alloc_batch",
726 	"nr_inactive_anon",
727 	"nr_active_anon",
728 	"nr_inactive_file",
729 	"nr_active_file",
730 	"nr_unevictable",
731 	"nr_mlock",
732 	"nr_anon_pages",
733 	"nr_mapped",
734 	"nr_file_pages",
735 	"nr_dirty",
736 	"nr_writeback",
737 	"nr_slab_reclaimable",
738 	"nr_slab_unreclaimable",
739 	"nr_page_table_pages",
740 	"nr_kernel_stack",
741 	"nr_unstable",
742 	"nr_bounce",
743 	"nr_vmscan_write",
744 	"nr_vmscan_immediate_reclaim",
745 	"nr_writeback_temp",
746 	"nr_isolated_anon",
747 	"nr_isolated_file",
748 	"nr_shmem",
749 	"nr_dirtied",
750 	"nr_written",
751 	"nr_pages_scanned",
752 
753 #ifdef CONFIG_NUMA
754 	"numa_hit",
755 	"numa_miss",
756 	"numa_foreign",
757 	"numa_interleave",
758 	"numa_local",
759 	"numa_other",
760 #endif
761 	"workingset_refault",
762 	"workingset_activate",
763 	"workingset_nodereclaim",
764 	"nr_anon_transparent_hugepages",
765 	"nr_free_cma",
766 
767 	/* enum writeback_stat_item counters */
768 	"nr_dirty_threshold",
769 	"nr_dirty_background_threshold",
770 
771 #ifdef CONFIG_VM_EVENT_COUNTERS
772 	/* enum vm_event_item counters */
773 	"pgpgin",
774 	"pgpgout",
775 	"pswpin",
776 	"pswpout",
777 
778 	TEXTS_FOR_ZONES("pgalloc")
779 
780 	"pgfree",
781 	"pgactivate",
782 	"pgdeactivate",
783 
784 	"pgfault",
785 	"pgmajfault",
786 	"pglazyfreed",
787 
788 	TEXTS_FOR_ZONES("pgrefill")
789 	TEXTS_FOR_ZONES("pgsteal_kswapd")
790 	TEXTS_FOR_ZONES("pgsteal_direct")
791 	TEXTS_FOR_ZONES("pgscan_kswapd")
792 	TEXTS_FOR_ZONES("pgscan_direct")
793 	"pgscan_direct_throttle",
794 
795 #ifdef CONFIG_NUMA
796 	"zone_reclaim_failed",
797 #endif
798 	"pginodesteal",
799 	"slabs_scanned",
800 	"kswapd_inodesteal",
801 	"kswapd_low_wmark_hit_quickly",
802 	"kswapd_high_wmark_hit_quickly",
803 	"pageoutrun",
804 	"allocstall",
805 
806 	"pgrotated",
807 
808 	"drop_pagecache",
809 	"drop_slab",
810 
811 #ifdef CONFIG_NUMA_BALANCING
812 	"numa_pte_updates",
813 	"numa_huge_pte_updates",
814 	"numa_hint_faults",
815 	"numa_hint_faults_local",
816 	"numa_pages_migrated",
817 #endif
818 #ifdef CONFIG_MIGRATION
819 	"pgmigrate_success",
820 	"pgmigrate_fail",
821 #endif
822 #ifdef CONFIG_COMPACTION
823 	"compact_migrate_scanned",
824 	"compact_free_scanned",
825 	"compact_isolated",
826 	"compact_stall",
827 	"compact_fail",
828 	"compact_success",
829 	"compact_daemon_wake",
830 #endif
831 
832 #ifdef CONFIG_HUGETLB_PAGE
833 	"htlb_buddy_alloc_success",
834 	"htlb_buddy_alloc_fail",
835 #endif
836 	"unevictable_pgs_culled",
837 	"unevictable_pgs_scanned",
838 	"unevictable_pgs_rescued",
839 	"unevictable_pgs_mlocked",
840 	"unevictable_pgs_munlocked",
841 	"unevictable_pgs_cleared",
842 	"unevictable_pgs_stranded",
843 
844 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
845 	"thp_fault_alloc",
846 	"thp_fault_fallback",
847 	"thp_collapse_alloc",
848 	"thp_collapse_alloc_failed",
849 	"thp_split_page",
850 	"thp_split_page_failed",
851 	"thp_deferred_split_page",
852 	"thp_split_pmd",
853 	"thp_zero_page_alloc",
854 	"thp_zero_page_alloc_failed",
855 #endif
856 #ifdef CONFIG_MEMORY_BALLOON
857 	"balloon_inflate",
858 	"balloon_deflate",
859 #ifdef CONFIG_BALLOON_COMPACTION
860 	"balloon_migrate",
861 #endif
862 #endif /* CONFIG_MEMORY_BALLOON */
863 #ifdef CONFIG_DEBUG_TLBFLUSH
864 #ifdef CONFIG_SMP
865 	"nr_tlb_remote_flush",
866 	"nr_tlb_remote_flush_received",
867 #endif /* CONFIG_SMP */
868 	"nr_tlb_local_flush_all",
869 	"nr_tlb_local_flush_one",
870 #endif /* CONFIG_DEBUG_TLBFLUSH */
871 
872 #ifdef CONFIG_DEBUG_VM_VMACACHE
873 	"vmacache_find_calls",
874 	"vmacache_find_hits",
875 	"vmacache_full_flushes",
876 #endif
877 #endif /* CONFIG_VM_EVENTS_COUNTERS */
878 };
879 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA */
880 
881 
882 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
883      defined(CONFIG_PROC_FS)
884 static void *frag_start(struct seq_file *m, loff_t *pos)
885 {
886 	pg_data_t *pgdat;
887 	loff_t node = *pos;
888 
889 	for (pgdat = first_online_pgdat();
890 	     pgdat && node;
891 	     pgdat = next_online_pgdat(pgdat))
892 		--node;
893 
894 	return pgdat;
895 }
896 
897 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
898 {
899 	pg_data_t *pgdat = (pg_data_t *)arg;
900 
901 	(*pos)++;
902 	return next_online_pgdat(pgdat);
903 }
904 
905 static void frag_stop(struct seq_file *m, void *arg)
906 {
907 }
908 
909 /* Walk all the zones in a node and print using a callback */
910 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
911 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
912 {
913 	struct zone *zone;
914 	struct zone *node_zones = pgdat->node_zones;
915 	unsigned long flags;
916 
917 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
918 		if (!populated_zone(zone))
919 			continue;
920 
921 		spin_lock_irqsave(&zone->lock, flags);
922 		print(m, pgdat, zone);
923 		spin_unlock_irqrestore(&zone->lock, flags);
924 	}
925 }
926 #endif
927 
928 #ifdef CONFIG_PROC_FS
929 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
930 						struct zone *zone)
931 {
932 	int order;
933 
934 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
935 	for (order = 0; order < MAX_ORDER; ++order)
936 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
937 	seq_putc(m, '\n');
938 }
939 
940 /*
941  * This walks the free areas for each zone.
942  */
943 static int frag_show(struct seq_file *m, void *arg)
944 {
945 	pg_data_t *pgdat = (pg_data_t *)arg;
946 	walk_zones_in_node(m, pgdat, frag_show_print);
947 	return 0;
948 }
949 
950 static void pagetypeinfo_showfree_print(struct seq_file *m,
951 					pg_data_t *pgdat, struct zone *zone)
952 {
953 	int order, mtype;
954 
955 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
956 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
957 					pgdat->node_id,
958 					zone->name,
959 					migratetype_names[mtype]);
960 		for (order = 0; order < MAX_ORDER; ++order) {
961 			unsigned long freecount = 0;
962 			struct free_area *area;
963 			struct list_head *curr;
964 
965 			area = &(zone->free_area[order]);
966 
967 			list_for_each(curr, &area->free_list[mtype])
968 				freecount++;
969 			seq_printf(m, "%6lu ", freecount);
970 		}
971 		seq_putc(m, '\n');
972 	}
973 }
974 
975 /* Print out the free pages at each order for each migatetype */
976 static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
977 {
978 	int order;
979 	pg_data_t *pgdat = (pg_data_t *)arg;
980 
981 	/* Print header */
982 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
983 	for (order = 0; order < MAX_ORDER; ++order)
984 		seq_printf(m, "%6d ", order);
985 	seq_putc(m, '\n');
986 
987 	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);
988 
989 	return 0;
990 }
991 
992 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
993 					pg_data_t *pgdat, struct zone *zone)
994 {
995 	int mtype;
996 	unsigned long pfn;
997 	unsigned long start_pfn = zone->zone_start_pfn;
998 	unsigned long end_pfn = zone_end_pfn(zone);
999 	unsigned long count[MIGRATE_TYPES] = { 0, };
1000 
1001 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1002 		struct page *page;
1003 
1004 		if (!pfn_valid(pfn))
1005 			continue;
1006 
1007 		page = pfn_to_page(pfn);
1008 
1009 		/* Watch for unexpected holes punched in the memmap */
1010 		if (!memmap_valid_within(pfn, page, zone))
1011 			continue;
1012 
1013 		mtype = get_pageblock_migratetype(page);
1014 
1015 		if (mtype < MIGRATE_TYPES)
1016 			count[mtype]++;
1017 	}
1018 
1019 	/* Print counts */
1020 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1021 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1022 		seq_printf(m, "%12lu ", count[mtype]);
1023 	seq_putc(m, '\n');
1024 }
1025 
1026 /* Print out the free pages at each order for each migratetype */
1027 static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1028 {
1029 	int mtype;
1030 	pg_data_t *pgdat = (pg_data_t *)arg;
1031 
1032 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1033 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1034 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1035 	seq_putc(m, '\n');
1036 	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);
1037 
1038 	return 0;
1039 }
1040 
1041 #ifdef CONFIG_PAGE_OWNER
1042 static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
1043 							pg_data_t *pgdat,
1044 							struct zone *zone)
1045 {
1046 	struct page *page;
1047 	struct page_ext *page_ext;
1048 	unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
1049 	unsigned long end_pfn = pfn + zone->spanned_pages;
1050 	unsigned long count[MIGRATE_TYPES] = { 0, };
1051 	int pageblock_mt, page_mt;
1052 	int i;
1053 
1054 	/* Scan block by block. First and last block may be incomplete */
1055 	pfn = zone->zone_start_pfn;
1056 
1057 	/*
1058 	 * Walk the zone in pageblock_nr_pages steps. If a page block spans
1059 	 * a zone boundary, it will be double counted between zones. This does
1060 	 * not matter as the mixed block count will still be correct
1061 	 */
1062 	for (; pfn < end_pfn; ) {
1063 		if (!pfn_valid(pfn)) {
1064 			pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
1065 			continue;
1066 		}
1067 
1068 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
1069 		block_end_pfn = min(block_end_pfn, end_pfn);
1070 
1071 		page = pfn_to_page(pfn);
1072 		pageblock_mt = get_pfnblock_migratetype(page, pfn);
1073 
1074 		for (; pfn < block_end_pfn; pfn++) {
1075 			if (!pfn_valid_within(pfn))
1076 				continue;
1077 
1078 			page = pfn_to_page(pfn);
1079 			if (PageBuddy(page)) {
1080 				pfn += (1UL << page_order(page)) - 1;
1081 				continue;
1082 			}
1083 
1084 			if (PageReserved(page))
1085 				continue;
1086 
1087 			page_ext = lookup_page_ext(page);
1088 
1089 			if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
1090 				continue;
1091 
1092 			page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
1093 			if (pageblock_mt != page_mt) {
1094 				if (is_migrate_cma(pageblock_mt))
1095 					count[MIGRATE_MOVABLE]++;
1096 				else
1097 					count[pageblock_mt]++;
1098 
1099 				pfn = block_end_pfn;
1100 				break;
1101 			}
1102 			pfn += (1UL << page_ext->order) - 1;
1103 		}
1104 	}
1105 
1106 	/* Print counts */
1107 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1108 	for (i = 0; i < MIGRATE_TYPES; i++)
1109 		seq_printf(m, "%12lu ", count[i]);
1110 	seq_putc(m, '\n');
1111 }
1112 #endif /* CONFIG_PAGE_OWNER */
1113 
1114 /*
1115  * Print out the number of pageblocks for each migratetype that contain pages
1116  * of other types. This gives an indication of how well fallbacks are being
1117  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1118  * to determine what is going on
1119  */
1120 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1121 {
1122 #ifdef CONFIG_PAGE_OWNER
1123 	int mtype;
1124 
1125 	if (!static_branch_unlikely(&page_owner_inited))
1126 		return;
1127 
1128 	drain_all_pages(NULL);
1129 
1130 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1131 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1132 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1133 	seq_putc(m, '\n');
1134 
1135 	walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
1136 #endif /* CONFIG_PAGE_OWNER */
1137 }
1138 
1139 /*
1140  * This prints out statistics in relation to grouping pages by mobility.
1141  * It is expensive to collect so do not constantly read the file.
1142  */
1143 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1144 {
1145 	pg_data_t *pgdat = (pg_data_t *)arg;
1146 
1147 	/* check memoryless node */
1148 	if (!node_state(pgdat->node_id, N_MEMORY))
1149 		return 0;
1150 
1151 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1152 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1153 	seq_putc(m, '\n');
1154 	pagetypeinfo_showfree(m, pgdat);
1155 	pagetypeinfo_showblockcount(m, pgdat);
1156 	pagetypeinfo_showmixedcount(m, pgdat);
1157 
1158 	return 0;
1159 }
1160 
1161 static const struct seq_operations fragmentation_op = {
1162 	.start	= frag_start,
1163 	.next	= frag_next,
1164 	.stop	= frag_stop,
1165 	.show	= frag_show,
1166 };
1167 
1168 static int fragmentation_open(struct inode *inode, struct file *file)
1169 {
1170 	return seq_open(file, &fragmentation_op);
1171 }
1172 
1173 static const struct file_operations fragmentation_file_operations = {
1174 	.open		= fragmentation_open,
1175 	.read		= seq_read,
1176 	.llseek		= seq_lseek,
1177 	.release	= seq_release,
1178 };
1179 
1180 static const struct seq_operations pagetypeinfo_op = {
1181 	.start	= frag_start,
1182 	.next	= frag_next,
1183 	.stop	= frag_stop,
1184 	.show	= pagetypeinfo_show,
1185 };
1186 
1187 static int pagetypeinfo_open(struct inode *inode, struct file *file)
1188 {
1189 	return seq_open(file, &pagetypeinfo_op);
1190 }
1191 
1192 static const struct file_operations pagetypeinfo_file_ops = {
1193 	.open		= pagetypeinfo_open,
1194 	.read		= seq_read,
1195 	.llseek		= seq_lseek,
1196 	.release	= seq_release,
1197 };
1198 
1199 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1200 							struct zone *zone)
1201 {
1202 	int i;
1203 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1204 	seq_printf(m,
1205 		   "\n  pages free     %lu"
1206 		   "\n        min      %lu"
1207 		   "\n        low      %lu"
1208 		   "\n        high     %lu"
1209 		   "\n        scanned  %lu"
1210 		   "\n        spanned  %lu"
1211 		   "\n        present  %lu"
1212 		   "\n        managed  %lu",
1213 		   zone_page_state(zone, NR_FREE_PAGES),
1214 		   min_wmark_pages(zone),
1215 		   low_wmark_pages(zone),
1216 		   high_wmark_pages(zone),
1217 		   zone_page_state(zone, NR_PAGES_SCANNED),
1218 		   zone->spanned_pages,
1219 		   zone->present_pages,
1220 		   zone->managed_pages);
1221 
1222 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1223 		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
1224 				zone_page_state(zone, i));
1225 
1226 	seq_printf(m,
1227 		   "\n        protection: (%ld",
1228 		   zone->lowmem_reserve[0]);
1229 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1230 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1231 	seq_printf(m,
1232 		   ")"
1233 		   "\n  pagesets");
1234 	for_each_online_cpu(i) {
1235 		struct per_cpu_pageset *pageset;
1236 
1237 		pageset = per_cpu_ptr(zone->pageset, i);
1238 		seq_printf(m,
1239 			   "\n    cpu: %i"
1240 			   "\n              count: %i"
1241 			   "\n              high:  %i"
1242 			   "\n              batch: %i",
1243 			   i,
1244 			   pageset->pcp.count,
1245 			   pageset->pcp.high,
1246 			   pageset->pcp.batch);
1247 #ifdef CONFIG_SMP
1248 		seq_printf(m, "\n  vm stats threshold: %d",
1249 				pageset->stat_threshold);
1250 #endif
1251 	}
1252 	seq_printf(m,
1253 		   "\n  all_unreclaimable: %u"
1254 		   "\n  start_pfn:         %lu"
1255 		   "\n  inactive_ratio:    %u",
1256 		   !zone_reclaimable(zone),
1257 		   zone->zone_start_pfn,
1258 		   zone->inactive_ratio);
1259 	seq_putc(m, '\n');
1260 }
1261 
1262 /*
1263  * Output information about zones in @pgdat.
1264  */
1265 static int zoneinfo_show(struct seq_file *m, void *arg)
1266 {
1267 	pg_data_t *pgdat = (pg_data_t *)arg;
1268 	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
1269 	return 0;
1270 }
1271 
1272 static const struct seq_operations zoneinfo_op = {
1273 	.start	= frag_start, /* iterate over all zones. The same as in
1274 			       * fragmentation. */
1275 	.next	= frag_next,
1276 	.stop	= frag_stop,
1277 	.show	= zoneinfo_show,
1278 };
1279 
1280 static int zoneinfo_open(struct inode *inode, struct file *file)
1281 {
1282 	return seq_open(file, &zoneinfo_op);
1283 }
1284 
1285 static const struct file_operations proc_zoneinfo_file_operations = {
1286 	.open		= zoneinfo_open,
1287 	.read		= seq_read,
1288 	.llseek		= seq_lseek,
1289 	.release	= seq_release,
1290 };
1291 
1292 enum writeback_stat_item {
1293 	NR_DIRTY_THRESHOLD,
1294 	NR_DIRTY_BG_THRESHOLD,
1295 	NR_VM_WRITEBACK_STAT_ITEMS,
1296 };
1297 
1298 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1299 {
1300 	unsigned long *v;
1301 	int i, stat_items_size;
1302 
1303 	if (*pos >= ARRAY_SIZE(vmstat_text))
1304 		return NULL;
1305 	stat_items_size = NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long) +
1306 			  NR_VM_WRITEBACK_STAT_ITEMS * sizeof(unsigned long);
1307 
1308 #ifdef CONFIG_VM_EVENT_COUNTERS
1309 	stat_items_size += sizeof(struct vm_event_state);
1310 #endif
1311 
1312 	v = kmalloc(stat_items_size, GFP_KERNEL);
1313 	m->private = v;
1314 	if (!v)
1315 		return ERR_PTR(-ENOMEM);
1316 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1317 		v[i] = global_page_state(i);
1318 	v += NR_VM_ZONE_STAT_ITEMS;
1319 
1320 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1321 			    v + NR_DIRTY_THRESHOLD);
1322 	v += NR_VM_WRITEBACK_STAT_ITEMS;
1323 
1324 #ifdef CONFIG_VM_EVENT_COUNTERS
1325 	all_vm_events(v);
1326 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1327 	v[PGPGOUT] /= 2;
1328 #endif
1329 	return (unsigned long *)m->private + *pos;
1330 }
1331 
1332 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1333 {
1334 	(*pos)++;
1335 	if (*pos >= ARRAY_SIZE(vmstat_text))
1336 		return NULL;
1337 	return (unsigned long *)m->private + *pos;
1338 }
1339 
1340 static int vmstat_show(struct seq_file *m, void *arg)
1341 {
1342 	unsigned long *l = arg;
1343 	unsigned long off = l - (unsigned long *)m->private;
1344 
1345 	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
1346 	return 0;
1347 }
1348 
1349 static void vmstat_stop(struct seq_file *m, void *arg)
1350 {
1351 	kfree(m->private);
1352 	m->private = NULL;
1353 }
1354 
1355 static const struct seq_operations vmstat_op = {
1356 	.start	= vmstat_start,
1357 	.next	= vmstat_next,
1358 	.stop	= vmstat_stop,
1359 	.show	= vmstat_show,
1360 };
1361 
1362 static int vmstat_open(struct inode *inode, struct file *file)
1363 {
1364 	return seq_open(file, &vmstat_op);
1365 }
1366 
1367 static const struct file_operations proc_vmstat_file_operations = {
1368 	.open		= vmstat_open,
1369 	.read		= seq_read,
1370 	.llseek		= seq_lseek,
1371 	.release	= seq_release,
1372 };
1373 #endif /* CONFIG_PROC_FS */
1374 
1375 #ifdef CONFIG_SMP
1376 static struct workqueue_struct *vmstat_wq;
1377 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1378 int sysctl_stat_interval __read_mostly = HZ;
1379 static cpumask_var_t cpu_stat_off;
1380 
1381 static void vmstat_update(struct work_struct *w)
1382 {
1383 	if (refresh_cpu_vm_stats(true)) {
1384 		/*
1385 		 * Counters were updated so we expect more updates
1386 		 * to occur in the future. Keep on running the
1387 		 * update worker thread.
1388 		 * If we were marked on cpu_stat_off clear the flag
1389 		 * so that vmstat_shepherd doesn't schedule us again.
1390 		 */
1391 		if (!cpumask_test_and_clear_cpu(smp_processor_id(),
1392 						cpu_stat_off)) {
1393 			queue_delayed_work_on(smp_processor_id(), vmstat_wq,
1394 				this_cpu_ptr(&vmstat_work),
1395 				round_jiffies_relative(sysctl_stat_interval));
1396 		}
1397 	} else {
1398 		/*
1399 		 * We did not update any counters so the app may be in
1400 		 * a mode where it does not cause counter updates.
1401 		 * We may be uselessly running vmstat_update.
1402 		 * Defer the checking for differentials to the
1403 		 * shepherd thread on a different processor.
1404 		 */
1405 		cpumask_set_cpu(smp_processor_id(), cpu_stat_off);
1406 	}
1407 }
1408 
1409 /*
1410  * Switch off vmstat processing and then fold all the remaining differentials
1411  * until the diffs stay at zero. The function is used by NOHZ and can only be
1412  * invoked when tick processing is not active.
1413  */
1414 /*
1415  * Check if the diffs for a certain cpu indicate that
1416  * an update is needed.
1417  */
1418 static bool need_update(int cpu)
1419 {
1420 	struct zone *zone;
1421 
1422 	for_each_populated_zone(zone) {
1423 		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1424 
1425 		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1426 		/*
1427 		 * The fast way of checking if there are any vmstat diffs.
1428 		 * This works because the diffs are byte sized items.
1429 		 */
1430 		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
1431 			return true;
1432 
1433 	}
1434 	return false;
1435 }
1436 
1437 void quiet_vmstat(void)
1438 {
1439 	if (system_state != SYSTEM_RUNNING)
1440 		return;
1441 
1442 	/*
1443 	 * If we are already in hands of the shepherd then there
1444 	 * is nothing for us to do here.
1445 	 */
1446 	if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
1447 		return;
1448 
1449 	if (!need_update(smp_processor_id()))
1450 		return;
1451 
1452 	/*
1453 	 * Just refresh counters and do not care about the pending delayed
1454 	 * vmstat_update. It doesn't fire that often to matter and canceling
1455 	 * it would be too expensive from this path.
1456 	 * vmstat_shepherd will take care about that for us.
1457 	 */
1458 	refresh_cpu_vm_stats(false);
1459 }
1460 
1461 
1462 /*
1463  * Shepherd worker thread that checks the
1464  * differentials of processors that have their worker
1465  * threads for vm statistics updates disabled because of
1466  * inactivity.
1467  */
1468 static void vmstat_shepherd(struct work_struct *w);
1469 
1470 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1471 
1472 static void vmstat_shepherd(struct work_struct *w)
1473 {
1474 	int cpu;
1475 
1476 	get_online_cpus();
1477 	/* Check processors whose vmstat worker threads have been disabled */
1478 	for_each_cpu(cpu, cpu_stat_off) {
1479 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1480 
1481 		if (need_update(cpu)) {
1482 			if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
1483 				queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
1484 		} else {
1485 			/*
1486 			 * Cancel the work if quiet_vmstat has put this
1487 			 * cpu on cpu_stat_off because the work item might
1488 			 * be still scheduled
1489 			 */
1490 			cancel_delayed_work(dw);
1491 		}
1492 	}
1493 	put_online_cpus();
1494 
1495 	schedule_delayed_work(&shepherd,
1496 		round_jiffies_relative(sysctl_stat_interval));
1497 }
1498 
1499 static void __init start_shepherd_timer(void)
1500 {
1501 	int cpu;
1502 
1503 	for_each_possible_cpu(cpu)
1504 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1505 			vmstat_update);
1506 
1507 	if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
1508 		BUG();
1509 	cpumask_copy(cpu_stat_off, cpu_online_mask);
1510 
1511 	vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1512 	schedule_delayed_work(&shepherd,
1513 		round_jiffies_relative(sysctl_stat_interval));
1514 }
1515 
1516 static void vmstat_cpu_dead(int node)
1517 {
1518 	int cpu;
1519 
1520 	get_online_cpus();
1521 	for_each_online_cpu(cpu)
1522 		if (cpu_to_node(cpu) == node)
1523 			goto end;
1524 
1525 	node_clear_state(node, N_CPU);
1526 end:
1527 	put_online_cpus();
1528 }
1529 
1530 /*
1531  * Use the cpu notifier to insure that the thresholds are recalculated
1532  * when necessary.
1533  */
1534 static int vmstat_cpuup_callback(struct notifier_block *nfb,
1535 		unsigned long action,
1536 		void *hcpu)
1537 {
1538 	long cpu = (long)hcpu;
1539 
1540 	switch (action) {
1541 	case CPU_ONLINE:
1542 	case CPU_ONLINE_FROZEN:
1543 		refresh_zone_stat_thresholds();
1544 		node_set_state(cpu_to_node(cpu), N_CPU);
1545 		cpumask_set_cpu(cpu, cpu_stat_off);
1546 		break;
1547 	case CPU_DOWN_PREPARE:
1548 	case CPU_DOWN_PREPARE_FROZEN:
1549 		cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
1550 		cpumask_clear_cpu(cpu, cpu_stat_off);
1551 		break;
1552 	case CPU_DOWN_FAILED:
1553 	case CPU_DOWN_FAILED_FROZEN:
1554 		cpumask_set_cpu(cpu, cpu_stat_off);
1555 		break;
1556 	case CPU_DEAD:
1557 	case CPU_DEAD_FROZEN:
1558 		refresh_zone_stat_thresholds();
1559 		vmstat_cpu_dead(cpu_to_node(cpu));
1560 		break;
1561 	default:
1562 		break;
1563 	}
1564 	return NOTIFY_OK;
1565 }
1566 
1567 static struct notifier_block vmstat_notifier =
1568 	{ &vmstat_cpuup_callback, NULL, 0 };
1569 #endif
1570 
1571 static int __init setup_vmstat(void)
1572 {
1573 #ifdef CONFIG_SMP
1574 	cpu_notifier_register_begin();
1575 	__register_cpu_notifier(&vmstat_notifier);
1576 
1577 	start_shepherd_timer();
1578 	cpu_notifier_register_done();
1579 #endif
1580 #ifdef CONFIG_PROC_FS
1581 	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
1582 	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
1583 	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
1584 	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
1585 #endif
1586 	return 0;
1587 }
1588 module_init(setup_vmstat)
1589 
1590 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1591 
1592 /*
1593  * Return an index indicating how much of the available free memory is
1594  * unusable for an allocation of the requested size.
1595  */
1596 static int unusable_free_index(unsigned int order,
1597 				struct contig_page_info *info)
1598 {
1599 	/* No free memory is interpreted as all free memory is unusable */
1600 	if (info->free_pages == 0)
1601 		return 1000;
1602 
1603 	/*
1604 	 * Index should be a value between 0 and 1. Return a value to 3
1605 	 * decimal places.
1606 	 *
1607 	 * 0 => no fragmentation
1608 	 * 1 => high fragmentation
1609 	 */
1610 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
1611 
1612 }
1613 
1614 static void unusable_show_print(struct seq_file *m,
1615 					pg_data_t *pgdat, struct zone *zone)
1616 {
1617 	unsigned int order;
1618 	int index;
1619 	struct contig_page_info info;
1620 
1621 	seq_printf(m, "Node %d, zone %8s ",
1622 				pgdat->node_id,
1623 				zone->name);
1624 	for (order = 0; order < MAX_ORDER; ++order) {
1625 		fill_contig_page_info(zone, order, &info);
1626 		index = unusable_free_index(order, &info);
1627 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1628 	}
1629 
1630 	seq_putc(m, '\n');
1631 }
1632 
1633 /*
1634  * Display unusable free space index
1635  *
1636  * The unusable free space index measures how much of the available free
1637  * memory cannot be used to satisfy an allocation of a given size and is a
1638  * value between 0 and 1. The higher the value, the more of free memory is
1639  * unusable and by implication, the worse the external fragmentation is. This
1640  * can be expressed as a percentage by multiplying by 100.
1641  */
1642 static int unusable_show(struct seq_file *m, void *arg)
1643 {
1644 	pg_data_t *pgdat = (pg_data_t *)arg;
1645 
1646 	/* check memoryless node */
1647 	if (!node_state(pgdat->node_id, N_MEMORY))
1648 		return 0;
1649 
1650 	walk_zones_in_node(m, pgdat, unusable_show_print);
1651 
1652 	return 0;
1653 }
1654 
1655 static const struct seq_operations unusable_op = {
1656 	.start	= frag_start,
1657 	.next	= frag_next,
1658 	.stop	= frag_stop,
1659 	.show	= unusable_show,
1660 };
1661 
1662 static int unusable_open(struct inode *inode, struct file *file)
1663 {
1664 	return seq_open(file, &unusable_op);
1665 }
1666 
1667 static const struct file_operations unusable_file_ops = {
1668 	.open		= unusable_open,
1669 	.read		= seq_read,
1670 	.llseek		= seq_lseek,
1671 	.release	= seq_release,
1672 };
1673 
1674 static void extfrag_show_print(struct seq_file *m,
1675 					pg_data_t *pgdat, struct zone *zone)
1676 {
1677 	unsigned int order;
1678 	int index;
1679 
1680 	/* Alloc on stack as interrupts are disabled for zone walk */
1681 	struct contig_page_info info;
1682 
1683 	seq_printf(m, "Node %d, zone %8s ",
1684 				pgdat->node_id,
1685 				zone->name);
1686 	for (order = 0; order < MAX_ORDER; ++order) {
1687 		fill_contig_page_info(zone, order, &info);
1688 		index = __fragmentation_index(order, &info);
1689 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
1690 	}
1691 
1692 	seq_putc(m, '\n');
1693 }
1694 
1695 /*
1696  * Display fragmentation index for orders that allocations would fail for
1697  */
1698 static int extfrag_show(struct seq_file *m, void *arg)
1699 {
1700 	pg_data_t *pgdat = (pg_data_t *)arg;
1701 
1702 	walk_zones_in_node(m, pgdat, extfrag_show_print);
1703 
1704 	return 0;
1705 }
1706 
1707 static const struct seq_operations extfrag_op = {
1708 	.start	= frag_start,
1709 	.next	= frag_next,
1710 	.stop	= frag_stop,
1711 	.show	= extfrag_show,
1712 };
1713 
1714 static int extfrag_open(struct inode *inode, struct file *file)
1715 {
1716 	return seq_open(file, &extfrag_op);
1717 }
1718 
1719 static const struct file_operations extfrag_file_ops = {
1720 	.open		= extfrag_open,
1721 	.read		= seq_read,
1722 	.llseek		= seq_lseek,
1723 	.release	= seq_release,
1724 };
1725 
1726 static int __init extfrag_debug_init(void)
1727 {
1728 	struct dentry *extfrag_debug_root;
1729 
1730 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1731 	if (!extfrag_debug_root)
1732 		return -ENOMEM;
1733 
1734 	if (!debugfs_create_file("unusable_index", 0444,
1735 			extfrag_debug_root, NULL, &unusable_file_ops))
1736 		goto fail;
1737 
1738 	if (!debugfs_create_file("extfrag_index", 0444,
1739 			extfrag_debug_root, NULL, &extfrag_file_ops))
1740 		goto fail;
1741 
1742 	return 0;
1743 fail:
1744 	debugfs_remove_recursive(extfrag_debug_root);
1745 	return -ENOMEM;
1746 }
1747 
1748 module_init(extfrag_debug_init);
1749 #endif
1750