xref: /openbmc/linux/mm/vmstat.c (revision 54a611b6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/mm/vmstat.c
4  *
5  *  Manages VM statistics
6  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
7  *
8  *  zoned VM statistics
9  *  Copyright (C) 2006 Silicon Graphics, Inc.,
10  *		Christoph Lameter <christoph@lameter.com>
11  *  Copyright (C) 2008-2014 Christoph Lameter
12  */
13 #include <linux/fs.h>
14 #include <linux/mm.h>
15 #include <linux/err.h>
16 #include <linux/module.h>
17 #include <linux/slab.h>
18 #include <linux/cpu.h>
19 #include <linux/cpumask.h>
20 #include <linux/vmstat.h>
21 #include <linux/proc_fs.h>
22 #include <linux/seq_file.h>
23 #include <linux/debugfs.h>
24 #include <linux/sched.h>
25 #include <linux/math64.h>
26 #include <linux/writeback.h>
27 #include <linux/compaction.h>
28 #include <linux/mm_inline.h>
29 #include <linux/page_ext.h>
30 #include <linux/page_owner.h>
31 
32 #include "internal.h"
33 
34 #ifdef CONFIG_NUMA
35 int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;
36 
37 /* zero numa counters within a zone */
38 static void zero_zone_numa_counters(struct zone *zone)
39 {
40 	int item, cpu;
41 
42 	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++) {
43 		atomic_long_set(&zone->vm_numa_event[item], 0);
44 		for_each_online_cpu(cpu) {
45 			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
46 						= 0;
47 		}
48 	}
49 }
50 
51 /* zero numa counters of all the populated zones */
52 static void zero_zones_numa_counters(void)
53 {
54 	struct zone *zone;
55 
56 	for_each_populated_zone(zone)
57 		zero_zone_numa_counters(zone);
58 }
59 
60 /* zero global numa counters */
61 static void zero_global_numa_counters(void)
62 {
63 	int item;
64 
65 	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
66 		atomic_long_set(&vm_numa_event[item], 0);
67 }
68 
69 static void invalid_numa_statistics(void)
70 {
71 	zero_zones_numa_counters();
72 	zero_global_numa_counters();
73 }
74 
75 static DEFINE_MUTEX(vm_numa_stat_lock);
76 
77 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
78 		void *buffer, size_t *length, loff_t *ppos)
79 {
80 	int ret, oldval;
81 
82 	mutex_lock(&vm_numa_stat_lock);
83 	if (write)
84 		oldval = sysctl_vm_numa_stat;
85 	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
86 	if (ret || !write)
87 		goto out;
88 
89 	if (oldval == sysctl_vm_numa_stat)
90 		goto out;
91 	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
92 		static_branch_enable(&vm_numa_stat_key);
93 		pr_info("enable numa statistics\n");
94 	} else {
95 		static_branch_disable(&vm_numa_stat_key);
96 		invalid_numa_statistics();
97 		pr_info("disable numa statistics, and clear numa counters\n");
98 	}
99 
100 out:
101 	mutex_unlock(&vm_numa_stat_lock);
102 	return ret;
103 }
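/*
 * Illustrative usage (not part of this file): the knob above is normally
 * driven from userspace via procfs, e.g.
 *
 *	echo 0 > /proc/sys/vm/numa_stat		# stop collection, zero counters
 *	echo 1 > /proc/sys/vm/numa_stat		# resume collection
 *
 * Disabling flips vm_numa_stat_key so the NUMA counting fast paths become
 * no-ops, then both the per-cpu and the global event arrays are cleared.
 */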
104 #endif
105 
106 #ifdef CONFIG_VM_EVENT_COUNTERS
107 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
108 EXPORT_PER_CPU_SYMBOL(vm_event_states);
109 
110 static void sum_vm_events(unsigned long *ret)
111 {
112 	int cpu;
113 	int i;
114 
115 	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
116 
117 	for_each_online_cpu(cpu) {
118 		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
119 
120 		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
121 			ret[i] += this->event[i];
122 	}
123 }
124 
125 /*
126  * Accumulate the vm event counters across all CPUs.
127  * The result is unavoidably approximate - it can change
128  * during and after execution of this function.
129  */
130 void all_vm_events(unsigned long *ret)
131 {
132 	cpus_read_lock();
133 	sum_vm_events(ret);
134 	cpus_read_unlock();
135 }
136 EXPORT_SYMBOL_GPL(all_vm_events);
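/*
 * Example (hypothetical caller): a snapshot of every event counter fits in
 * an NR_VM_EVENT_ITEMS sized array:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	pr_debug("page faults so far: %lu\n", events[PGFAULT]);
 *
 * The values remain approximate because other CPUs keep counting while the
 * per-cpu states are summed.
 */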
137 
138 /*
139  * Fold the foreign cpu events into our own.
140  *
141  * This is adding to the events on one processor
142  * but keeps the global counts constant.
143  */
144 void vm_events_fold_cpu(int cpu)
145 {
146 	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
147 	int i;
148 
149 	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
150 		count_vm_events(i, fold_state->event[i]);
151 		fold_state->event[i] = 0;
152 	}
153 }
154 
155 #endif /* CONFIG_VM_EVENT_COUNTERS */
156 
157 /*
158  * Manage combined zone based / global counters
159  *
160  * vm_stat contains the global counters
161  */
162 atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
163 atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
164 atomic_long_t vm_numa_event[NR_VM_NUMA_EVENT_ITEMS] __cacheline_aligned_in_smp;
165 EXPORT_SYMBOL(vm_zone_stat);
166 EXPORT_SYMBOL(vm_node_stat);
167 
168 #ifdef CONFIG_NUMA
169 static void fold_vm_zone_numa_events(struct zone *zone)
170 {
171 	unsigned long zone_numa_events[NR_VM_NUMA_EVENT_ITEMS] = { 0, };
172 	int cpu;
173 	enum numa_stat_item item;
174 
175 	for_each_online_cpu(cpu) {
176 		struct per_cpu_zonestat *pzstats;
177 
178 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
179 		for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
180 			zone_numa_events[item] += xchg(&pzstats->vm_numa_event[item], 0);
181 	}
182 
183 	for (item = 0; item < NR_VM_NUMA_EVENT_ITEMS; item++)
184 		zone_numa_event_add(zone_numa_events[item], zone, item);
185 }
186 
187 void fold_vm_numa_events(void)
188 {
189 	struct zone *zone;
190 
191 	for_each_populated_zone(zone)
192 		fold_vm_zone_numa_events(zone);
193 }
194 #endif
195 
196 #ifdef CONFIG_SMP
197 
198 int calculate_pressure_threshold(struct zone *zone)
199 {
200 	int threshold;
201 	int watermark_distance;
202 
203 	/*
204 	 * As vmstats are not up to date, there is drift between the estimated
205 	 * and real values. For high thresholds and a high number of CPUs, it
206 	 * is possible for the min watermark to be breached while the estimated
207 	 * value looks fine. The pressure threshold is a reduced value such
208 	 * that even the maximum amount of drift will not accidentally breach
209 	 * the min watermark
210 	 */
211 	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
212 	threshold = max(1, (int)(watermark_distance / num_online_cpus()));
213 
214 	/*
215 	 * Maximum threshold is 125
216 	 */
217 	threshold = min(125, threshold);
218 
219 	return threshold;
220 }
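/*
 * Worked example (illustrative numbers): with a low-min watermark gap of
 * 1024 pages and 16 online CPUs, the pressure threshold is
 * max(1, 1024 / 16) = 64, well under the cap of 125. All CPUs together can
 * then hide at most 16 * 64 = 1024 pages of drift, which by construction
 * cannot take a zone that still appears to be at the low watermark below
 * the min watermark.
 */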
221 
222 int calculate_normal_threshold(struct zone *zone)
223 {
224 	int threshold;
225 	int mem;	/* memory in 128 MB units */
226 
227 	/*
228 	 * The threshold scales with the number of processors and the amount
229 	 * of memory per zone. More memory means that we can defer updates for
230 	 * longer, more processors could lead to more contention.
231 	 * fls() is used to have a cheap way of logarithmic scaling.
232 	 *
233 	 * Some sample thresholds:
234 	 *
235 	 * Threshold	Processors	(fls)	Zonesize	fls(mem)+1
236 	 * ------------------------------------------------------------------
237 	 * 8		1		1	0.9-1 GB	4
238 	 * 16		2		2	0.9-1 GB	4
239 	 * 20 		2		2	1-2 GB		5
240 	 * 24		2		2	2-4 GB		6
241 	 * 28		2		2	4-8 GB		7
242 	 * 32		2		2	8-16 GB		8
243 	 * 4		2		2	<128M		1
244 	 * 30		4		3	2-4 GB		5
245 	 * 48		4		3	8-16 GB		8
246 	 * 32		8		4	1-2 GB		4
247 	 * 32		8		4	0.9-1GB		4
248 	 * 10		16		5	<128M		1
249 	 * 40		16		5	900M		4
250 	 * 70		64		7	2-4 GB		5
251 	 * 84		64		7	4-8 GB		6
252 	 * 108		512		9	4-8 GB		6
253 	 * 125		1024		10	8-16 GB		8
254 	 * 125		1024		10	16-32 GB	9
255 	 */
256 
257 	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
258 
259 	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));
260 
261 	/*
262 	 * Maximum threshold is 125
263 	 */
264 	threshold = min(125, threshold);
265 
266 	return threshold;
267 }
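/*
 * Worked example (matches the "24 / 2 CPUs / 2-4 GB" row above): a zone
 * managing 2 GB is 16 units of 128 MB, so 1 + fls(mem) == 6; with two
 * online CPUs fls(2) == 2, giving threshold = 2 * 2 * 6 = 24, far below
 * the cap of 125.
 */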
268 
269 /*
270  * Refresh the thresholds for each zone.
271  */
272 void refresh_zone_stat_thresholds(void)
273 {
274 	struct pglist_data *pgdat;
275 	struct zone *zone;
276 	int cpu;
277 	int threshold;
278 
279 	/* Zero current pgdat thresholds */
280 	for_each_online_pgdat(pgdat) {
281 		for_each_online_cpu(cpu) {
282 			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
283 		}
284 	}
285 
286 	for_each_populated_zone(zone) {
287 		struct pglist_data *pgdat = zone->zone_pgdat;
288 		unsigned long max_drift, tolerate_drift;
289 
290 		threshold = calculate_normal_threshold(zone);
291 
292 		for_each_online_cpu(cpu) {
293 			int pgdat_threshold;
294 
295 			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
296 							= threshold;
297 
298 			/* Base nodestat threshold on the largest populated zone. */
299 			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
300 			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
301 				= max(threshold, pgdat_threshold);
302 		}
303 
304 		/*
305 		 * Only set percpu_drift_mark if there is a danger that
306 		 * NR_FREE_PAGES reports the low watermark is ok when in fact
307 		 * the min watermark could be breached by an allocation
308 		 */
309 		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
310 		max_drift = num_online_cpus() * threshold;
311 		if (max_drift > tolerate_drift)
312 			zone->percpu_drift_mark = high_wmark_pages(zone) +
313 					max_drift;
314 	}
315 }
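/*
 * Rough example (illustrative numbers): with 64 CPUs and a threshold of
 * 100, NR_FREE_PAGES may lag reality by up to 6400 pages. If that exceeds
 * the low-min watermark gap, percpu_drift_mark is set above the high
 * watermark so that watermark checks near it can fall back to the more
 * expensive zone_page_state_snapshot() (which folds in the per-cpu diffs)
 * instead of trusting the cheap zone_page_state().
 */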
316 
317 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
318 				int (*calculate_pressure)(struct zone *))
319 {
320 	struct zone *zone;
321 	int cpu;
322 	int threshold;
323 	int i;
324 
325 	for (i = 0; i < pgdat->nr_zones; i++) {
326 		zone = &pgdat->node_zones[i];
327 		if (!zone->percpu_drift_mark)
328 			continue;
329 
330 		threshold = (*calculate_pressure)(zone);
331 		for_each_online_cpu(cpu)
332 			per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
333 							= threshold;
334 	}
335 }
336 
337 /*
338  * For use when we know that interrupts are disabled,
339  * or when we know that preemption is disabled and that
340  * particular counter cannot be updated from interrupt context.
341  */
342 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
343 			   long delta)
344 {
345 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
346 	s8 __percpu *p = pcp->vm_stat_diff + item;
347 	long x;
348 	long t;
349 
350 	/*
351 	 * Accurate vmstat updates require a RMW. On !PREEMPT_RT kernels,
352 	 * atomicity is provided by IRQs being disabled -- either explicitly
353 	 * or via local_lock_irq. On PREEMPT_RT, local_lock_irq only disables
354 	 * CPU migrations and preemption potentially corrupts a counter so
355 	 * disable preemption.
356 	 */
357 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
358 		preempt_disable();
359 
360 	x = delta + __this_cpu_read(*p);
361 
362 	t = __this_cpu_read(pcp->stat_threshold);
363 
364 	if (unlikely(abs(x) > t)) {
365 		zone_page_state_add(x, zone, item);
366 		x = 0;
367 	}
368 	__this_cpu_write(*p, x);
369 
370 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
371 		preempt_enable();
372 }
373 EXPORT_SYMBOL(__mod_zone_page_state);
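/*
 * Illustrative call site (hypothetical): with interrupts already disabled,
 * accounting one newly mlocked page would look like
 *
 *	__mod_zone_page_state(page_zone(page), NR_MLOCK, 1);
 *
 * The delta lands in the per-cpu diff and only spills into the zone and
 * global atomics once the per-cpu threshold is exceeded.
 */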
374 
375 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
376 				long delta)
377 {
378 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
379 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
380 	long x;
381 	long t;
382 
383 	if (vmstat_item_in_bytes(item)) {
384 		/*
385 		 * Only cgroups use subpage accounting right now; at
386 		 * the global level, these items still change in
387 		 * multiples of whole pages. Store them as pages
388 		 * internally to keep the per-cpu counters compact.
389 		 */
390 		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
391 		delta >>= PAGE_SHIFT;
392 	}
393 
394 	/* See __mod_zone_page_state */
395 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
396 		preempt_disable();
397 
398 	x = delta + __this_cpu_read(*p);
399 
400 	t = __this_cpu_read(pcp->stat_threshold);
401 
402 	if (unlikely(abs(x) > t)) {
403 		node_page_state_add(x, pgdat, item);
404 		x = 0;
405 	}
406 	__this_cpu_write(*p, x);
407 
408 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
409 		preempt_enable();
410 }
411 EXPORT_SYMBOL(__mod_node_page_state);
412 
413 /*
414  * Optimized increment and decrement functions.
415  *
416  * These are only for a single page and therefore can take a struct page *
417  * argument instead of struct zone *. This allows the inclusion of the code
418  * generated for page_zone(page) into the optimized functions.
419  *
420  * No overflow check is necessary and therefore the differential can be
421  * incremented or decremented in place which may allow the compilers to
422  * generate better code.
423  * The increment or decrement is known and therefore one boundary check can
424  * be omitted.
425  *
426  * NOTE: These functions are very performance sensitive. Change only
427  * with care.
428  *
429  * Some processors have inc/dec instructions that are atomic vs an interrupt.
430  * However, the code must first determine the differential location in a zone
431  * based on the processor number and then inc/dec the counter. There is no
432  * guarantee without disabling preemption that the processor will not change
433  * in between and therefore the atomicity vs. interrupt cannot be exploited
434  * in a useful way here.
435  */
436 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
437 {
438 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
439 	s8 __percpu *p = pcp->vm_stat_diff + item;
440 	s8 v, t;
441 
442 	/* See __mod_node_page_state */
443 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
444 		preempt_disable();
445 
446 	v = __this_cpu_inc_return(*p);
447 	t = __this_cpu_read(pcp->stat_threshold);
448 	if (unlikely(v > t)) {
449 		s8 overstep = t >> 1;
450 
451 		zone_page_state_add(v + overstep, zone, item);
452 		__this_cpu_write(*p, -overstep);
453 	}
454 
455 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
456 		preempt_enable();
457 }
458 
459 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
460 {
461 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
462 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
463 	s8 v, t;
464 
465 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
466 
467 	/* See __mod_node_page_state */
468 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
469 		preempt_disable();
470 
471 	v = __this_cpu_inc_return(*p);
472 	t = __this_cpu_read(pcp->stat_threshold);
473 	if (unlikely(v > t)) {
474 		s8 overstep = t >> 1;
475 
476 		node_page_state_add(v + overstep, pgdat, item);
477 		__this_cpu_write(*p, -overstep);
478 	}
479 
480 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
481 		preempt_enable();
482 }
483 
484 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
485 {
486 	__inc_zone_state(page_zone(page), item);
487 }
488 EXPORT_SYMBOL(__inc_zone_page_state);
489 
490 void __inc_node_page_state(struct page *page, enum node_stat_item item)
491 {
492 	__inc_node_state(page_pgdat(page), item);
493 }
494 EXPORT_SYMBOL(__inc_node_page_state);
495 
496 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
497 {
498 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
499 	s8 __percpu *p = pcp->vm_stat_diff + item;
500 	s8 v, t;
501 
502 	/* See __mod_node_page_state */
503 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
504 		preempt_disable();
505 
506 	v = __this_cpu_dec_return(*p);
507 	t = __this_cpu_read(pcp->stat_threshold);
508 	if (unlikely(v < -t)) {
509 		s8 overstep = t >> 1;
510 
511 		zone_page_state_add(v - overstep, zone, item);
512 		__this_cpu_write(*p, overstep);
513 	}
514 
515 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
516 		preempt_enable();
517 }
518 
519 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
520 {
521 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
522 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
523 	s8 v, t;
524 
525 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
526 
527 	/* See __mod_node_page_state */
528 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
529 		preempt_disable();
530 
531 	v = __this_cpu_dec_return(*p);
532 	t = __this_cpu_read(pcp->stat_threshold);
533 	if (unlikely(v < -t)) {
534 		s8 overstep = t >> 1;
535 
536 		node_page_state_add(v - overstep, pgdat, item);
537 		__this_cpu_write(*p, overstep);
538 	}
539 
540 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
541 		preempt_enable();
542 }
543 
544 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
545 {
546 	__dec_zone_state(page_zone(page), item);
547 }
548 EXPORT_SYMBOL(__dec_zone_page_state);
549 
550 void __dec_node_page_state(struct page *page, enum node_stat_item item)
551 {
552 	__dec_node_state(page_pgdat(page), item);
553 }
554 EXPORT_SYMBOL(__dec_node_page_state);
555 
556 #ifdef CONFIG_HAVE_CMPXCHG_LOCAL
557 /*
558  * If we have cmpxchg_local support then we do not need to incur the overhead
559  * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
560  *
561  * mod_zone_state() (and mod_node_state() below) modifies the counter state
562  * through atomic per-cpu operations.
563  *
564  * Overstep mode specifies how overstep should be handled:
565  *     0       No overstepping
566  *     1       Overstepping half of threshold
567  *     -1      Overstepping minus half of threshold
568  */
569 static inline void mod_zone_state(struct zone *zone,
570        enum zone_stat_item item, long delta, int overstep_mode)
571 {
572 	struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
573 	s8 __percpu *p = pcp->vm_stat_diff + item;
574 	long o, n, t, z;
575 
576 	do {
577 		z = 0;  /* overflow to zone counters */
578 
579 		/*
580 		 * The fetching of the stat_threshold is racy. We may apply
581 		 * a counter threshold to the wrong cpu if we get
582 		 * rescheduled while executing here. However, the next
583 		 * counter update will apply the threshold again and
584 		 * therefore bring the counter under the threshold again.
585 		 *
586 		 * Most of the time the thresholds are the same anyways
587 		 * for all cpus in a zone.
588 		 */
589 		t = this_cpu_read(pcp->stat_threshold);
590 
591 		o = this_cpu_read(*p);
592 		n = delta + o;
593 
594 		if (abs(n) > t) {
595 			int os = overstep_mode * (t >> 1);
596 
597 			/* Overflow must be added to zone counters */
598 			z = n + os;
599 			n = -os;
600 		}
601 	} while (this_cpu_cmpxchg(*p, o, n) != o);
602 
603 	if (z)
604 		zone_page_state_add(z, zone, item);
605 }
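/*
 * Overstep example (illustrative numbers): with t = 32 and a pending
 * per-cpu diff of 32, one more increment in overstep mode 1 yields n = 33,
 * so os = 16, z = 49 is folded into the zone and global counters and the
 * per-cpu diff restarts at -16. Global (+49) plus local (-16) still equals
 * the true 33; the negative bias just postpones the next fold.
 */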
606 
607 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
608 			 long delta)
609 {
610 	mod_zone_state(zone, item, delta, 0);
611 }
612 EXPORT_SYMBOL(mod_zone_page_state);
613 
614 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
615 {
616 	mod_zone_state(page_zone(page), item, 1, 1);
617 }
618 EXPORT_SYMBOL(inc_zone_page_state);
619 
620 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
621 {
622 	mod_zone_state(page_zone(page), item, -1, -1);
623 }
624 EXPORT_SYMBOL(dec_zone_page_state);
625 
626 static inline void mod_node_state(struct pglist_data *pgdat,
627        enum node_stat_item item, int delta, int overstep_mode)
628 {
629 	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
630 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
631 	long o, n, t, z;
632 
633 	if (vmstat_item_in_bytes(item)) {
634 		/*
635 		 * Only cgroups use subpage accounting right now; at
636 		 * the global level, these items still change in
637 		 * multiples of whole pages. Store them as pages
638 		 * internally to keep the per-cpu counters compact.
639 		 */
640 		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
641 		delta >>= PAGE_SHIFT;
642 	}
643 
644 	do {
645 		z = 0;  /* overflow to node counters */
646 
647 		/*
648 		 * The fetching of the stat_threshold is racy. We may apply
649 		 * a counter threshold to the wrong cpu if we get
650 		 * rescheduled while executing here. However, the next
651 		 * counter update will apply the threshold again and
652 		 * therefore bring the counter under the threshold again.
653 		 *
654 		 * Most of the time the thresholds are the same anyways
655 		 * for all cpus in a node.
656 		 */
657 		t = this_cpu_read(pcp->stat_threshold);
658 
659 		o = this_cpu_read(*p);
660 		n = delta + o;
661 
662 		if (abs(n) > t) {
663 			int os = overstep_mode * (t >> 1);
664 
665 			/* Overflow must be added to node counters */
666 			z = n + os;
667 			n = -os;
668 		}
669 	} while (this_cpu_cmpxchg(*p, o, n) != o);
670 
671 	if (z)
672 		node_page_state_add(z, pgdat, item);
673 }
674 
675 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
676 					long delta)
677 {
678 	mod_node_state(pgdat, item, delta, 0);
679 }
680 EXPORT_SYMBOL(mod_node_page_state);
681 
682 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
683 {
684 	mod_node_state(pgdat, item, 1, 1);
685 }
686 
687 void inc_node_page_state(struct page *page, enum node_stat_item item)
688 {
689 	mod_node_state(page_pgdat(page), item, 1, 1);
690 }
691 EXPORT_SYMBOL(inc_node_page_state);
692 
693 void dec_node_page_state(struct page *page, enum node_stat_item item)
694 {
695 	mod_node_state(page_pgdat(page), item, -1, -1);
696 }
697 EXPORT_SYMBOL(dec_node_page_state);
698 #else
699 /*
700  * Use interrupt disable to serialize counter updates
701  */
702 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
703 			 long delta)
704 {
705 	unsigned long flags;
706 
707 	local_irq_save(flags);
708 	__mod_zone_page_state(zone, item, delta);
709 	local_irq_restore(flags);
710 }
711 EXPORT_SYMBOL(mod_zone_page_state);
712 
713 void inc_zone_page_state(struct page *page, enum zone_stat_item item)
714 {
715 	unsigned long flags;
716 	struct zone *zone;
717 
718 	zone = page_zone(page);
719 	local_irq_save(flags);
720 	__inc_zone_state(zone, item);
721 	local_irq_restore(flags);
722 }
723 EXPORT_SYMBOL(inc_zone_page_state);
724 
725 void dec_zone_page_state(struct page *page, enum zone_stat_item item)
726 {
727 	unsigned long flags;
728 
729 	local_irq_save(flags);
730 	__dec_zone_page_state(page, item);
731 	local_irq_restore(flags);
732 }
733 EXPORT_SYMBOL(dec_zone_page_state);
734 
735 void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
736 {
737 	unsigned long flags;
738 
739 	local_irq_save(flags);
740 	__inc_node_state(pgdat, item);
741 	local_irq_restore(flags);
742 }
743 EXPORT_SYMBOL(inc_node_state);
744 
745 void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
746 					long delta)
747 {
748 	unsigned long flags;
749 
750 	local_irq_save(flags);
751 	__mod_node_page_state(pgdat, item, delta);
752 	local_irq_restore(flags);
753 }
754 EXPORT_SYMBOL(mod_node_page_state);
755 
756 void inc_node_page_state(struct page *page, enum node_stat_item item)
757 {
758 	unsigned long flags;
759 	struct pglist_data *pgdat;
760 
761 	pgdat = page_pgdat(page);
762 	local_irq_save(flags);
763 	__inc_node_state(pgdat, item);
764 	local_irq_restore(flags);
765 }
766 EXPORT_SYMBOL(inc_node_page_state);
767 
768 void dec_node_page_state(struct page *page, enum node_stat_item item)
769 {
770 	unsigned long flags;
771 
772 	local_irq_save(flags);
773 	__dec_node_page_state(page, item);
774 	local_irq_restore(flags);
775 }
776 EXPORT_SYMBOL(dec_node_page_state);
777 #endif
778 
779 /*
780  * Fold a differential into the global counters.
781  * Returns the number of counters updated.
782  */
783 static int fold_diff(int *zone_diff, int *node_diff)
784 {
785 	int i;
786 	int changes = 0;
787 
788 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
789 		if (zone_diff[i]) {
790 			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
791 			changes++;
792 		}
793 
794 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
795 		if (node_diff[i]) {
796 			atomic_long_add(node_diff[i], &vm_node_stat[i]);
797 			changes++;
798 		}
799 	return changes;
800 }
801 
802 /*
803  * Update the zone counters for the current cpu.
804  *
805  * Note that refresh_cpu_vm_stats strives to only access
806  * node local memory. The per cpu pagesets on remote zones are placed
807  * in the memory local to the processor using that pageset. So the
808  * loop over all zones will access a series of cachelines local to
809  * the processor.
810  *
811  * The call to zone_page_state_add updates the cachelines with the
812  * statistics in the remote zone struct as well as the global cachelines
813  * with the global counters. These could cause remote node cache line
814  * bouncing and will have to be only done when necessary.
815  *
816  * The function returns the number of global counters updated.
817  */
818 static int refresh_cpu_vm_stats(bool do_pagesets)
819 {
820 	struct pglist_data *pgdat;
821 	struct zone *zone;
822 	int i;
823 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
824 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
825 	int changes = 0;
826 
827 	for_each_populated_zone(zone) {
828 		struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
829 #ifdef CONFIG_NUMA
830 		struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
831 #endif
832 
833 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
834 			int v;
835 
836 			v = this_cpu_xchg(pzstats->vm_stat_diff[i], 0);
837 			if (v) {
838 
839 				atomic_long_add(v, &zone->vm_stat[i]);
840 				global_zone_diff[i] += v;
841 #ifdef CONFIG_NUMA
842 				/* 3 seconds idle till flush */
843 				__this_cpu_write(pcp->expire, 3);
844 #endif
845 			}
846 		}
847 #ifdef CONFIG_NUMA
848 
849 		if (do_pagesets) {
850 			cond_resched();
851 			/*
852 			 * Deal with draining the remote pageset of this
853 			 * processor.
854 			 *
855 			 * Check if there are pages remaining in this pageset;
856 			 * if not, there is nothing to expire.
857 			 */
858 			if (!__this_cpu_read(pcp->expire) ||
859 			       !__this_cpu_read(pcp->count))
860 				continue;
861 
862 			/*
863 			 * We never drain zones local to this processor.
864 			 */
865 			if (zone_to_nid(zone) == numa_node_id()) {
866 				__this_cpu_write(pcp->expire, 0);
867 				continue;
868 			}
869 
870 			if (__this_cpu_dec_return(pcp->expire))
871 				continue;
872 
873 			if (__this_cpu_read(pcp->count)) {
874 				drain_zone_pages(zone, this_cpu_ptr(pcp));
875 				changes++;
876 			}
877 		}
878 #endif
879 	}
880 
881 	for_each_online_pgdat(pgdat) {
882 		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;
883 
884 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
885 			int v;
886 
887 			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
888 			if (v) {
889 				atomic_long_add(v, &pgdat->vm_stat[i]);
890 				global_node_diff[i] += v;
891 			}
892 		}
893 	}
894 
895 	changes += fold_diff(global_zone_diff, global_node_diff);
896 	return changes;
897 }
898 
899 /*
900  * Fold the data for an offline cpu into the global array.
901  * There cannot be any access by the offline cpu and therefore
902  * synchronization is simplified.
903  */
904 void cpu_vm_stats_fold(int cpu)
905 {
906 	struct pglist_data *pgdat;
907 	struct zone *zone;
908 	int i;
909 	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
910 	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
911 
912 	for_each_populated_zone(zone) {
913 		struct per_cpu_zonestat *pzstats;
914 
915 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
916 
917 		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
918 			if (pzstats->vm_stat_diff[i]) {
919 				int v;
920 
921 				v = pzstats->vm_stat_diff[i];
922 				pzstats->vm_stat_diff[i] = 0;
923 				atomic_long_add(v, &zone->vm_stat[i]);
924 				global_zone_diff[i] += v;
925 			}
926 		}
927 #ifdef CONFIG_NUMA
928 		for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
929 			if (pzstats->vm_numa_event[i]) {
930 				unsigned long v;
931 
932 				v = pzstats->vm_numa_event[i];
933 				pzstats->vm_numa_event[i] = 0;
934 				zone_numa_event_add(v, zone, i);
935 			}
936 		}
937 #endif
938 	}
939 
940 	for_each_online_pgdat(pgdat) {
941 		struct per_cpu_nodestat *p;
942 
943 		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);
944 
945 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
946 			if (p->vm_node_stat_diff[i]) {
947 				int v;
948 
949 				v = p->vm_node_stat_diff[i];
950 				p->vm_node_stat_diff[i] = 0;
951 				atomic_long_add(v, &pgdat->vm_stat[i]);
952 				global_node_diff[i] += v;
953 			}
954 	}
955 
956 	fold_diff(global_zone_diff, global_node_diff);
957 }
958 
959 /*
960  * This is only called if !populated_zone(zone), which implies no other users of
961  * pzstats->vm_stat_diff[] exist.
962  */
963 void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
964 {
965 	unsigned long v;
966 	int i;
967 
968 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
969 		if (pzstats->vm_stat_diff[i]) {
970 			v = pzstats->vm_stat_diff[i];
971 			pzstats->vm_stat_diff[i] = 0;
972 			zone_page_state_add(v, zone, i);
973 		}
974 	}
975 
976 #ifdef CONFIG_NUMA
977 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) {
978 		if (pzstats->vm_numa_event[i]) {
979 			v = pzstats->vm_numa_event[i];
980 			pzstats->vm_numa_event[i] = 0;
981 			zone_numa_event_add(v, zone, i);
982 		}
983 	}
984 #endif
985 }
986 #endif
987 
988 #ifdef CONFIG_NUMA
989 /*
990  * Determine the per node value of a stat item. This function
991  * is called frequently in a NUMA machine, so try to be as
992  * frugal as possible.
993  */
994 unsigned long sum_zone_node_page_state(int node,
995 				 enum zone_stat_item item)
996 {
997 	struct zone *zones = NODE_DATA(node)->node_zones;
998 	int i;
999 	unsigned long count = 0;
1000 
1001 	for (i = 0; i < MAX_NR_ZONES; i++)
1002 		count += zone_page_state(zones + i, item);
1003 
1004 	return count;
1005 }
1006 
1007 /* Determine the per node value of a numa stat item. */
1008 unsigned long sum_zone_numa_event_state(int node,
1009 				 enum numa_stat_item item)
1010 {
1011 	struct zone *zones = NODE_DATA(node)->node_zones;
1012 	unsigned long count = 0;
1013 	int i;
1014 
1015 	for (i = 0; i < MAX_NR_ZONES; i++)
1016 		count += zone_numa_event_state(zones + i, item);
1017 
1018 	return count;
1019 }
1020 
1021 /*
1022  * Determine the per node value of a stat item.
1023  */
1024 unsigned long node_page_state_pages(struct pglist_data *pgdat,
1025 				    enum node_stat_item item)
1026 {
1027 	long x = atomic_long_read(&pgdat->vm_stat[item]);
1028 #ifdef CONFIG_SMP
1029 	if (x < 0)
1030 		x = 0;
1031 #endif
1032 	return x;
1033 }
1034 
1035 unsigned long node_page_state(struct pglist_data *pgdat,
1036 			      enum node_stat_item item)
1037 {
1038 	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
1039 
1040 	return node_page_state_pages(pgdat, item);
1041 }
1042 #endif
1043 
1044 #ifdef CONFIG_COMPACTION
1045 
1046 struct contig_page_info {
1047 	unsigned long free_pages;
1048 	unsigned long free_blocks_total;
1049 	unsigned long free_blocks_suitable;
1050 };
1051 
1052 /*
1053  * Calculate the number of free pages in a zone, how many contiguous
1054  * pages are free and how many are large enough to satisfy an allocation of
1055  * the target size. Note that this function makes no attempt to estimate
1056  * how many suitable free blocks there *might* be if MOVABLE pages were
1057  * migrated. Calculating that is possible, but expensive and can be
1058  * figured out from userspace
1059  */
1060 static void fill_contig_page_info(struct zone *zone,
1061 				unsigned int suitable_order,
1062 				struct contig_page_info *info)
1063 {
1064 	unsigned int order;
1065 
1066 	info->free_pages = 0;
1067 	info->free_blocks_total = 0;
1068 	info->free_blocks_suitable = 0;
1069 
1070 	for (order = 0; order < MAX_ORDER; order++) {
1071 		unsigned long blocks;
1072 
1073 		/*
1074 		 * Count number of free blocks.
1075 		 *
1076 		 * Access to nr_free is lockless as nr_free is used only for
1077 		 * diagnostic purposes. Use data_race to avoid KCSAN warning.
1078 		 */
1079 		blocks = data_race(zone->free_area[order].nr_free);
1080 		info->free_blocks_total += blocks;
1081 
1082 		/* Count free base pages */
1083 		info->free_pages += blocks << order;
1084 
1085 		/* Count the suitable free blocks */
1086 		if (order >= suitable_order)
1087 			info->free_blocks_suitable += blocks <<
1088 						(order - suitable_order);
1089 	}
1090 }
1091 
1092 /*
1093  * A fragmentation index only makes sense if an allocation of a requested
1094  * size would fail. If that is true, the fragmentation index indicates
1095  * whether external fragmentation or a lack of memory was the problem.
1096  * The value can be used to determine if page reclaim or compaction
1097  * should be used
1098  */
1099 static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
1100 {
1101 	unsigned long requested = 1UL << order;
1102 
1103 	if (WARN_ON_ONCE(order >= MAX_ORDER))
1104 		return 0;
1105 
1106 	if (!info->free_blocks_total)
1107 		return 0;
1108 
1109 	/* Fragmentation index only makes sense when a request would fail */
1110 	if (info->free_blocks_suitable)
1111 		return -1000;
1112 
1113 	/*
1114 	 * Index is between 0 and 1 so return within 3 decimal places
1115 	 *
1116 	 * 0 => allocation would fail due to lack of memory
1117 	 * 1 => allocation would fail due to fragmentation
1118 	 */
1119 	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
1120 }
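/*
 * Worked example (made-up numbers): for an order-3 request (8 pages) in a
 * zone with 1000 free pages spread over 400 free blocks, none of order 3
 * or larger, the index is
 *
 *	1000 - (1000 + 1000 * 1000 / 8) / 400 = 1000 - 315 = 685
 *
 * i.e. 0.685, pointing at external fragmentation (try compaction) rather
 * than a plain lack of memory (try reclaim).
 */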
1121 
1122 /*
1123  * Calculates external fragmentation within a zone wrt the given order.
1124  * It is defined as the percentage of pages found in blocks of size
1125  * less than 1 << order. It returns values in range [0, 100].
1126  */
1127 unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
1128 {
1129 	struct contig_page_info info;
1130 
1131 	fill_contig_page_info(zone, order, &info);
1132 	if (info.free_pages == 0)
1133 		return 0;
1134 
1135 	return div_u64((info.free_pages -
1136 			(info.free_blocks_suitable << order)) * 100,
1137 			info.free_pages);
1138 }
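/*
 * Worked example (made-up numbers): with 1000 free pages of which 600 sit
 * in blocks of at least order 2, extfrag_for_order(zone, 2) returns
 * (1000 - 600) * 100 / 1000 = 40, i.e. 40% of the free memory is unusable
 * for an order-2 allocation without compaction or further freeing.
 */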
1139 
1140 /* Same as __fragmentation index but allocs contig_page_info on stack */
1141 int fragmentation_index(struct zone *zone, unsigned int order)
1142 {
1143 	struct contig_page_info info;
1144 
1145 	fill_contig_page_info(zone, order, &info);
1146 	return __fragmentation_index(order, &info);
1147 }
1148 #endif
1149 
1150 #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1151     defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1152 #ifdef CONFIG_ZONE_DMA
1153 #define TEXT_FOR_DMA(xx) xx "_dma",
1154 #else
1155 #define TEXT_FOR_DMA(xx)
1156 #endif
1157 
1158 #ifdef CONFIG_ZONE_DMA32
1159 #define TEXT_FOR_DMA32(xx) xx "_dma32",
1160 #else
1161 #define TEXT_FOR_DMA32(xx)
1162 #endif
1163 
1164 #ifdef CONFIG_HIGHMEM
1165 #define TEXT_FOR_HIGHMEM(xx) xx "_high",
1166 #else
1167 #define TEXT_FOR_HIGHMEM(xx)
1168 #endif
1169 
1170 #ifdef CONFIG_ZONE_DEVICE
1171 #define TEXT_FOR_DEVICE(xx) xx "_device",
1172 #else
1173 #define TEXT_FOR_DEVICE(xx)
1174 #endif
1175 
1176 #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1177 					TEXT_FOR_HIGHMEM(xx) xx "_movable", \
1178 					TEXT_FOR_DEVICE(xx)
1179 
1180 const char * const vmstat_text[] = {
1181 	/* enum zone_stat_item counters */
1182 	"nr_free_pages",
1183 	"nr_zone_inactive_anon",
1184 	"nr_zone_active_anon",
1185 	"nr_zone_inactive_file",
1186 	"nr_zone_active_file",
1187 	"nr_zone_unevictable",
1188 	"nr_zone_write_pending",
1189 	"nr_mlock",
1190 	"nr_bounce",
1191 #if IS_ENABLED(CONFIG_ZSMALLOC)
1192 	"nr_zspages",
1193 #endif
1194 	"nr_free_cma",
1195 
1196 	/* enum numa_stat_item counters */
1197 #ifdef CONFIG_NUMA
1198 	"numa_hit",
1199 	"numa_miss",
1200 	"numa_foreign",
1201 	"numa_interleave",
1202 	"numa_local",
1203 	"numa_other",
1204 #endif
1205 
1206 	/* enum node_stat_item counters */
1207 	"nr_inactive_anon",
1208 	"nr_active_anon",
1209 	"nr_inactive_file",
1210 	"nr_active_file",
1211 	"nr_unevictable",
1212 	"nr_slab_reclaimable",
1213 	"nr_slab_unreclaimable",
1214 	"nr_isolated_anon",
1215 	"nr_isolated_file",
1216 	"workingset_nodes",
1217 	"workingset_refault_anon",
1218 	"workingset_refault_file",
1219 	"workingset_activate_anon",
1220 	"workingset_activate_file",
1221 	"workingset_restore_anon",
1222 	"workingset_restore_file",
1223 	"workingset_nodereclaim",
1224 	"nr_anon_pages",
1225 	"nr_mapped",
1226 	"nr_file_pages",
1227 	"nr_dirty",
1228 	"nr_writeback",
1229 	"nr_writeback_temp",
1230 	"nr_shmem",
1231 	"nr_shmem_hugepages",
1232 	"nr_shmem_pmdmapped",
1233 	"nr_file_hugepages",
1234 	"nr_file_pmdmapped",
1235 	"nr_anon_transparent_hugepages",
1236 	"nr_vmscan_write",
1237 	"nr_vmscan_immediate_reclaim",
1238 	"nr_dirtied",
1239 	"nr_written",
1240 	"nr_throttled_written",
1241 	"nr_kernel_misc_reclaimable",
1242 	"nr_foll_pin_acquired",
1243 	"nr_foll_pin_released",
1244 	"nr_kernel_stack",
1245 #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1246 	"nr_shadow_call_stack",
1247 #endif
1248 	"nr_page_table_pages",
1249 #ifdef CONFIG_SWAP
1250 	"nr_swapcached",
1251 #endif
1252 #ifdef CONFIG_NUMA_BALANCING
1253 	"pgpromote_success",
1254 	"pgpromote_candidate",
1255 #endif
1256 
1257 	/* enum writeback_stat_item counters */
1258 	"nr_dirty_threshold",
1259 	"nr_dirty_background_threshold",
1260 
1261 #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1262 	/* enum vm_event_item counters */
1263 	"pgpgin",
1264 	"pgpgout",
1265 	"pswpin",
1266 	"pswpout",
1267 
1268 	TEXTS_FOR_ZONES("pgalloc")
1269 	TEXTS_FOR_ZONES("allocstall")
1270 	TEXTS_FOR_ZONES("pgskip")
1271 
1272 	"pgfree",
1273 	"pgactivate",
1274 	"pgdeactivate",
1275 	"pglazyfree",
1276 
1277 	"pgfault",
1278 	"pgmajfault",
1279 	"pglazyfreed",
1280 
1281 	"pgrefill",
1282 	"pgreuse",
1283 	"pgsteal_kswapd",
1284 	"pgsteal_direct",
1285 	"pgdemote_kswapd",
1286 	"pgdemote_direct",
1287 	"pgscan_kswapd",
1288 	"pgscan_direct",
1289 	"pgscan_direct_throttle",
1290 	"pgscan_anon",
1291 	"pgscan_file",
1292 	"pgsteal_anon",
1293 	"pgsteal_file",
1294 
1295 #ifdef CONFIG_NUMA
1296 	"zone_reclaim_failed",
1297 #endif
1298 	"pginodesteal",
1299 	"slabs_scanned",
1300 	"kswapd_inodesteal",
1301 	"kswapd_low_wmark_hit_quickly",
1302 	"kswapd_high_wmark_hit_quickly",
1303 	"pageoutrun",
1304 
1305 	"pgrotated",
1306 
1307 	"drop_pagecache",
1308 	"drop_slab",
1309 	"oom_kill",
1310 
1311 #ifdef CONFIG_NUMA_BALANCING
1312 	"numa_pte_updates",
1313 	"numa_huge_pte_updates",
1314 	"numa_hint_faults",
1315 	"numa_hint_faults_local",
1316 	"numa_pages_migrated",
1317 #endif
1318 #ifdef CONFIG_MIGRATION
1319 	"pgmigrate_success",
1320 	"pgmigrate_fail",
1321 	"thp_migration_success",
1322 	"thp_migration_fail",
1323 	"thp_migration_split",
1324 #endif
1325 #ifdef CONFIG_COMPACTION
1326 	"compact_migrate_scanned",
1327 	"compact_free_scanned",
1328 	"compact_isolated",
1329 	"compact_stall",
1330 	"compact_fail",
1331 	"compact_success",
1332 	"compact_daemon_wake",
1333 	"compact_daemon_migrate_scanned",
1334 	"compact_daemon_free_scanned",
1335 #endif
1336 
1337 #ifdef CONFIG_HUGETLB_PAGE
1338 	"htlb_buddy_alloc_success",
1339 	"htlb_buddy_alloc_fail",
1340 #endif
1341 #ifdef CONFIG_CMA
1342 	"cma_alloc_success",
1343 	"cma_alloc_fail",
1344 #endif
1345 	"unevictable_pgs_culled",
1346 	"unevictable_pgs_scanned",
1347 	"unevictable_pgs_rescued",
1348 	"unevictable_pgs_mlocked",
1349 	"unevictable_pgs_munlocked",
1350 	"unevictable_pgs_cleared",
1351 	"unevictable_pgs_stranded",
1352 
1353 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1354 	"thp_fault_alloc",
1355 	"thp_fault_fallback",
1356 	"thp_fault_fallback_charge",
1357 	"thp_collapse_alloc",
1358 	"thp_collapse_alloc_failed",
1359 	"thp_file_alloc",
1360 	"thp_file_fallback",
1361 	"thp_file_fallback_charge",
1362 	"thp_file_mapped",
1363 	"thp_split_page",
1364 	"thp_split_page_failed",
1365 	"thp_deferred_split_page",
1366 	"thp_split_pmd",
1367 	"thp_scan_exceed_none_pte",
1368 	"thp_scan_exceed_swap_pte",
1369 	"thp_scan_exceed_share_pte",
1370 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1371 	"thp_split_pud",
1372 #endif
1373 	"thp_zero_page_alloc",
1374 	"thp_zero_page_alloc_failed",
1375 	"thp_swpout",
1376 	"thp_swpout_fallback",
1377 #endif
1378 #ifdef CONFIG_MEMORY_BALLOON
1379 	"balloon_inflate",
1380 	"balloon_deflate",
1381 #ifdef CONFIG_BALLOON_COMPACTION
1382 	"balloon_migrate",
1383 #endif
1384 #endif /* CONFIG_MEMORY_BALLOON */
1385 #ifdef CONFIG_DEBUG_TLBFLUSH
1386 	"nr_tlb_remote_flush",
1387 	"nr_tlb_remote_flush_received",
1388 	"nr_tlb_local_flush_all",
1389 	"nr_tlb_local_flush_one",
1390 #endif /* CONFIG_DEBUG_TLBFLUSH */
1391 
1392 #ifdef CONFIG_DEBUG_VM_VMACACHE
1393 	"vmacache_find_calls",
1394 	"vmacache_find_hits",
1395 #endif
1396 #ifdef CONFIG_SWAP
1397 	"swap_ra",
1398 	"swap_ra_hit",
1399 #ifdef CONFIG_KSM
1400 	"ksm_swpin_copy",
1401 #endif
1402 #endif
1403 #ifdef CONFIG_KSM
1404 	"cow_ksm",
1405 #endif
1406 #ifdef CONFIG_ZSWAP
1407 	"zswpin",
1408 	"zswpout",
1409 #endif
1410 #ifdef CONFIG_X86
1411 	"direct_map_level2_splits",
1412 	"direct_map_level3_splits",
1413 #endif
1414 #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1415 };
1416 #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1417 
1418 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1419      defined(CONFIG_PROC_FS)
1420 static void *frag_start(struct seq_file *m, loff_t *pos)
1421 {
1422 	pg_data_t *pgdat;
1423 	loff_t node = *pos;
1424 
1425 	for (pgdat = first_online_pgdat();
1426 	     pgdat && node;
1427 	     pgdat = next_online_pgdat(pgdat))
1428 		--node;
1429 
1430 	return pgdat;
1431 }
1432 
1433 static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1434 {
1435 	pg_data_t *pgdat = (pg_data_t *)arg;
1436 
1437 	(*pos)++;
1438 	return next_online_pgdat(pgdat);
1439 }
1440 
1441 static void frag_stop(struct seq_file *m, void *arg)
1442 {
1443 }
1444 
1445 /*
1446  * Walk zones in a node and print using a callback.
1447  * If @assert_populated is true, only use callback for zones that are populated.
1448  */
1449 static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1450 		bool assert_populated, bool nolock,
1451 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1452 {
1453 	struct zone *zone;
1454 	struct zone *node_zones = pgdat->node_zones;
1455 	unsigned long flags;
1456 
1457 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1458 		if (assert_populated && !populated_zone(zone))
1459 			continue;
1460 
1461 		if (!nolock)
1462 			spin_lock_irqsave(&zone->lock, flags);
1463 		print(m, pgdat, zone);
1464 		if (!nolock)
1465 			spin_unlock_irqrestore(&zone->lock, flags);
1466 	}
1467 }
1468 #endif
1469 
1470 #ifdef CONFIG_PROC_FS
1471 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1472 						struct zone *zone)
1473 {
1474 	int order;
1475 
1476 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1477 	for (order = 0; order < MAX_ORDER; ++order)
1478 		/*
1479 		 * Access to nr_free is lockless as nr_free is used only for
1480 		 * printing purposes. Use data_race to avoid KCSAN warning.
1481 		 */
1482 		seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
1483 	seq_putc(m, '\n');
1484 }
1485 
1486 /*
1487  * This walks the free areas for each zone.
1488  */
1489 static int frag_show(struct seq_file *m, void *arg)
1490 {
1491 	pg_data_t *pgdat = (pg_data_t *)arg;
1492 	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1493 	return 0;
1494 }
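/*
 * This backs /proc/buddyinfo; an (illustrative) line looks like
 *
 *	Node 0, zone   Normal    143     89     50     23     11      5 ...
 *
 * with one column per order, each value being free_area[order].nr_free at
 * the time the zone was walked.
 */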
1495 
1496 static void pagetypeinfo_showfree_print(struct seq_file *m,
1497 					pg_data_t *pgdat, struct zone *zone)
1498 {
1499 	int order, mtype;
1500 
1501 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1502 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1503 					pgdat->node_id,
1504 					zone->name,
1505 					migratetype_names[mtype]);
1506 		for (order = 0; order < MAX_ORDER; ++order) {
1507 			unsigned long freecount = 0;
1508 			struct free_area *area;
1509 			struct list_head *curr;
1510 			bool overflow = false;
1511 
1512 			area = &(zone->free_area[order]);
1513 
1514 			list_for_each(curr, &area->free_list[mtype]) {
1515 				/*
1516 				 * Cap the free_list iteration because it might
1517 				 * be really large and we are under a spinlock
1518 				 * so a long time spent here could trigger a
1519 				 * hard lockup detector. Anyway this is a
1520 				 * debugging tool so knowing there is a handful
1521 				 * of pages of this order should be more than
1522 				 * sufficient.
1523 				 */
1524 				if (++freecount >= 100000) {
1525 					overflow = true;
1526 					break;
1527 				}
1528 			}
1529 			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1530 			spin_unlock_irq(&zone->lock);
1531 			cond_resched();
1532 			spin_lock_irq(&zone->lock);
1533 		}
1534 		seq_putc(m, '\n');
1535 	}
1536 }
1537 
1538 /* Print out the free pages at each order for each migratetype */
1539 static void pagetypeinfo_showfree(struct seq_file *m, void *arg)
1540 {
1541 	int order;
1542 	pg_data_t *pgdat = (pg_data_t *)arg;
1543 
1544 	/* Print header */
1545 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1546 	for (order = 0; order < MAX_ORDER; ++order)
1547 		seq_printf(m, "%6d ", order);
1548 	seq_putc(m, '\n');
1549 
1550 	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1551 }
1552 
1553 static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1554 					pg_data_t *pgdat, struct zone *zone)
1555 {
1556 	int mtype;
1557 	unsigned long pfn;
1558 	unsigned long start_pfn = zone->zone_start_pfn;
1559 	unsigned long end_pfn = zone_end_pfn(zone);
1560 	unsigned long count[MIGRATE_TYPES] = { 0, };
1561 
1562 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1563 		struct page *page;
1564 
1565 		page = pfn_to_online_page(pfn);
1566 		if (!page)
1567 			continue;
1568 
1569 		if (page_zone(page) != zone)
1570 			continue;
1571 
1572 		mtype = get_pageblock_migratetype(page);
1573 
1574 		if (mtype < MIGRATE_TYPES)
1575 			count[mtype]++;
1576 	}
1577 
1578 	/* Print counts */
1579 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1580 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1581 		seq_printf(m, "%12lu ", count[mtype]);
1582 	seq_putc(m, '\n');
1583 }
1584 
1585 /* Print out the number of pageblocks for each migratetype */
1586 static void pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1587 {
1588 	int mtype;
1589 	pg_data_t *pgdat = (pg_data_t *)arg;
1590 
1591 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1592 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1593 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1594 	seq_putc(m, '\n');
1595 	walk_zones_in_node(m, pgdat, true, false,
1596 		pagetypeinfo_showblockcount_print);
1597 }
1598 
1599 /*
1600  * Print out the number of pageblocks for each migratetype that contain pages
1601  * of other types. This gives an indication of how well fallbacks are being
1602  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1603  * to determine what is going on
1604  */
1605 static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1606 {
1607 #ifdef CONFIG_PAGE_OWNER
1608 	int mtype;
1609 
1610 	if (!static_branch_unlikely(&page_owner_inited))
1611 		return;
1612 
1613 	drain_all_pages(NULL);
1614 
1615 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1616 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1617 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1618 	seq_putc(m, '\n');
1619 
1620 	walk_zones_in_node(m, pgdat, true, true,
1621 		pagetypeinfo_showmixedcount_print);
1622 #endif /* CONFIG_PAGE_OWNER */
1623 }
1624 
1625 /*
1626  * This prints out statistics in relation to grouping pages by mobility.
1627  * It is expensive to collect so do not constantly read the file.
1628  */
1629 static int pagetypeinfo_show(struct seq_file *m, void *arg)
1630 {
1631 	pg_data_t *pgdat = (pg_data_t *)arg;
1632 
1633 	/* check memoryless node */
1634 	if (!node_state(pgdat->node_id, N_MEMORY))
1635 		return 0;
1636 
1637 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1638 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1639 	seq_putc(m, '\n');
1640 	pagetypeinfo_showfree(m, pgdat);
1641 	pagetypeinfo_showblockcount(m, pgdat);
1642 	pagetypeinfo_showmixedcount(m, pgdat);
1643 
1644 	return 0;
1645 }
1646 
1647 static const struct seq_operations fragmentation_op = {
1648 	.start	= frag_start,
1649 	.next	= frag_next,
1650 	.stop	= frag_stop,
1651 	.show	= frag_show,
1652 };
1653 
1654 static const struct seq_operations pagetypeinfo_op = {
1655 	.start	= frag_start,
1656 	.next	= frag_next,
1657 	.stop	= frag_stop,
1658 	.show	= pagetypeinfo_show,
1659 };
1660 
1661 static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1662 {
1663 	int zid;
1664 
1665 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1666 		struct zone *compare = &pgdat->node_zones[zid];
1667 
1668 		if (populated_zone(compare))
1669 			return zone == compare;
1670 	}
1671 
1672 	return false;
1673 }
1674 
1675 static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1676 							struct zone *zone)
1677 {
1678 	int i;
1679 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1680 	if (is_zone_first_populated(pgdat, zone)) {
1681 		seq_printf(m, "\n  per-node stats");
1682 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1683 			unsigned long pages = node_page_state_pages(pgdat, i);
1684 
1685 			if (vmstat_item_print_in_thp(i))
1686 				pages /= HPAGE_PMD_NR;
1687 			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1688 				   pages);
1689 		}
1690 	}
1691 	seq_printf(m,
1692 		   "\n  pages free     %lu"
1693 		   "\n        boost    %lu"
1694 		   "\n        min      %lu"
1695 		   "\n        low      %lu"
1696 		   "\n        high     %lu"
1697 		   "\n        spanned  %lu"
1698 		   "\n        present  %lu"
1699 		   "\n        managed  %lu"
1700 		   "\n        cma      %lu",
1701 		   zone_page_state(zone, NR_FREE_PAGES),
1702 		   zone->watermark_boost,
1703 		   min_wmark_pages(zone),
1704 		   low_wmark_pages(zone),
1705 		   high_wmark_pages(zone),
1706 		   zone->spanned_pages,
1707 		   zone->present_pages,
1708 		   zone_managed_pages(zone),
1709 		   zone_cma_pages(zone));
1710 
1711 	seq_printf(m,
1712 		   "\n        protection: (%ld",
1713 		   zone->lowmem_reserve[0]);
1714 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1715 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1716 	seq_putc(m, ')');
1717 
1718 	/* If unpopulated, no other information is useful */
1719 	if (!populated_zone(zone)) {
1720 		seq_putc(m, '\n');
1721 		return;
1722 	}
1723 
1724 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1725 		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1726 			   zone_page_state(zone, i));
1727 
1728 #ifdef CONFIG_NUMA
1729 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1730 		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1731 			   zone_numa_event_state(zone, i));
1732 #endif
1733 
1734 	seq_printf(m, "\n  pagesets");
1735 	for_each_online_cpu(i) {
1736 		struct per_cpu_pages *pcp;
1737 		struct per_cpu_zonestat __maybe_unused *pzstats;
1738 
1739 		pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
1740 		seq_printf(m,
1741 			   "\n    cpu: %i"
1742 			   "\n              count: %i"
1743 			   "\n              high:  %i"
1744 			   "\n              batch: %i",
1745 			   i,
1746 			   pcp->count,
1747 			   pcp->high,
1748 			   pcp->batch);
1749 #ifdef CONFIG_SMP
1750 		pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
1751 		seq_printf(m, "\n  vm stats threshold: %d",
1752 				pzstats->stat_threshold);
1753 #endif
1754 	}
1755 	seq_printf(m,
1756 		   "\n  node_unreclaimable:  %u"
1757 		   "\n  start_pfn:           %lu",
1758 		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1759 		   zone->zone_start_pfn);
1760 	seq_putc(m, '\n');
1761 }
1762 
1763 /*
1764  * Output information about zones in @pgdat.  All zones are printed regardless
1765  * of whether they are populated or not: lowmem_reserve_ratio operates on the
1766  * set of all zones and userspace would not be aware of such zones if they are
1767  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1768  */
1769 static int zoneinfo_show(struct seq_file *m, void *arg)
1770 {
1771 	pg_data_t *pgdat = (pg_data_t *)arg;
1772 	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1773 	return 0;
1774 }
1775 
1776 static const struct seq_operations zoneinfo_op = {
1777 	.start	= frag_start, /* iterate over all zones. The same as in
1778 			       * fragmentation. */
1779 	.next	= frag_next,
1780 	.stop	= frag_stop,
1781 	.show	= zoneinfo_show,
1782 };
1783 
1784 #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1785 			 NR_VM_NUMA_EVENT_ITEMS + \
1786 			 NR_VM_NODE_STAT_ITEMS + \
1787 			 NR_VM_WRITEBACK_STAT_ITEMS + \
1788 			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1789 			  NR_VM_EVENT_ITEMS : 0))
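/*
 * The buffer built by vmstat_start() below follows exactly the same layout
 * as vmstat_text[] above: zone stats, then (with CONFIG_NUMA) numa events,
 * node stats, the two writeback thresholds and finally the vm event
 * counters, so a single *pos index works for both arrays.
 */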
1790 
1791 static void *vmstat_start(struct seq_file *m, loff_t *pos)
1792 {
1793 	unsigned long *v;
1794 	int i;
1795 
1796 	if (*pos >= NR_VMSTAT_ITEMS)
1797 		return NULL;
1798 
1799 	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1800 	fold_vm_numa_events();
1801 	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1802 	m->private = v;
1803 	if (!v)
1804 		return ERR_PTR(-ENOMEM);
1805 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1806 		v[i] = global_zone_page_state(i);
1807 	v += NR_VM_ZONE_STAT_ITEMS;
1808 
1809 #ifdef CONFIG_NUMA
1810 	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
1811 		v[i] = global_numa_event_state(i);
1812 	v += NR_VM_NUMA_EVENT_ITEMS;
1813 #endif
1814 
1815 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1816 		v[i] = global_node_page_state_pages(i);
1817 		if (vmstat_item_print_in_thp(i))
1818 			v[i] /= HPAGE_PMD_NR;
1819 	}
1820 	v += NR_VM_NODE_STAT_ITEMS;
1821 
1822 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1823 			    v + NR_DIRTY_THRESHOLD);
1824 	v += NR_VM_WRITEBACK_STAT_ITEMS;
1825 
1826 #ifdef CONFIG_VM_EVENT_COUNTERS
1827 	all_vm_events(v);
1828 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1829 	v[PGPGOUT] /= 2;
1830 #endif
1831 	return (unsigned long *)m->private + *pos;
1832 }
1833 
1834 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1835 {
1836 	(*pos)++;
1837 	if (*pos >= NR_VMSTAT_ITEMS)
1838 		return NULL;
1839 	return (unsigned long *)m->private + *pos;
1840 }
1841 
1842 static int vmstat_show(struct seq_file *m, void *arg)
1843 {
1844 	unsigned long *l = arg;
1845 	unsigned long off = l - (unsigned long *)m->private;
1846 
1847 	seq_puts(m, vmstat_text[off]);
1848 	seq_put_decimal_ull(m, " ", *l);
1849 	seq_putc(m, '\n');
1850 
1851 	if (off == NR_VMSTAT_ITEMS - 1) {
1852 		/*
1853 		 * We've come to the end - add any deprecated counters to avoid
1854 		 * breaking userspace which might depend on them being present.
1855 		 */
1856 		seq_puts(m, "nr_unstable 0\n");
1857 	}
1858 	return 0;
1859 }
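/*
 * The resulting /proc/vmstat is a flat "name value" list, for example
 * (illustrative values):
 *
 *	nr_free_pages 123456
 *	nr_zone_inactive_anon 4321
 *	...
 *	pgfault 987654
 *	nr_unstable 0
 *
 * with the deprecated nr_unstable line appended last for compatibility.
 */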
1860 
1861 static void vmstat_stop(struct seq_file *m, void *arg)
1862 {
1863 	kfree(m->private);
1864 	m->private = NULL;
1865 }
1866 
1867 static const struct seq_operations vmstat_op = {
1868 	.start	= vmstat_start,
1869 	.next	= vmstat_next,
1870 	.stop	= vmstat_stop,
1871 	.show	= vmstat_show,
1872 };
1873 #endif /* CONFIG_PROC_FS */
1874 
1875 #ifdef CONFIG_SMP
1876 static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1877 int sysctl_stat_interval __read_mostly = HZ;
1878 
1879 #ifdef CONFIG_PROC_FS
1880 static void refresh_vm_stats(struct work_struct *work)
1881 {
1882 	refresh_cpu_vm_stats(true);
1883 }
1884 
1885 int vmstat_refresh(struct ctl_table *table, int write,
1886 		   void *buffer, size_t *lenp, loff_t *ppos)
1887 {
1888 	long val;
1889 	int err;
1890 	int i;
1891 
1892 	/*
1893 	 * The regular update, every sysctl_stat_interval, may come later
1894 	 * than expected: leaving a significant amount in per_cpu buckets.
1895 	 * This is particularly misleading when checking a quantity of HUGE
1896 	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1897 	 * which can equally be echo'ed to or cat'ted from (by root),
1898 	 * can be used to update the stats just before reading them.
1899 	 *
1900 	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1901 	 * transiently negative values, report an error here if any of
1902 	 * the stats is negative, so we know to go looking for imbalance.
1903 	 */
1904 	err = schedule_on_each_cpu(refresh_vm_stats);
1905 	if (err)
1906 		return err;
1907 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1908 		/*
1909 		 * Skip checking stats known to go negative occasionally.
1910 		 */
1911 		switch (i) {
1912 		case NR_ZONE_WRITE_PENDING:
1913 		case NR_FREE_CMA_PAGES:
1914 			continue;
1915 		}
1916 		val = atomic_long_read(&vm_zone_stat[i]);
1917 		if (val < 0) {
1918 			pr_warn("%s: %s %ld\n",
1919 				__func__, zone_stat_name(i), val);
1920 		}
1921 	}
1922 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1923 		/*
1924 		 * Skip checking stats known to go negative occasionally.
1925 		 */
1926 		switch (i) {
1927 		case NR_WRITEBACK:
1928 			continue;
1929 		}
1930 		val = atomic_long_read(&vm_node_stat[i]);
1931 		if (val < 0) {
1932 			pr_warn("%s: %s %ld\n",
1933 				__func__, node_stat_name(i), val);
1934 		}
1935 	}
1936 	if (write)
1937 		*ppos += *lenp;
1938 	else
1939 		*lenp = 0;
1940 	return 0;
1941 }
1942 #endif /* CONFIG_PROC_FS */
1943 
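/*
 * Per-cpu deferrable work: fold this cpu's accumulated differentials into
 * the global counters. If refresh_cpu_vm_stats() saw nothing to fold, the
 * work is deliberately not requeued; vmstat_shepherd() below will restart
 * it once this cpu accumulates differentials again.
 */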
1944 static void vmstat_update(struct work_struct *w)
1945 {
1946 	if (refresh_cpu_vm_stats(true)) {
1947 		/*
1948 		 * Counters were updated so we expect more updates
1949 		 * to occur in the future. Keep on running the
1950 		 * update worker thread.
1951 		 */
1952 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1953 				this_cpu_ptr(&vmstat_work),
1954 				round_jiffies_relative(sysctl_stat_interval));
1955 	}
1956 }
1957 
1958 /*
1959  * Check if the diffs for a certain cpu indicate that
1960  * an update is needed.
1961  */
1962 static bool need_update(int cpu)
1963 {
1964 	pg_data_t *last_pgdat = NULL;
1965 	struct zone *zone;
1966 
1967 	for_each_populated_zone(zone) {
1968 		struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
1969 		struct per_cpu_nodestat *n;
1970 
1971 		/*
1972 		 * Fast check: memchr_inv() is non-NULL if any diff byte is non-zero.
1973 		 */
1974 		if (memchr_inv(pzstats->vm_stat_diff, 0, sizeof(pzstats->vm_stat_diff)))
1975 			return true;
1976 
1977 		if (last_pgdat == zone->zone_pgdat)
1978 			continue;
1979 		last_pgdat = zone->zone_pgdat;
1980 		n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
1981 		if (memchr_inv(n->vm_node_stat_diff, 0, sizeof(n->vm_node_stat_diff)))
1982 			return true;
1983 	}
1984 	return false;
1985 }
1986 
1987 /*
1988  * Fold this cpu's remaining vmstat differentials into the global counters
1989  * before it goes quiet. The function is used by NOHZ and can only be
1990  * invoked when tick processing is not active.
1991  */
1992 void quiet_vmstat(void)
1993 {
1994 	if (system_state != SYSTEM_RUNNING)
1995 		return;
1996 
1997 	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1998 		return;
1999 
2000 	if (!need_update(smp_processor_id()))
2001 		return;
2002 
2003 	/*
2004 	 * Just refresh counters and do not care about the pending delayed
2005 	 * vmstat_update. It doesn't fire often enough to matter, and
2006 	 * canceling it would be too expensive from this path.
2007 	 * vmstat_shepherd will take care of it for us.
2008 	 */
2009 	refresh_cpu_vm_stats(false);
2010 }
2011 
2012 /*
2013  * Shepherd worker: periodically checks the differentials of CPUs whose
2014  * per-cpu vmstat update workers are not currently scheduled because of
2015  * inactivity, and requeues the worker for any CPU that has pending
2016  * updates.
2017  */
2018 static void vmstat_shepherd(struct work_struct *w);
2019 
2020 static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
2021 
2022 static void vmstat_shepherd(struct work_struct *w)
2023 {
2024 	int cpu;
2025 
2026 	cpus_read_lock();
2027 	/* Check processors whose vmstat worker threads have been disabled */
2028 	for_each_online_cpu(cpu) {
2029 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
2030 
2031 		if (!delayed_work_pending(dw) && need_update(cpu))
2032 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
2033 
2034 		cond_resched();
2035 	}
2036 	cpus_read_unlock();
2037 
2038 	schedule_delayed_work(&shepherd,
2039 		round_jiffies_relative(sysctl_stat_interval));
2040 }
2041 
2042 static void __init start_shepherd_timer(void)
2043 {
2044 	int cpu;
2045 
2046 	for_each_possible_cpu(cpu)
2047 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
2048 			vmstat_update);
2049 
2050 	schedule_delayed_work(&shepherd,
2051 		round_jiffies_relative(sysctl_stat_interval));
2052 }
2053 
2054 static void __init init_cpu_node_state(void)
2055 {
2056 	int node;
2057 
2058 	for_each_online_node(node) {
2059 		if (!cpumask_empty(cpumask_of_node(node)))
2060 			node_set_state(node, N_CPU);
2061 	}
2062 }
2063 
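/*
 * CPU hotplug "online" callback: recompute the per-zone stat thresholds
 * for the new number of online CPUs and make sure the CPU's node is
 * marked as having CPUs (N_CPU).
 */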
2064 static int vmstat_cpu_online(unsigned int cpu)
2065 {
2066 	refresh_zone_stat_thresholds();
2067 
2068 	if (!node_state(cpu_to_node(cpu), N_CPU)) {
2069 		node_set_state(cpu_to_node(cpu), N_CPU);
2070 	}
2071 
2072 	return 0;
2073 }
2074 
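/*
 * CPU hotplug "down prepare" callback: cancel this CPU's vmstat_work and
 * wait for a running instance to finish before the CPU goes down.
 */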
2075 static int vmstat_cpu_down_prep(unsigned int cpu)
2076 {
2077 	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2078 	return 0;
2079 }
2080 
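/*
 * CPU hotplug "dead" callback: recompute the stat thresholds and, if the
 * departed CPU was the last one on its node, clear the node's N_CPU state.
 */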
2081 static int vmstat_cpu_dead(unsigned int cpu)
2082 {
2083 	const struct cpumask *node_cpus;
2084 	int node;
2085 
2086 	node = cpu_to_node(cpu);
2087 
2088 	refresh_zone_stat_thresholds();
2089 	node_cpus = cpumask_of_node(node);
2090 	if (!cpumask_empty(node_cpus))
2091 		return 0;
2092 
2093 	node_clear_state(node, N_CPU);
2094 
2095 	return 0;
2096 }
2097 
2098 #endif
2099 
2100 struct workqueue_struct *mm_percpu_wq;
2101 
2102 void __init init_mm_internals(void)
2103 {
2104 	int ret __maybe_unused;
2105 
2106 	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2107 
2108 #ifdef CONFIG_SMP
2109 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2110 					NULL, vmstat_cpu_dead);
2111 	if (ret < 0)
2112 		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2113 
2114 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2115 					vmstat_cpu_online,
2116 					vmstat_cpu_down_prep);
2117 	if (ret < 0)
2118 		pr_err("vmstat: failed to register 'online' hotplug state\n");
2119 
2120 	cpus_read_lock();
2121 	init_cpu_node_state();
2122 	cpus_read_unlock();
2123 
2124 	start_shepherd_timer();
2125 #endif
2126 #ifdef CONFIG_PROC_FS
2127 	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2128 	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2129 	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2130 	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2131 #endif
2132 }
2133 
2134 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2135 
2136 /*
2137  * Return an index indicating how much of the available free memory is
2138  * unusable for an allocation of the requested size.
2139  */
2140 static int unusable_free_index(unsigned int order,
2141 				struct contig_page_info *info)
2142 {
2143 	/* No free memory at all: report maximum unusability (1.000) */
2144 	if (info->free_pages == 0)
2145 		return 1000;
2146 
2147 	/*
2148 	 * The index is conceptually a value between 0 and 1; return it
2149 	 * scaled by 1000 so callers get three decimal places.
2150 	 *
2151 	 * 0 => no fragmentation
2152 	 * 1 => high fragmentation
2153 	 */
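	/*
	 * Worked example with made-up numbers: free_pages == 1000 and
	 * free_blocks_suitable == 2 at order 4 leave 2 << 4 == 32 usable
	 * pages, so the result is (1000 - 32) * 1000 / 1000 == 968, which
	 * the debugfs readers below display as 0.968.
	 */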
2154 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2155 
2156 }
2157 
2158 static void unusable_show_print(struct seq_file *m,
2159 					pg_data_t *pgdat, struct zone *zone)
2160 {
2161 	unsigned int order;
2162 	int index;
2163 	struct contig_page_info info;
2164 
2165 	seq_printf(m, "Node %d, zone %8s ",
2166 				pgdat->node_id,
2167 				zone->name);
2168 	for (order = 0; order < MAX_ORDER; ++order) {
2169 		fill_contig_page_info(zone, order, &info);
2170 		index = unusable_free_index(order, &info);
2171 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2172 	}
2173 
2174 	seq_putc(m, '\n');
2175 }
2176 
2177 /*
2178  * Display unusable free space index
2179  *
2180  * The unusable free space index measures how much of the available free
2181  * memory cannot be used to satisfy an allocation of a given size and is a
2182  * value between 0 and 1. The higher the value, the more of the free memory
2183  * is unusable and, by implication, the worse the external fragmentation. This
2184  * can be expressed as a percentage by multiplying by 100.
2185  */
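/*
 * Illustrative line of /sys/kernel/debug/extfrag/unusable_index output
 * (values invented for the example; one column per order):
 *
 *	Node 0, zone   Normal 0.000 0.011 0.042 0.113 0.268 ...
 */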
2186 static int unusable_show(struct seq_file *m, void *arg)
2187 {
2188 	pg_data_t *pgdat = (pg_data_t *)arg;
2189 
2190 	/* check memoryless node */
2191 	/* skip memoryless nodes */
2192 		return 0;
2193 
2194 	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2195 
2196 	return 0;
2197 }
2198 
2199 static const struct seq_operations unusable_sops = {
2200 	.start	= frag_start,
2201 	.next	= frag_next,
2202 	.stop	= frag_stop,
2203 	.show	= unusable_show,
2204 };
2205 
2206 DEFINE_SEQ_ATTRIBUTE(unusable);
2207 
2208 static void extfrag_show_print(struct seq_file *m,
2209 					pg_data_t *pgdat, struct zone *zone)
2210 {
2211 	unsigned int order;
2212 	int index;
2213 
2214 	/* Alloc on stack as interrupts are disabled for zone walk */
2215 	struct contig_page_info info;
2216 
2217 	seq_printf(m, "Node %d, zone %8s ",
2218 				pgdat->node_id,
2219 				zone->name);
2220 	for (order = 0; order < MAX_ORDER; ++order) {
2221 		fill_contig_page_info(zone, order, &info);
2222 		index = __fragmentation_index(order, &info);
2223 		seq_printf(m, "%2d.%03d ", index / 1000, index % 1000);
2224 	}
2225 
2226 	seq_putc(m, '\n');
2227 }
2228 
2229 /*
2230  * Display the fragmentation index for each order at which allocations would fail
2231  */
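/*
 * Illustrative line of /sys/kernel/debug/extfrag/extfrag_index output
 * (values invented for the example); -1.000 marks an order for which a
 * request could currently be satisfied, so no index is reported there:
 *
 *	Node 0, zone   Normal -1.000 -1.000 -1.000  0.931  0.965 ...
 */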
2232 static int extfrag_show(struct seq_file *m, void *arg)
2233 {
2234 	pg_data_t *pgdat = (pg_data_t *)arg;
2235 
2236 	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2237 
2238 	return 0;
2239 }
2240 
2241 static const struct seq_operations extfrag_sops = {
2242 	.start	= frag_start,
2243 	.next	= frag_next,
2244 	.stop	= frag_stop,
2245 	.show	= extfrag_show,
2246 };
2247 
2248 DEFINE_SEQ_ATTRIBUTE(extfrag);
2249 
2250 static int __init extfrag_debug_init(void)
2251 {
2252 	struct dentry *extfrag_debug_root;
2253 
2254 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2255 
2256 	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2257 			    &unusable_fops);
2258 
2259 	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2260 			    &extfrag_fops);
2261 
2262 	return 0;
2263 }
2264 
2265 module_init(extfrag_debug_init);
2266 #endif
2267