/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/vmstat.h>
#include <linux/sched.h>

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_cpu(cpu, cpumask) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret, cpu_online_mask);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
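
/*
 * Example (a sketch of typical use; the snapshot is inherently
 * approximate since the per-cpu counters keep changing):
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	printk(KERN_DEBUG "page faults so far: %lu\n", events[PGFAULT]);
 */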

#ifdef CONFIG_HOTPLUG
/*
 * Fold the foreign cpu events into our own.
 *
 * This adds to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}
#endif /* CONFIG_HOTPLUG */

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
EXPORT_SYMBOL(vm_stat);

#ifdef CONFIG_SMP

static int calculate_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone->present_pages >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
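
/*
 * Worked example (illustrative): an 8 GB zone on a 4-cpu machine.
 * 8 GB is 64 units of 128 MB, so mem = 64 and fls(mem) = 7, while
 * fls(4) = 3. That gives threshold = 2 * 3 * (1 + 7) = 48, matching
 * the "48 / 4 cpus / 8-16 GB" row in the table above.
 */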

/*
 * Refresh the thresholds for each zone.
 */
static void refresh_zone_stat_thresholds(void)
{
	struct zone *zone;
	int cpu;
	int threshold;

	for_each_populated_zone(zone) {
		threshold = calculate_threshold(zone);

		for_each_online_cpu(cpu)
			zone_pcp(zone, cpu)->stat_threshold = threshold;
	}
}

/*
 * For use when we know that interrupts are disabled.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
				int delta)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;
	long x;

	x = delta + *p;

	if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	*p = x;
}
EXPORT_SYMBOL(__mod_zone_page_state);

/*
 * For an unknown interrupt state
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
					int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);
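
/*
 * Example (a sketch): callers that already run with interrupts
 * disabled, e.g. while holding zone->lock, can use
 * __mod_zone_page_state() directly:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * All other callers pay for the local_irq_save()/restore() pair by
 * going through mod_zone_page_state().
 */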

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)++;

	if (unlikely(*p > pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p + overstep, zone, item);
		*p = -overstep;
	}
}
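
/*
 * Worked example of the overstep above (values illustrative): with
 * stat_threshold = 32, the increment that takes *p to 33 folds
 * 33 + 16 = 49 into the zone counter and resets *p to -16. The global
 * count is temporarily overstated by 16, but the next 48 increments
 * can now complete without touching the shared atomic counters.
 */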

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
	s8 *p = pcp->vm_stat_diff + item;

	(*p)--;

	if (unlikely(*p < -pcp->stat_threshold)) {
		int overstep = pcp->stat_threshold / 2;

		zone_page_state_add(*p - overstep, zone, item);
		*p = overstep;
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

/*
 * Update the zone counters for one cpu.
 *
 * The cpu specified must be either the current cpu or a processor that
 * is not online. If it is the current cpu then the execution thread must
 * be pinned to the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. This can cause remote-node cacheline
 * bouncing and should therefore be done only when necessary.
 */
void refresh_cpu_vm_stats(int cpu)
{
	struct zone *zone;
	int i;
	int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = zone_pcp(zone, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				unsigned long flags;
				int v;

				local_irq_save(flags);
				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				local_irq_restore(flags);
				atomic_long_add(v, &zone->vm_stat[i]);
				global_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				p->expire = 3;
#endif
			}
		cond_resched();
#ifdef CONFIG_NUMA
		/*
		 * Deal with draining the remote pageset of this
		 * processor.
		 *
		 * Check if there are pages remaining in this pageset;
		 * if not then there is nothing to expire.
		 */
		if (!p->expire || !p->pcp.count)
			continue;

		/*
		 * We never drain zones local to this processor.
		 */
		if (zone_to_nid(zone) == numa_node_id()) {
			p->expire = 0;
			continue;
		}

		p->expire--;
		if (p->expire)
			continue;

		if (p->pcp.count)
			drain_zone_pages(zone, &p->pcp);
#endif
	}

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (global_diff[i])
			atomic_long_add(global_diff[i], &vm_stat[i]);
}
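
/*
 * Illustration of the expire logic above (NUMA only): whenever this
 * cpu has pending counter updates for a remote zone, p->expire is
 * reset to 3. vmstat_update() calls here about once per second, so a
 * remote pageset that sees no further activity for three consecutive
 * intervals has its leftover pages drained back to their home zone.
 */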

#endif

#ifdef CONFIG_NUMA
/*
 * preferred_zone = the zone that the allocator preferred
 * z		  = the zone from which the allocation actually occurred.
 *
 * Must be called with interrupts disabled.
 */
void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
	if (z->zone_pgdat == preferred_zone->zone_pgdat) {
		__inc_zone_state(z, NUMA_HIT);
	} else {
		__inc_zone_state(z, NUMA_MISS);
		__inc_zone_state(preferred_zone, NUMA_FOREIGN);
	}
	if (z->node == numa_node_id())
		__inc_zone_state(z, NUMA_LOCAL);
	else
		__inc_zone_state(z, NUMA_OTHER);
}
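
/*
 * Example (illustrative): a task on node 0 prefers a node 0 zone but
 * the page comes from node 1. The node 1 zone gets NUMA_MISS and, as
 * the page is remote to the requesting cpu, NUMA_OTHER; the preferred
 * node 0 zone gets NUMA_FOREIGN.
 */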
#endif

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Reclaimable",
	"Movable",
	"Reserve",
	"Isolate",
};

static void *frag_start(struct seq_file *m, loff_t *pos)
{
	pg_data_t *pgdat;
	loff_t node = *pos;
	for (pgdat = first_online_pgdat();
	     pgdat && node;
	     pgdat = next_online_pgdat(pgdat))
		--node;

	return pgdat;
}

static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	(*pos)++;
	return next_online_pgdat(pgdat);
}

static void frag_stop(struct seq_file *m, void *arg)
{
}

/* Walk all the zones in a node and print using a callback */
static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
{
	struct zone *zone;
	struct zone *node_zones = pgdat->node_zones;
	unsigned long flags;

	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		print(m, pgdat, zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}

static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
						struct zone *zone)
{
	int order;

	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
	seq_putc(m, '\n');
}

/*
 * This walks the free areas for each zone.
 */
static int frag_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, frag_show_print);
	return 0;
}
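
/*
 * The resulting /proc/buddyinfo has one line per zone with a column
 * of free-block counts per order, e.g. (values illustrative):
 *
 *	Node 0, zone      DMA      3      2      2      1 ...
 *	Node 0, zone   Normal    112     54     21     10 ...
 */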

static void pagetypeinfo_showfree_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int order, mtype;

	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
		seq_printf(m, "Node %4d, zone %8s, type %12s ",
					pgdat->node_id,
					zone->name,
					migratetype_names[mtype]);
		for (order = 0; order < MAX_ORDER; ++order) {
			unsigned long freecount = 0;
			struct free_area *area;
			struct list_head *curr;

			area = &(zone->free_area[order]);

			list_for_each(curr, &area->free_list[mtype])
				freecount++;
			seq_printf(m, "%6lu ", freecount);
		}
		seq_putc(m, '\n');
	}
}
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
		/*
		 * Ordinarily, memory holes in flatmem still have a valid
		 * memmap for the PFN range. However, an architecture for
		 * embedded systems (e.g. ARM) can free up the memmap backing
		 * holes to save memory on the assumption the memmap is
		 * never used. The page_zone linkages are then broken even
		 * though pfn_valid() returns true. Skip the page if the
		 * linkages are broken. Even if this test passed, the impact
		 * is that the counters for the movable type are off but
		 * fragmentation monitoring is likely meaningless on small
		 * systems.
		 */
		if (page_zone(page) != zone)
			continue;
#endif
		mtype = get_pageblock_migratetype(page);

		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}
/* Print out the number of pageblocks for each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}
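
/*
 * Putting the pieces together, /proc/pagetypeinfo looks roughly like
 * this (values illustrative):
 *
 *	Page block order: 9
 *	Pages per block:  512
 *
 *	Free pages count per migrate type at order       0      1 ...
 *	Node    0, zone   Normal, type    Unmovable      1      5 ...
 *
 *	Number of blocks type     Unmovable  Reclaimable      Movable ...
 *	Node 0, zone   Normal            24            5         1687 ...
 */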

static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
#ifdef CONFIG_UNEVICTABLE_LRU
	"nr_unevictable",
	"nr_mlock",
#endif
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
#endif
};
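
/*
 * Note: the strings above must stay in the same order as the
 * zone_stat_item enum followed by the vm_event_item enum;
 * vmstat_show() indexes this array with plain offsets into the
 * combined snapshot that vmstat_start() builds below.
 */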

static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu (aa: %lu ia: %lu af: %lu if: %lu)"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->pages_min,
		   zone->pages_low,
		   zone->pages_high,
		   zone->pages_scanned,
		   zone->lru[LRU_ACTIVE_ANON].nr_scan,
		   zone->lru[LRU_INACTIVE_ANON].nr_scan,
		   zone->lru[LRU_ACTIVE_FILE].nr_scan,
		   zone->lru[LRU_INACTIVE_FILE].nr_scan,
		   zone->spanned_pages,
		   zone->present_pages);

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = zone_pcp(zone, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
		   zone_is_all_unreclaimable(zone),
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes: two 512-byte sectors per KB */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}
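
/*
 * Each /proc/vmstat line is simply "<name> <value>", one per counter,
 * e.g. (values illustrative):
 *
 *	nr_free_pages 130985
 *	pgpgin 2183869
 */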

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		round_jiffies_relative(sysctl_stat_interval));
}

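/*
 * Kick off the periodic per-cpu refresh. The work item is deferrable,
 * so it will not wake an otherwise idle cpu by itself, and
 * __round_jiffies_relative() staggers the timers so that the cpus do
 * not all fire in the same tick.
 */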
static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	schedule_delayed_work_on(cpu, vmstat_work,
				 __round_jiffies_relative(HZ, cpu));
}

/*
 * Use the cpu notifier to ensure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)