xref: /openbmc/linux/fs/proc/task_mmu.c (revision 94c7b6fc)
1 #include <linux/mm.h>
2 #include <linux/vmacache.h>
3 #include <linux/hugetlb.h>
4 #include <linux/huge_mm.h>
5 #include <linux/mount.h>
6 #include <linux/seq_file.h>
7 #include <linux/highmem.h>
8 #include <linux/ptrace.h>
9 #include <linux/slab.h>
10 #include <linux/pagemap.h>
11 #include <linux/mempolicy.h>
12 #include <linux/rmap.h>
13 #include <linux/swap.h>
14 #include <linux/swapops.h>
15 #include <linux/mmu_notifier.h>
16 
17 #include <asm/elf.h>
18 #include <asm/uaccess.h>
19 #include <asm/tlbflush.h>
20 #include "internal.h"
21 
22 void task_mem(struct seq_file *m, struct mm_struct *mm)
23 {
24 	unsigned long data, text, lib, swap;
25 	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
26 
27 	/*
28 	 * Note: to minimize their overhead, mm maintains hiwater_vm and
29 	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
30 	 * collector of these hiwater stats must therefore get total_vm
31 	 * and rss too, which will usually be the higher.  Barriers? not
32 	 * worth the effort, such snapshots can always be inconsistent.
33 	 */
34 	hiwater_vm = total_vm = mm->total_vm;
35 	if (hiwater_vm < mm->hiwater_vm)
36 		hiwater_vm = mm->hiwater_vm;
37 	hiwater_rss = total_rss = get_mm_rss(mm);
38 	if (hiwater_rss < mm->hiwater_rss)
39 		hiwater_rss = mm->hiwater_rss;
40 
41 	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
42 	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
43 	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
44 	swap = get_mm_counter(mm, MM_SWAPENTS);
45 	seq_printf(m,
46 		"VmPeak:\t%8lu kB\n"
47 		"VmSize:\t%8lu kB\n"
48 		"VmLck:\t%8lu kB\n"
49 		"VmPin:\t%8lu kB\n"
50 		"VmHWM:\t%8lu kB\n"
51 		"VmRSS:\t%8lu kB\n"
52 		"VmData:\t%8lu kB\n"
53 		"VmStk:\t%8lu kB\n"
54 		"VmExe:\t%8lu kB\n"
55 		"VmLib:\t%8lu kB\n"
56 		"VmPTE:\t%8lu kB\n"
57 		"VmSwap:\t%8lu kB\n",
58 		hiwater_vm << (PAGE_SHIFT-10),
59 		total_vm << (PAGE_SHIFT-10),
60 		mm->locked_vm << (PAGE_SHIFT-10),
61 		mm->pinned_vm << (PAGE_SHIFT-10),
62 		hiwater_rss << (PAGE_SHIFT-10),
63 		total_rss << (PAGE_SHIFT-10),
64 		data << (PAGE_SHIFT-10),
65 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
66 		(PTRS_PER_PTE * sizeof(pte_t) *
67 		 atomic_long_read(&mm->nr_ptes)) >> 10,
68 		swap << (PAGE_SHIFT-10));
69 }
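
/*
 * A worked example of the unit conversions above (illustrative only,
 * assuming 4 KiB pages, i.e. PAGE_SHIFT == 12):
 *
 *   PAGE_SHIFT - 10 == 2, so  N pages << 2  ==  N * 4 kB
 *   e.g. total_vm == 25000 pages  ->  "VmSize:   100000 kB"
 *
 * Note that "data" is computed in pages and only converted to kB in the
 * seq_printf() call, while "text" and "lib" are already in kB when they
 * are computed.
 */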
70 
71 unsigned long task_vsize(struct mm_struct *mm)
72 {
73 	return PAGE_SIZE * mm->total_vm;
74 }
75 
76 unsigned long task_statm(struct mm_struct *mm,
77 			 unsigned long *shared, unsigned long *text,
78 			 unsigned long *data, unsigned long *resident)
79 {
80 	*shared = get_mm_counter(mm, MM_FILEPAGES);
81 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
82 								>> PAGE_SHIFT;
83 	*data = mm->total_vm - mm->shared_vm;
84 	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
85 	return mm->total_vm;
86 }
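
/*
 * For reference: these values back /proc/PID/statm, which (assuming the
 * usual statm layout) is printed as a single line of page counts:
 *
 *   size resident shared text lib data dt
 *
 * "size" is the return value (total_vm); all fields are in pages, not kB.
 */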
87 
88 #ifdef CONFIG_NUMA
89 /*
90  * These functions are for numa_maps, but they are called from the generic
91  * **maps seq_file ->start() and ->stop() ops.
92  *
93  * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
94  * Each mempolicy object is controlled by reference counting. The problem here
95  * is how to avoid accessing a dead mempolicy object.
96  *
97  * Because we're holding mmap_sem while reading seq_file, it's safe to access
98  * each vma's mempolicy: no vma will drop its reference to the mempolicy.
99  *
100  * A task's mempolicy (task->mempolicy) has different behavior. task->mempolicy
101  * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
102  * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
103  * guarantee the task never exits under us. But taking task_lock() around
104  * get_vma_policy() causes a lock ordering problem.
105  *
106  * To access task->mempolicy without a lock, we take a reference on the
107  * object pointed to by task->mempolicy and remember it. This guarantees
108  * that task->mempolicy points to a live object (or NULL) while numa_maps accesses it.
109  */
110 static void hold_task_mempolicy(struct proc_maps_private *priv)
111 {
112 	struct task_struct *task = priv->task;
113 
114 	task_lock(task);
115 	priv->task_mempolicy = task->mempolicy;
116 	mpol_get(priv->task_mempolicy);
117 	task_unlock(task);
118 }
119 static void release_task_mempolicy(struct proc_maps_private *priv)
120 {
121 	mpol_put(priv->task_mempolicy);
122 }
123 #else
124 static void hold_task_mempolicy(struct proc_maps_private *priv)
125 {
126 }
127 static void release_task_mempolicy(struct proc_maps_private *priv)
128 {
129 }
130 #endif
131 
132 static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
133 {
134 	if (vma && vma != priv->tail_vma) {
135 		struct mm_struct *mm = vma->vm_mm;
136 		release_task_mempolicy(priv);
137 		up_read(&mm->mmap_sem);
138 		mmput(mm);
139 	}
140 }
141 
142 static void *m_start(struct seq_file *m, loff_t *pos)
143 {
144 	struct proc_maps_private *priv = m->private;
145 	unsigned long last_addr = m->version;
146 	struct mm_struct *mm;
147 	struct vm_area_struct *vma, *tail_vma = NULL;
148 	loff_t l = *pos;
149 
150 	/* Clear the per syscall fields in priv */
151 	priv->task = NULL;
152 	priv->tail_vma = NULL;
153 
154 	/*
155 	 * We remember last_addr rather than next_addr so that the vmacache
156 	 * is hit most of the time. last_addr is zero at the beginning and
157 	 * also after an lseek; it is -1 after we have walked past the end
158 	 * of the vmas.
159 	 */
160 
161 	if (last_addr == -1UL)
162 		return NULL;
163 
164 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
165 	if (!priv->task)
166 		return ERR_PTR(-ESRCH);
167 
168 	mm = mm_access(priv->task, PTRACE_MODE_READ);
169 	if (!mm || IS_ERR(mm))
170 		return mm;
171 	down_read(&mm->mmap_sem);
172 
173 	tail_vma = get_gate_vma(priv->task->mm);
174 	priv->tail_vma = tail_vma;
175 	hold_task_mempolicy(priv);
176 	/* Start with last addr hint */
177 	vma = find_vma(mm, last_addr);
178 	if (last_addr && vma) {
179 		vma = vma->vm_next;
180 		goto out;
181 	}
182 
183 	/*
184 	 * Check that the vma index is within range and do a
185 	 * sequential scan up to that index.
186 	 */
187 	vma = NULL;
188 	if ((unsigned long)l < mm->map_count) {
189 		vma = mm->mmap;
190 		while (l-- && vma)
191 			vma = vma->vm_next;
192 		goto out;
193 	}
194 
195 	if (l != mm->map_count)
196 		tail_vma = NULL; /* After gate vma */
197 
198 out:
199 	if (vma)
200 		return vma;
201 
202 	release_task_mempolicy(priv);
203 	/* End of vmas has been reached */
204 	m->version = (tail_vma != NULL)? 0: -1UL;
205 	up_read(&mm->mmap_sem);
206 	mmput(mm);
207 	return tail_vma;
208 }
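
/*
 * Sketch of the resume protocol used above (a recap of the common case,
 * not new behaviour):
 *
 *   first read:  m->version == 0         -> sequential scan from mm->mmap
 *   later reads: m->version == vm_start  -> find_vma(mm, last_addr), then
 *                continue from vma->vm_next
 *   after end:   m->version == -1UL      -> m_start() returns NULL
 */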
209 
210 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
211 {
212 	struct proc_maps_private *priv = m->private;
213 	struct vm_area_struct *vma = v;
214 	struct vm_area_struct *tail_vma = priv->tail_vma;
215 
216 	(*pos)++;
217 	if (vma && (vma != tail_vma) && vma->vm_next)
218 		return vma->vm_next;
219 	vma_stop(priv, vma);
220 	return (vma != tail_vma)? tail_vma: NULL;
221 }
222 
223 static void m_stop(struct seq_file *m, void *v)
224 {
225 	struct proc_maps_private *priv = m->private;
226 	struct vm_area_struct *vma = v;
227 
228 	if (!IS_ERR(vma))
229 		vma_stop(priv, vma);
230 	if (priv->task)
231 		put_task_struct(priv->task);
232 }
233 
234 static int do_maps_open(struct inode *inode, struct file *file,
235 			const struct seq_operations *ops)
236 {
237 	struct proc_maps_private *priv;
238 	int ret = -ENOMEM;
239 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
240 	if (priv) {
241 		priv->pid = proc_pid(inode);
242 		ret = seq_open(file, ops);
243 		if (!ret) {
244 			struct seq_file *m = file->private_data;
245 			m->private = priv;
246 		} else {
247 			kfree(priv);
248 		}
249 	}
250 	return ret;
251 }
252 
253 static void
254 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
255 {
256 	struct mm_struct *mm = vma->vm_mm;
257 	struct file *file = vma->vm_file;
258 	struct proc_maps_private *priv = m->private;
259 	struct task_struct *task = priv->task;
260 	vm_flags_t flags = vma->vm_flags;
261 	unsigned long ino = 0;
262 	unsigned long long pgoff = 0;
263 	unsigned long start, end;
264 	dev_t dev = 0;
265 	const char *name = NULL;
266 
267 	if (file) {
268 		struct inode *inode = file_inode(vma->vm_file);
269 		dev = inode->i_sb->s_dev;
270 		ino = inode->i_ino;
271 		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
272 	}
273 
274 	/* We don't show the stack guard page in /proc/maps */
275 	start = vma->vm_start;
276 	if (stack_guard_page_start(vma, start))
277 		start += PAGE_SIZE;
278 	end = vma->vm_end;
279 	if (stack_guard_page_end(vma, end))
280 		end -= PAGE_SIZE;
281 
282 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
283 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
284 			start,
285 			end,
286 			flags & VM_READ ? 'r' : '-',
287 			flags & VM_WRITE ? 'w' : '-',
288 			flags & VM_EXEC ? 'x' : '-',
289 			flags & VM_MAYSHARE ? 's' : 'p',
290 			pgoff,
291 			MAJOR(dev), MINOR(dev), ino);
292 
293 	/*
294 	 * Print the dentry name for named mappings, and a
295 	 * special [heap] marker for the heap:
296 	 */
297 	if (file) {
298 		seq_pad(m, ' ');
299 		seq_path(m, &file->f_path, "\n");
300 		goto done;
301 	}
302 
303 	if (vma->vm_ops && vma->vm_ops->name) {
304 		name = vma->vm_ops->name(vma);
305 		if (name)
306 			goto done;
307 	}
308 
309 	name = arch_vma_name(vma);
310 	if (!name) {
311 		pid_t tid;
312 
313 		if (!mm) {
314 			name = "[vdso]";
315 			goto done;
316 		}
317 
318 		if (vma->vm_start <= mm->brk &&
319 		    vma->vm_end >= mm->start_brk) {
320 			name = "[heap]";
321 			goto done;
322 		}
323 
324 		tid = vm_is_stack(task, vma, is_pid);
325 
326 		if (tid != 0) {
327 			/*
328 			 * Thread stack in /proc/PID/task/TID/maps or
329 			 * the main process stack.
330 			 */
331 			if (!is_pid || (vma->vm_start <= mm->start_stack &&
332 			    vma->vm_end >= mm->start_stack)) {
333 				name = "[stack]";
334 			} else {
335 				/* Thread stack in /proc/PID/maps */
336 				seq_pad(m, ' ');
337 				seq_printf(m, "[stack:%d]", tid);
338 			}
339 		}
340 	}
341 
342 done:
343 	if (name) {
344 		seq_pad(m, ' ');
345 		seq_puts(m, name);
346 	}
347 	seq_putc(m, '\n');
348 }
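
/*
 * Example of a resulting /proc/PID/maps line (made-up addresses and inode
 * number, for illustration only):
 *
 *   00400000-0040c000 r-xp 00000000 08:01 131090     /bin/cat
 *
 * i.e. start-end, permissions (rwx plus s/p for shared/private), file
 * offset, device major:minor, inode, and finally the pathname or a special
 * name such as [heap], [stack] or [stack:TID].
 */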
349 
350 static int show_map(struct seq_file *m, void *v, int is_pid)
351 {
352 	struct vm_area_struct *vma = v;
353 	struct proc_maps_private *priv = m->private;
354 	struct task_struct *task = priv->task;
355 
356 	show_map_vma(m, vma, is_pid);
357 
358 	if (m->count < m->size)  /* vma is copied successfully */
359 		m->version = (vma != get_gate_vma(task->mm))
360 			? vma->vm_start : 0;
361 	return 0;
362 }
363 
364 static int show_pid_map(struct seq_file *m, void *v)
365 {
366 	return show_map(m, v, 1);
367 }
368 
369 static int show_tid_map(struct seq_file *m, void *v)
370 {
371 	return show_map(m, v, 0);
372 }
373 
374 static const struct seq_operations proc_pid_maps_op = {
375 	.start	= m_start,
376 	.next	= m_next,
377 	.stop	= m_stop,
378 	.show	= show_pid_map
379 };
380 
381 static const struct seq_operations proc_tid_maps_op = {
382 	.start	= m_start,
383 	.next	= m_next,
384 	.stop	= m_stop,
385 	.show	= show_tid_map
386 };
387 
388 static int pid_maps_open(struct inode *inode, struct file *file)
389 {
390 	return do_maps_open(inode, file, &proc_pid_maps_op);
391 }
392 
393 static int tid_maps_open(struct inode *inode, struct file *file)
394 {
395 	return do_maps_open(inode, file, &proc_tid_maps_op);
396 }
397 
398 const struct file_operations proc_pid_maps_operations = {
399 	.open		= pid_maps_open,
400 	.read		= seq_read,
401 	.llseek		= seq_lseek,
402 	.release	= seq_release_private,
403 };
404 
405 const struct file_operations proc_tid_maps_operations = {
406 	.open		= tid_maps_open,
407 	.read		= seq_read,
408 	.llseek		= seq_lseek,
409 	.release	= seq_release_private,
410 };
411 
412 /*
413  * Proportional Set Size (PSS): my share of RSS.
414  *
415  * PSS of a process is the count of pages it has in memory, where each
416  * page is divided by the number of processes sharing it.  So if a
417  * process has 1000 pages all to itself, and 1000 shared with one other
418  * process, its PSS will be 1500.
419  *
420  * To keep accumulated division errors low, we use a 64-bit
421  * fixed-point pss counter. So (pss >> PSS_SHIFT) is the real
422  * byte count.
423  *
424  * A shift of 12 before division means (assuming 4K page size):
425  * 	- 1M 3-user-pages add up to 8KB errors;
426  * 	- supports mapcount up to 2^24, or 16M;
427  * 	- supports PSS up to 2^52 bytes, or 4PB.
428  */
429 #define PSS_SHIFT 12
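
/*
 * Worked example of the fixed-point arithmetic (illustrative numbers,
 * assuming 4 KiB pages):
 *
 *   a page with mapcount 3 contributes (4096 << PSS_SHIFT) / 3
 *     = 16777216 / 3 = 5592405 to pss;
 *   5592405 >> PSS_SHIFT = 1365 bytes, i.e. about a third of the page.
 *
 * So a process with 1000 private pages plus 1000 pages shared with one
 * other process ends up with a PSS of roughly 1500 pages' worth of bytes,
 * as described above.
 */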
430 
431 #ifdef CONFIG_PROC_PAGE_MONITOR
432 struct mem_size_stats {
433 	struct vm_area_struct *vma;
434 	unsigned long resident;
435 	unsigned long shared_clean;
436 	unsigned long shared_dirty;
437 	unsigned long private_clean;
438 	unsigned long private_dirty;
439 	unsigned long referenced;
440 	unsigned long anonymous;
441 	unsigned long anonymous_thp;
442 	unsigned long swap;
443 	unsigned long nonlinear;
444 	u64 pss;
445 };
446 
447 
448 static void smaps_pte_entry(pte_t ptent, unsigned long addr,
449 		unsigned long ptent_size, struct mm_walk *walk)
450 {
451 	struct mem_size_stats *mss = walk->private;
452 	struct vm_area_struct *vma = mss->vma;
453 	pgoff_t pgoff = linear_page_index(vma, addr);
454 	struct page *page = NULL;
455 	int mapcount;
456 
457 	if (pte_present(ptent)) {
458 		page = vm_normal_page(vma, addr, ptent);
459 	} else if (is_swap_pte(ptent)) {
460 		swp_entry_t swpent = pte_to_swp_entry(ptent);
461 
462 		if (!non_swap_entry(swpent))
463 			mss->swap += ptent_size;
464 		else if (is_migration_entry(swpent))
465 			page = migration_entry_to_page(swpent);
466 	} else if (pte_file(ptent)) {
467 		if (pte_to_pgoff(ptent) != pgoff)
468 			mss->nonlinear += ptent_size;
469 	}
470 
471 	if (!page)
472 		return;
473 
474 	if (PageAnon(page))
475 		mss->anonymous += ptent_size;
476 
477 	if (page->index != pgoff)
478 		mss->nonlinear += ptent_size;
479 
480 	mss->resident += ptent_size;
481 	/* Accumulate the size in pages that have been accessed. */
482 	if (pte_young(ptent) || PageReferenced(page))
483 		mss->referenced += ptent_size;
484 	mapcount = page_mapcount(page);
485 	if (mapcount >= 2) {
486 		if (pte_dirty(ptent) || PageDirty(page))
487 			mss->shared_dirty += ptent_size;
488 		else
489 			mss->shared_clean += ptent_size;
490 		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
491 	} else {
492 		if (pte_dirty(ptent) || PageDirty(page))
493 			mss->private_dirty += ptent_size;
494 		else
495 			mss->private_clean += ptent_size;
496 		mss->pss += (ptent_size << PSS_SHIFT);
497 	}
498 }
499 
500 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
501 			   struct mm_walk *walk)
502 {
503 	struct mem_size_stats *mss = walk->private;
504 	struct vm_area_struct *vma = mss->vma;
505 	pte_t *pte;
506 	spinlock_t *ptl;
507 
508 	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
509 		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
510 		spin_unlock(ptl);
511 		mss->anonymous_thp += HPAGE_PMD_SIZE;
512 		return 0;
513 	}
514 
515 	if (pmd_trans_unstable(pmd))
516 		return 0;
517 	/*
518 	 * The mmap_sem held all the way back in m_start() is what
519 	 * keeps khugepaged out of here and prevents it from collapsing
520 	 * things under us.
521 	 */
522 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
523 	for (; addr != end; pte++, addr += PAGE_SIZE)
524 		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
525 	pte_unmap_unlock(pte - 1, ptl);
526 	cond_resched();
527 	return 0;
528 }
529 
530 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
531 {
532 	/*
533 	 * Don't forget to update Documentation/ on changes.
534 	 */
535 	static const char mnemonics[BITS_PER_LONG][2] = {
536 		/*
537 		 * In case we meet a flag we don't know about.
538 		 */
539 		[0 ... (BITS_PER_LONG-1)] = "??",
540 
541 		[ilog2(VM_READ)]	= "rd",
542 		[ilog2(VM_WRITE)]	= "wr",
543 		[ilog2(VM_EXEC)]	= "ex",
544 		[ilog2(VM_SHARED)]	= "sh",
545 		[ilog2(VM_MAYREAD)]	= "mr",
546 		[ilog2(VM_MAYWRITE)]	= "mw",
547 		[ilog2(VM_MAYEXEC)]	= "me",
548 		[ilog2(VM_MAYSHARE)]	= "ms",
549 		[ilog2(VM_GROWSDOWN)]	= "gd",
550 		[ilog2(VM_PFNMAP)]	= "pf",
551 		[ilog2(VM_DENYWRITE)]	= "dw",
552 		[ilog2(VM_LOCKED)]	= "lo",
553 		[ilog2(VM_IO)]		= "io",
554 		[ilog2(VM_SEQ_READ)]	= "sr",
555 		[ilog2(VM_RAND_READ)]	= "rr",
556 		[ilog2(VM_DONTCOPY)]	= "dc",
557 		[ilog2(VM_DONTEXPAND)]	= "de",
558 		[ilog2(VM_ACCOUNT)]	= "ac",
559 		[ilog2(VM_NORESERVE)]	= "nr",
560 		[ilog2(VM_HUGETLB)]	= "ht",
561 		[ilog2(VM_NONLINEAR)]	= "nl",
562 		[ilog2(VM_ARCH_1)]	= "ar",
563 		[ilog2(VM_DONTDUMP)]	= "dd",
564 #ifdef CONFIG_MEM_SOFT_DIRTY
565 		[ilog2(VM_SOFTDIRTY)]	= "sd",
566 #endif
567 		[ilog2(VM_MIXEDMAP)]	= "mm",
568 		[ilog2(VM_HUGEPAGE)]	= "hg",
569 		[ilog2(VM_NOHUGEPAGE)]	= "nh",
570 		[ilog2(VM_MERGEABLE)]	= "mg",
571 	};
572 	size_t i;
573 
574 	seq_puts(m, "VmFlags: ");
575 	for (i = 0; i < BITS_PER_LONG; i++) {
576 		if (vma->vm_flags & (1UL << i)) {
577 			seq_printf(m, "%c%c ",
578 				   mnemonics[i][0], mnemonics[i][1]);
579 		}
580 	}
581 	seq_putc(m, '\n');
582 }
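
/*
 * Example of the resulting line for a typical private, executable file
 * mapping (illustrative only; the exact set of flags depends on the vma):
 *
 *   VmFlags: rd ex mr mw me
 */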
583 
584 static int show_smap(struct seq_file *m, void *v, int is_pid)
585 {
586 	struct proc_maps_private *priv = m->private;
587 	struct task_struct *task = priv->task;
588 	struct vm_area_struct *vma = v;
589 	struct mem_size_stats mss;
590 	struct mm_walk smaps_walk = {
591 		.pmd_entry = smaps_pte_range,
592 		.mm = vma->vm_mm,
593 		.private = &mss,
594 	};
595 
596 	memset(&mss, 0, sizeof mss);
597 	mss.vma = vma;
598 	/* mmap_sem is held in m_start */
599 	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
600 		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
601 
602 	show_map_vma(m, vma, is_pid);
603 
604 	seq_printf(m,
605 		   "Size:           %8lu kB\n"
606 		   "Rss:            %8lu kB\n"
607 		   "Pss:            %8lu kB\n"
608 		   "Shared_Clean:   %8lu kB\n"
609 		   "Shared_Dirty:   %8lu kB\n"
610 		   "Private_Clean:  %8lu kB\n"
611 		   "Private_Dirty:  %8lu kB\n"
612 		   "Referenced:     %8lu kB\n"
613 		   "Anonymous:      %8lu kB\n"
614 		   "AnonHugePages:  %8lu kB\n"
615 		   "Swap:           %8lu kB\n"
616 		   "KernelPageSize: %8lu kB\n"
617 		   "MMUPageSize:    %8lu kB\n"
618 		   "Locked:         %8lu kB\n",
619 		   (vma->vm_end - vma->vm_start) >> 10,
620 		   mss.resident >> 10,
621 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
622 		   mss.shared_clean  >> 10,
623 		   mss.shared_dirty  >> 10,
624 		   mss.private_clean >> 10,
625 		   mss.private_dirty >> 10,
626 		   mss.referenced >> 10,
627 		   mss.anonymous >> 10,
628 		   mss.anonymous_thp >> 10,
629 		   mss.swap >> 10,
630 		   vma_kernel_pagesize(vma) >> 10,
631 		   vma_mmu_pagesize(vma) >> 10,
632 		   (vma->vm_flags & VM_LOCKED) ?
633 			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
634 
635 	if (vma->vm_flags & VM_NONLINEAR)
636 		seq_printf(m, "Nonlinear:      %8lu kB\n",
637 				mss.nonlinear >> 10);
638 
639 	show_smap_vma_flags(m, vma);
640 
641 	if (m->count < m->size)  /* vma is copied successfully */
642 		m->version = (vma != get_gate_vma(task->mm))
643 			? vma->vm_start : 0;
644 	return 0;
645 }
646 
647 static int show_pid_smap(struct seq_file *m, void *v)
648 {
649 	return show_smap(m, v, 1);
650 }
651 
652 static int show_tid_smap(struct seq_file *m, void *v)
653 {
654 	return show_smap(m, v, 0);
655 }
656 
657 static const struct seq_operations proc_pid_smaps_op = {
658 	.start	= m_start,
659 	.next	= m_next,
660 	.stop	= m_stop,
661 	.show	= show_pid_smap
662 };
663 
664 static const struct seq_operations proc_tid_smaps_op = {
665 	.start	= m_start,
666 	.next	= m_next,
667 	.stop	= m_stop,
668 	.show	= show_tid_smap
669 };
670 
671 static int pid_smaps_open(struct inode *inode, struct file *file)
672 {
673 	return do_maps_open(inode, file, &proc_pid_smaps_op);
674 }
675 
676 static int tid_smaps_open(struct inode *inode, struct file *file)
677 {
678 	return do_maps_open(inode, file, &proc_tid_smaps_op);
679 }
680 
681 const struct file_operations proc_pid_smaps_operations = {
682 	.open		= pid_smaps_open,
683 	.read		= seq_read,
684 	.llseek		= seq_lseek,
685 	.release	= seq_release_private,
686 };
687 
688 const struct file_operations proc_tid_smaps_operations = {
689 	.open		= tid_smaps_open,
690 	.read		= seq_read,
691 	.llseek		= seq_lseek,
692 	.release	= seq_release_private,
693 };
694 
695 /*
696  * We do not want to have constant page-shift bits sitting in
697  * pagemap entries and are about to reuse them some time soon.
698  *
699  * Here's the "migration strategy":
700  * 1. when the system boots these bits remain what they are,
701  *    but a warning about the future change is printed in the log;
702  * 2. once anyone clears soft-dirty bits via the clear_refs file,
703  *    this flag is set to denote that the user is aware of the
704  *    new API and those page-shift bits change their meaning.
705  *    The respective warning is printed in dmesg;
706  * 3. in a couple of releases we will remove all mentions of
707  *    page-shift in pagemap entries.
708  */
709 
710 static bool soft_dirty_cleared __read_mostly;
711 
712 enum clear_refs_types {
713 	CLEAR_REFS_ALL = 1,
714 	CLEAR_REFS_ANON,
715 	CLEAR_REFS_MAPPED,
716 	CLEAR_REFS_SOFT_DIRTY,
717 	CLEAR_REFS_LAST,
718 };
719 
720 struct clear_refs_private {
721 	struct vm_area_struct *vma;
722 	enum clear_refs_types type;
723 };
724 
725 static inline void clear_soft_dirty(struct vm_area_struct *vma,
726 		unsigned long addr, pte_t *pte)
727 {
728 #ifdef CONFIG_MEM_SOFT_DIRTY
729 	/*
730 	 * The soft-dirty tracker uses page faults (#PF) to catch writes
731 	 * to pages, so write-protect the pte as well. See
732 	 * Documentation/vm/soft-dirty.txt for a full description
733 	 * of how soft-dirty works.
734 	 */
735 	pte_t ptent = *pte;
736 
737 	if (pte_present(ptent)) {
738 		ptent = pte_wrprotect(ptent);
739 		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
740 	} else if (is_swap_pte(ptent)) {
741 		ptent = pte_swp_clear_soft_dirty(ptent);
742 	} else if (pte_file(ptent)) {
743 		ptent = pte_file_clear_soft_dirty(ptent);
744 	}
745 
746 	set_pte_at(vma->vm_mm, addr, pte, ptent);
747 #endif
748 }
749 
750 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
751 				unsigned long end, struct mm_walk *walk)
752 {
753 	struct clear_refs_private *cp = walk->private;
754 	struct vm_area_struct *vma = cp->vma;
755 	pte_t *pte, ptent;
756 	spinlock_t *ptl;
757 	struct page *page;
758 
759 	split_huge_page_pmd(vma, addr, pmd);
760 	if (pmd_trans_unstable(pmd))
761 		return 0;
762 
763 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
764 	for (; addr != end; pte++, addr += PAGE_SIZE) {
765 		ptent = *pte;
766 
767 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
768 			clear_soft_dirty(vma, addr, pte);
769 			continue;
770 		}
771 
772 		if (!pte_present(ptent))
773 			continue;
774 
775 		page = vm_normal_page(vma, addr, ptent);
776 		if (!page)
777 			continue;
778 
779 		/* Clear accessed and referenced bits. */
780 		ptep_test_and_clear_young(vma, addr, pte);
781 		ClearPageReferenced(page);
782 	}
783 	pte_unmap_unlock(pte - 1, ptl);
784 	cond_resched();
785 	return 0;
786 }
787 
788 static ssize_t clear_refs_write(struct file *file, const char __user *buf,
789 				size_t count, loff_t *ppos)
790 {
791 	struct task_struct *task;
792 	char buffer[PROC_NUMBUF];
793 	struct mm_struct *mm;
794 	struct vm_area_struct *vma;
795 	enum clear_refs_types type;
796 	int itype;
797 	int rv;
798 
799 	memset(buffer, 0, sizeof(buffer));
800 	if (count > sizeof(buffer) - 1)
801 		count = sizeof(buffer) - 1;
802 	if (copy_from_user(buffer, buf, count))
803 		return -EFAULT;
804 	rv = kstrtoint(strstrip(buffer), 10, &itype);
805 	if (rv < 0)
806 		return rv;
807 	type = (enum clear_refs_types)itype;
808 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
809 		return -EINVAL;
810 
811 	if (type == CLEAR_REFS_SOFT_DIRTY) {
812 		soft_dirty_cleared = true;
813 		pr_warn_once("The pagemap bits 55-60 have changed their meaning!"
814 			     " See the linux/Documentation/vm/pagemap.txt for "
815 			     "details.\n");
816 	}
817 
818 	task = get_proc_task(file_inode(file));
819 	if (!task)
820 		return -ESRCH;
821 	mm = get_task_mm(task);
822 	if (mm) {
823 		struct clear_refs_private cp = {
824 			.type = type,
825 		};
826 		struct mm_walk clear_refs_walk = {
827 			.pmd_entry = clear_refs_pte_range,
828 			.mm = mm,
829 			.private = &cp,
830 		};
831 		down_read(&mm->mmap_sem);
832 		if (type == CLEAR_REFS_SOFT_DIRTY)
833 			mmu_notifier_invalidate_range_start(mm, 0, -1);
834 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
835 			cp.vma = vma;
836 			if (is_vm_hugetlb_page(vma))
837 				continue;
838 			/*
839 			 * Writing 1 to /proc/pid/clear_refs affects all pages.
840 			 *
841 			 * Writing 2 to /proc/pid/clear_refs only affects
842 			 * anonymous pages.
843 			 *
844 			 * Writing 3 to /proc/pid/clear_refs only affects file
845 			 * mapped pages.
846 			 *
847 			 * Writing 4 to /proc/pid/clear_refs affects all pages.
848 			 */
849 			if (type == CLEAR_REFS_ANON && vma->vm_file)
850 				continue;
851 			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
852 				continue;
853 			if (type == CLEAR_REFS_SOFT_DIRTY) {
854 				if (vma->vm_flags & VM_SOFTDIRTY)
855 					vma->vm_flags &= ~VM_SOFTDIRTY;
856 			}
857 			walk_page_range(vma->vm_start, vma->vm_end,
858 					&clear_refs_walk);
859 		}
860 		if (type == CLEAR_REFS_SOFT_DIRTY)
861 			mmu_notifier_invalidate_range_end(mm, 0, -1);
862 		flush_tlb_mm(mm);
863 		up_read(&mm->mmap_sem);
864 		mmput(mm);
865 	}
866 	put_task_struct(task);
867 
868 	return count;
869 }
870 
871 const struct file_operations proc_clear_refs_operations = {
872 	.write		= clear_refs_write,
873 	.llseek		= noop_llseek,
874 };
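
/*
 * Typical usage of this interface from userspace (a sketch, using the
 * values documented in clear_refs_write() above):
 *
 *   echo 1 > /proc/PID/clear_refs   # clear referenced bits on all pages
 *   echo 2 > /proc/PID/clear_refs   # anonymous pages only
 *   echo 3 > /proc/PID/clear_refs   # file-mapped pages only
 *   echo 4 > /proc/PID/clear_refs   # clear soft-dirty bits
 *
 * For soft-dirty tracking one writes 4, lets the task run, and then reads
 * /proc/PID/pagemap to see which pages have been written to since.
 */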
875 
876 typedef struct {
877 	u64 pme;
878 } pagemap_entry_t;
879 
880 struct pagemapread {
881 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
882 	pagemap_entry_t *buffer;
883 	bool v2;
884 };
885 
886 #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
887 #define PAGEMAP_WALK_MASK	(PMD_MASK)
888 
889 #define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
890 #define PM_STATUS_BITS      3
891 #define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
892 #define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
893 #define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
894 #define PM_PSHIFT_BITS      6
895 #define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
896 #define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
897 #define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
898 #define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
899 #define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
900 /* in the "new" pagemap, the pshift bits are reused for more status bits */
901 #define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))
902 
903 #define __PM_SOFT_DIRTY      (1LL)
904 #define PM_PRESENT          PM_STATUS(4LL)
905 #define PM_SWAP             PM_STATUS(2LL)
906 #define PM_FILE             PM_STATUS(1LL)
907 #define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
908 #define PM_END_OF_BUFFER    1
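
/*
 * How the macros above lay out a 64-bit pagemap entry (a recap of the
 * definitions, with concrete bit positions):
 *
 *   PM_STATUS_OFFSET == 61, so PM_FILE is bit 61, PM_SWAP bit 62 and
 *   PM_PRESENT bit 63;
 *   PM_PSHIFT_OFFSET == 55, so PM_STATUS2()/__PM_SOFT_DIRTY occupy
 *   bits 55-60;
 *   PM_PFRAME_MASK keeps bits 0-54 for the page frame number.
 */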
909 
910 static inline pagemap_entry_t make_pme(u64 val)
911 {
912 	return (pagemap_entry_t) { .pme = val };
913 }
914 
915 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
916 			  struct pagemapread *pm)
917 {
918 	pm->buffer[pm->pos++] = *pme;
919 	if (pm->pos >= pm->len)
920 		return PM_END_OF_BUFFER;
921 	return 0;
922 }
923 
924 static int pagemap_pte_hole(unsigned long start, unsigned long end,
925 				struct mm_walk *walk)
926 {
927 	struct pagemapread *pm = walk->private;
928 	unsigned long addr;
929 	int err = 0;
930 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
931 
932 	for (addr = start; addr < end; addr += PAGE_SIZE) {
933 		err = add_to_pagemap(addr, &pme, pm);
934 		if (err)
935 			break;
936 	}
937 	return err;
938 }
939 
940 static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
941 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
942 {
943 	u64 frame, flags;
944 	struct page *page = NULL;
945 	int flags2 = 0;
946 
947 	if (pte_present(pte)) {
948 		frame = pte_pfn(pte);
949 		flags = PM_PRESENT;
950 		page = vm_normal_page(vma, addr, pte);
951 		if (pte_soft_dirty(pte))
952 			flags2 |= __PM_SOFT_DIRTY;
953 	} else if (is_swap_pte(pte)) {
954 		swp_entry_t entry;
955 		if (pte_swp_soft_dirty(pte))
956 			flags2 |= __PM_SOFT_DIRTY;
957 		entry = pte_to_swp_entry(pte);
958 		frame = swp_type(entry) |
959 			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
960 		flags = PM_SWAP;
961 		if (is_migration_entry(entry))
962 			page = migration_entry_to_page(entry);
963 	} else {
964 		if (vma->vm_flags & VM_SOFTDIRTY)
965 			flags2 |= __PM_SOFT_DIRTY;
966 		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
967 		return;
968 	}
969 
970 	if (page && !PageAnon(page))
971 		flags |= PM_FILE;
972 	if ((vma->vm_flags & VM_SOFTDIRTY))
973 		flags2 |= __PM_SOFT_DIRTY;
974 
975 	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
976 }
977 
978 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
979 static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
980 		pmd_t pmd, int offset, int pmd_flags2)
981 {
982 	/*
983 	 * Currently a pmd for thp is always present because thp cannot be
984 	 * swapped out, migrated, or HWPOISONed (it is split in such cases
985 	 * instead). This if-check is just to prepare for a future implementation.
986 	 */
987 	if (pmd_present(pmd))
988 		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
989 				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
990 	else
991 		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
992 }
993 #else
994 static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
995 		pmd_t pmd, int offset, int pmd_flags2)
996 {
997 }
998 #endif
999 
1000 static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
1001 			     struct mm_walk *walk)
1002 {
1003 	struct vm_area_struct *vma;
1004 	struct pagemapread *pm = walk->private;
1005 	spinlock_t *ptl;
1006 	pte_t *pte;
1007 	int err = 0;
1008 	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
1009 
1010 	/* find the first VMA at or above 'addr' */
1011 	vma = find_vma(walk->mm, addr);
1012 	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1013 		int pmd_flags2;
1014 
1015 		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
1016 			pmd_flags2 = __PM_SOFT_DIRTY;
1017 		else
1018 			pmd_flags2 = 0;
1019 
1020 		for (; addr != end; addr += PAGE_SIZE) {
1021 			unsigned long offset;
1022 
1023 			offset = (addr & ~PAGEMAP_WALK_MASK) >>
1024 					PAGE_SHIFT;
1025 			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
1026 			err = add_to_pagemap(addr, &pme, pm);
1027 			if (err)
1028 				break;
1029 		}
1030 		spin_unlock(ptl);
1031 		return err;
1032 	}
1033 
1034 	if (pmd_trans_unstable(pmd))
1035 		return 0;
1036 	for (; addr != end; addr += PAGE_SIZE) {
1037 		int flags2;
1038 
1039 		/* check to see if we've left 'vma' behind
1040 		 * and need a new, higher one */
1041 		if (vma && (addr >= vma->vm_end)) {
1042 			vma = find_vma(walk->mm, addr);
1043 			if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1044 				flags2 = __PM_SOFT_DIRTY;
1045 			else
1046 				flags2 = 0;
1047 			pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
1048 		}
1049 
1050 		/* check that 'vma' actually covers this address,
1051 		 * and that it isn't a huge page vma */
1052 		if (vma && (vma->vm_start <= addr) &&
1053 		    !is_vm_hugetlb_page(vma)) {
1054 			pte = pte_offset_map(pmd, addr);
1055 			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
1056 			/* unmap before userspace copy */
1057 			pte_unmap(pte);
1058 		}
1059 		err = add_to_pagemap(addr, &pme, pm);
1060 		if (err)
1061 			return err;
1062 	}
1063 
1064 	cond_resched();
1065 
1066 	return err;
1067 }
1068 
1069 #ifdef CONFIG_HUGETLB_PAGE
1070 static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
1071 					pte_t pte, int offset, int flags2)
1072 {
1073 	if (pte_present(pte))
1074 		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
1075 				PM_STATUS2(pm->v2, flags2)		|
1076 				PM_PRESENT);
1077 	else
1078 		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
1079 				PM_STATUS2(pm->v2, flags2));
1080 }
1081 
1082 /* This function walks within one hugetlb entry in a single call */
1083 static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
1084 				 unsigned long addr, unsigned long end,
1085 				 struct mm_walk *walk)
1086 {
1087 	struct pagemapread *pm = walk->private;
1088 	struct vm_area_struct *vma;
1089 	int err = 0;
1090 	int flags2;
1091 	pagemap_entry_t pme;
1092 
1093 	vma = find_vma(walk->mm, addr);
1094 	WARN_ON_ONCE(!vma);
1095 
1096 	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
1097 		flags2 = __PM_SOFT_DIRTY;
1098 	else
1099 		flags2 = 0;
1100 
1101 	for (; addr != end; addr += PAGE_SIZE) {
1102 		int offset = (addr & ~hmask) >> PAGE_SHIFT;
1103 		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
1104 		err = add_to_pagemap(addr, &pme, pm);
1105 		if (err)
1106 			return err;
1107 	}
1108 
1109 	cond_resched();
1110 
1111 	return err;
1112 }
1113 #endif /* HUGETLB_PAGE */
1114 
1115 /*
1116  * /proc/pid/pagemap - an array mapping virtual pages to pfns
1117  *
1118  * For each page in the address space, this file contains one 64-bit entry
1119  * consisting of the following:
1120  *
1121  * Bits 0-54  page frame number (PFN) if present
1122  * Bits 0-4   swap type if swapped
1123  * Bits 5-54  swap offset if swapped
1124  * Bits 55-60 page shift (page size = 1<<page shift)
1125  * Bit  61    page is file-page or shared-anon
1126  * Bit  62    page swapped
1127  * Bit  63    page present
1128  *
1129  * If the page is not present but in swap, then the PFN contains an
1130  * encoding of the swap file number and the page's offset into the
1131  * swap. Unmapped pages return a null PFN. This allows determining
1132  * precisely which pages are mapped (or in swap) and comparing mapped
1133  * pages between processes.
1134  *
1135  * Efficient users of this interface will use /proc/pid/maps to
1136  * determine which areas of memory are actually mapped and llseek to
1137  * skip over unmapped regions.
1138  */
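
/*
 * Minimal userspace sketch of reading one entry (illustrative only, error
 * handling omitted; "fd" is an open /proc/PID/pagemap descriptor and
 * "vaddr"/"page_size" are supplied by the caller):
 *
 *   uint64_t entry;
 *   off_t off = (vaddr / page_size) * sizeof(entry);
 *   pread(fd, &entry, sizeof(entry), off);
 *   int present = (entry >> 63) & 1;
 *   uint64_t pfn = entry & ((1ULL << 55) - 1);   (bits 0-54)
 */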
1139 static ssize_t pagemap_read(struct file *file, char __user *buf,
1140 			    size_t count, loff_t *ppos)
1141 {
1142 	struct task_struct *task = get_proc_task(file_inode(file));
1143 	struct mm_struct *mm;
1144 	struct pagemapread pm;
1145 	int ret = -ESRCH;
1146 	struct mm_walk pagemap_walk = {};
1147 	unsigned long src;
1148 	unsigned long svpfn;
1149 	unsigned long start_vaddr;
1150 	unsigned long end_vaddr;
1151 	int copied = 0;
1152 
1153 	if (!task)
1154 		goto out;
1155 
1156 	ret = -EINVAL;
1157 	/* file position must be aligned */
1158 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1159 		goto out_task;
1160 
1161 	ret = 0;
1162 	if (!count)
1163 		goto out_task;
1164 
1165 	pm.v2 = soft_dirty_cleared;
1166 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1167 	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
1168 	ret = -ENOMEM;
1169 	if (!pm.buffer)
1170 		goto out_task;
1171 
1172 	mm = mm_access(task, PTRACE_MODE_READ);
1173 	ret = PTR_ERR(mm);
1174 	if (!mm || IS_ERR(mm))
1175 		goto out_free;
1176 
1177 	pagemap_walk.pmd_entry = pagemap_pte_range;
1178 	pagemap_walk.pte_hole = pagemap_pte_hole;
1179 #ifdef CONFIG_HUGETLB_PAGE
1180 	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
1181 #endif
1182 	pagemap_walk.mm = mm;
1183 	pagemap_walk.private = &pm;
1184 
1185 	src = *ppos;
1186 	svpfn = src / PM_ENTRY_BYTES;
1187 	start_vaddr = svpfn << PAGE_SHIFT;
1188 	end_vaddr = TASK_SIZE_OF(task);
1189 
1190 	/* watch out for wraparound */
1191 	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
1192 		start_vaddr = end_vaddr;
1193 
1194 	/*
1195 	 * The odds are that this will stop walking way
1196 	 * before end_vaddr, because the length of the
1197 	 * user buffer is tracked in "pm", and the walk
1198 	 * will stop when we hit the end of the buffer.
1199 	 */
1200 	ret = 0;
1201 	while (count && (start_vaddr < end_vaddr)) {
1202 		int len;
1203 		unsigned long end;
1204 
1205 		pm.pos = 0;
1206 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1207 		/* overflow ? */
1208 		if (end < start_vaddr || end > end_vaddr)
1209 			end = end_vaddr;
1210 		down_read(&mm->mmap_sem);
1211 		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1212 		up_read(&mm->mmap_sem);
1213 		start_vaddr = end;
1214 
1215 		len = min(count, PM_ENTRY_BYTES * pm.pos);
1216 		if (copy_to_user(buf, pm.buffer, len)) {
1217 			ret = -EFAULT;
1218 			goto out_mm;
1219 		}
1220 		copied += len;
1221 		buf += len;
1222 		count -= len;
1223 	}
1224 	*ppos += copied;
1225 	if (!ret || ret == PM_END_OF_BUFFER)
1226 		ret = copied;
1227 
1228 out_mm:
1229 	mmput(mm);
1230 out_free:
1231 	kfree(pm.buffer);
1232 out_task:
1233 	put_task_struct(task);
1234 out:
1235 	return ret;
1236 }
1237 
1238 static int pagemap_open(struct inode *inode, struct file *file)
1239 {
1240 	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
1241 			"to stop being page-shift some time soon. See the "
1242 			"linux/Documentation/vm/pagemap.txt for details.\n");
1243 	return 0;
1244 }
1245 
1246 const struct file_operations proc_pagemap_operations = {
1247 	.llseek		= mem_lseek, /* borrow this */
1248 	.read		= pagemap_read,
1249 	.open		= pagemap_open,
1250 };
1251 #endif /* CONFIG_PROC_PAGE_MONITOR */
1252 
1253 #ifdef CONFIG_NUMA
1254 
1255 struct numa_maps {
1256 	struct vm_area_struct *vma;
1257 	unsigned long pages;
1258 	unsigned long anon;
1259 	unsigned long active;
1260 	unsigned long writeback;
1261 	unsigned long mapcount_max;
1262 	unsigned long dirty;
1263 	unsigned long swapcache;
1264 	unsigned long node[MAX_NUMNODES];
1265 };
1266 
1267 struct numa_maps_private {
1268 	struct proc_maps_private proc_maps;
1269 	struct numa_maps md;
1270 };
1271 
1272 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1273 			unsigned long nr_pages)
1274 {
1275 	int count = page_mapcount(page);
1276 
1277 	md->pages += nr_pages;
1278 	if (pte_dirty || PageDirty(page))
1279 		md->dirty += nr_pages;
1280 
1281 	if (PageSwapCache(page))
1282 		md->swapcache += nr_pages;
1283 
1284 	if (PageActive(page) || PageUnevictable(page))
1285 		md->active += nr_pages;
1286 
1287 	if (PageWriteback(page))
1288 		md->writeback += nr_pages;
1289 
1290 	if (PageAnon(page))
1291 		md->anon += nr_pages;
1292 
1293 	if (count > md->mapcount_max)
1294 		md->mapcount_max = count;
1295 
1296 	md->node[page_to_nid(page)] += nr_pages;
1297 }
1298 
1299 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1300 		unsigned long addr)
1301 {
1302 	struct page *page;
1303 	int nid;
1304 
1305 	if (!pte_present(pte))
1306 		return NULL;
1307 
1308 	page = vm_normal_page(vma, addr, pte);
1309 	if (!page)
1310 		return NULL;
1311 
1312 	if (PageReserved(page))
1313 		return NULL;
1314 
1315 	nid = page_to_nid(page);
1316 	if (!node_isset(nid, node_states[N_MEMORY]))
1317 		return NULL;
1318 
1319 	return page;
1320 }
1321 
1322 static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1323 		unsigned long end, struct mm_walk *walk)
1324 {
1325 	struct numa_maps *md;
1326 	spinlock_t *ptl;
1327 	pte_t *orig_pte;
1328 	pte_t *pte;
1329 
1330 	md = walk->private;
1331 
1332 	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
1333 		pte_t huge_pte = *(pte_t *)pmd;
1334 		struct page *page;
1335 
1336 		page = can_gather_numa_stats(huge_pte, md->vma, addr);
1337 		if (page)
1338 			gather_stats(page, md, pte_dirty(huge_pte),
1339 				     HPAGE_PMD_SIZE/PAGE_SIZE);
1340 		spin_unlock(ptl);
1341 		return 0;
1342 	}
1343 
1344 	if (pmd_trans_unstable(pmd))
1345 		return 0;
1346 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1347 	do {
1348 		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
1349 		if (!page)
1350 			continue;
1351 		gather_stats(page, md, pte_dirty(*pte), 1);
1352 
1353 	} while (pte++, addr += PAGE_SIZE, addr != end);
1354 	pte_unmap_unlock(orig_pte, ptl);
1355 	return 0;
1356 }
1357 #ifdef CONFIG_HUGETLB_PAGE
1358 static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1359 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1360 {
1361 	struct numa_maps *md;
1362 	struct page *page;
1363 
1364 	if (!pte_present(*pte))
1365 		return 0;
1366 
1367 	page = pte_page(*pte);
1368 	if (!page)
1369 		return 0;
1370 
1371 	md = walk->private;
1372 	gather_stats(page, md, pte_dirty(*pte), 1);
1373 	return 0;
1374 }
1375 
1376 #else
1377 static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
1378 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1379 {
1380 	return 0;
1381 }
1382 #endif
1383 
1384 /*
1385  * Display pages allocated per node and memory policy via /proc.
1386  */
1387 static int show_numa_map(struct seq_file *m, void *v, int is_pid)
1388 {
1389 	struct numa_maps_private *numa_priv = m->private;
1390 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1391 	struct vm_area_struct *vma = v;
1392 	struct numa_maps *md = &numa_priv->md;
1393 	struct file *file = vma->vm_file;
1394 	struct task_struct *task = proc_priv->task;
1395 	struct mm_struct *mm = vma->vm_mm;
1396 	struct mm_walk walk = {};
1397 	struct mempolicy *pol;
1398 	char buffer[64];
1399 	int nid;
1400 
1401 	if (!mm)
1402 		return 0;
1403 
1404 	/* Ensure we start with an empty set of numa_maps statistics. */
1405 	memset(md, 0, sizeof(*md));
1406 
1407 	md->vma = vma;
1408 
1409 	walk.hugetlb_entry = gather_hugetbl_stats;
1410 	walk.pmd_entry = gather_pte_stats;
1411 	walk.private = md;
1412 	walk.mm = mm;
1413 
1414 	pol = get_vma_policy(task, vma, vma->vm_start);
1415 	mpol_to_str(buffer, sizeof(buffer), pol);
1416 	mpol_cond_put(pol);
1417 
1418 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1419 
1420 	if (file) {
1421 		seq_puts(m, " file=");
1422 		seq_path(m, &file->f_path, "\n\t= ");
1423 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1424 		seq_puts(m, " heap");
1425 	} else {
1426 		pid_t tid = vm_is_stack(task, vma, is_pid);
1427 		if (tid != 0) {
1428 			/*
1429 			 * Thread stack in /proc/PID/task/TID/maps or
1430 			 * the main process stack.
1431 			 */
1432 			if (!is_pid || (vma->vm_start <= mm->start_stack &&
1433 			    vma->vm_end >= mm->start_stack))
1434 				seq_puts(m, " stack");
1435 			else
1436 				seq_printf(m, " stack:%d", tid);
1437 		}
1438 	}
1439 
1440 	if (is_vm_hugetlb_page(vma))
1441 		seq_puts(m, " huge");
1442 
1443 	walk_page_range(vma->vm_start, vma->vm_end, &walk);
1444 
1445 	if (!md->pages)
1446 		goto out;
1447 
1448 	if (md->anon)
1449 		seq_printf(m, " anon=%lu", md->anon);
1450 
1451 	if (md->dirty)
1452 		seq_printf(m, " dirty=%lu", md->dirty);
1453 
1454 	if (md->pages != md->anon && md->pages != md->dirty)
1455 		seq_printf(m, " mapped=%lu", md->pages);
1456 
1457 	if (md->mapcount_max > 1)
1458 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
1459 
1460 	if (md->swapcache)
1461 		seq_printf(m, " swapcache=%lu", md->swapcache);
1462 
1463 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
1464 		seq_printf(m, " active=%lu", md->active);
1465 
1466 	if (md->writeback)
1467 		seq_printf(m, " writeback=%lu", md->writeback);
1468 
1469 	for_each_node_state(nid, N_MEMORY)
1470 		if (md->node[nid])
1471 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
1472 out:
1473 	seq_putc(m, '\n');
1474 
1475 	if (m->count < m->size)
1476 		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
1477 	return 0;
1478 }
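
/*
 * Example of a resulting /proc/PID/numa_maps line (made-up numbers, for
 * illustration only):
 *
 *   7f4e30000000 default file=/usr/lib/libc.so.6 mapped=120 mapmax=8 N0=120
 *
 * i.e. the vma start address, the mempolicy string, the mapping name or a
 * heap/stack/huge marker, and then only those per-vma counters that are
 * non-zero.
 */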
1479 
1480 static int show_pid_numa_map(struct seq_file *m, void *v)
1481 {
1482 	return show_numa_map(m, v, 1);
1483 }
1484 
1485 static int show_tid_numa_map(struct seq_file *m, void *v)
1486 {
1487 	return show_numa_map(m, v, 0);
1488 }
1489 
1490 static const struct seq_operations proc_pid_numa_maps_op = {
1491 	.start  = m_start,
1492 	.next   = m_next,
1493 	.stop   = m_stop,
1494 	.show   = show_pid_numa_map,
1495 };
1496 
1497 static const struct seq_operations proc_tid_numa_maps_op = {
1498 	.start  = m_start,
1499 	.next   = m_next,
1500 	.stop   = m_stop,
1501 	.show   = show_tid_numa_map,
1502 };
1503 
1504 static int numa_maps_open(struct inode *inode, struct file *file,
1505 			  const struct seq_operations *ops)
1506 {
1507 	struct numa_maps_private *priv;
1508 	int ret = -ENOMEM;
1509 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1510 	if (priv) {
1511 		priv->proc_maps.pid = proc_pid(inode);
1512 		ret = seq_open(file, ops);
1513 		if (!ret) {
1514 			struct seq_file *m = file->private_data;
1515 			m->private = priv;
1516 		} else {
1517 			kfree(priv);
1518 		}
1519 	}
1520 	return ret;
1521 }
1522 
1523 static int pid_numa_maps_open(struct inode *inode, struct file *file)
1524 {
1525 	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
1526 }
1527 
1528 static int tid_numa_maps_open(struct inode *inode, struct file *file)
1529 {
1530 	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
1531 }
1532 
1533 const struct file_operations proc_pid_numa_maps_operations = {
1534 	.open		= pid_numa_maps_open,
1535 	.read		= seq_read,
1536 	.llseek		= seq_lseek,
1537 	.release	= seq_release_private,
1538 };
1539 
1540 const struct file_operations proc_tid_numa_maps_operations = {
1541 	.open		= tid_numa_maps_open,
1542 	.read		= seq_read,
1543 	.llseek		= seq_lseek,
1544 	.release	= seq_release_private,
1545 };
1546 #endif /* CONFIG_NUMA */
1547