/* fs/proc/task_mmu.c (revision 3c6a73cc) */
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers are not
	 * worth the effort; such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE * sizeof(pte_t) *
		 atomic_long_read(&mm->nr_ptes)) >> 10,
		swap << (PAGE_SHIFT-10));
}
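
/*
 * Illustrative only (not from the kernel tree): task_mem() backs the
 * memory section of /proc/PID/status, so with the format strings above a
 * read might yield (values hypothetical):
 *
 *	VmPeak:	   10240 kB
 *	VmSize:	    8192 kB
 *	VmRSS:	    4096 kB
 *	VmSwap:	       0 kB
 *
 * Each counter is converted from pages to kB with << (PAGE_SHIFT-10).
 */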

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
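
/*
 * Aside (an assumption about the read side, for orientation only): these
 * values back /proc/PID/statm, which reports them in pages rather than kB,
 * in the order: size resident shared text lib data dt.
 */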

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
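
/*
 * Illustrative sketch of one output line (all values hypothetical):
 *
 *	00400000-0040b000 r-xp 00000000 08:01 1234567    /bin/true
 *
 * i.e. start-end, rwx plus s(hared)/p(rivate), file offset, major:minor
 * device, inode, then the pathname or the [heap]/[stack]/[vdso] marker
 * resolved above.
 */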

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter, so (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
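
/*
 * Worked example of the fixed-point accounting (illustrative): a 4096-byte
 * page mapped by three processes contributes
 *
 *	pss += (4096 << PSS_SHIFT) / 3;		->  pss += 5592405
 *
 * to each of them, and (5592405 >> PSS_SHIFT) == 1365 bytes, against an
 * exact share of 4096/3 = 1365.33 bytes, so the per-page rounding error
 * stays below one byte.
 */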

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(ptl);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}
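
/*
 * Illustrative output (flag set hypothetical): a private, readable,
 * executable file mapping might emit
 *
 *	VmFlags: rd ex mr mw me dw
 *
 * one two-letter mnemonic from the table above per set vm_flags bit.
 */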

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
				mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}
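
/*
 * Illustrative smaps entry (values hypothetical): the maps line from
 * show_map_vma() is followed by the counters accumulated above, e.g.
 *
 *	Size:                128 kB
 *	Rss:                  64 kB
 *	Pss:                  32 kB
 *
 * and so on through Locked:, with Nonlinear: appearing only for
 * VM_NONLINEAR mappings.
 */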

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * We do not want to have constant page-shift bits sitting in
 * pagemap entries and are about to reuse them some time soon.
 *
 * Here's the "migration strategy":
 * 1. when the system boots these bits remain what they are,
 *    but a warning about the future change is printed in the log;
 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 *    this flag is set to denote that the user is aware of the
 *    new API and those page-shift bits change their meaning.
 *    The respective warning is printed in dmesg;
 * 3. in a couple of releases we will remove all mentions
 *    of page-shift in pagemap entries.
 */

static bool soft_dirty_cleared __read_mostly;

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_LAST,
};
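
/*
 * Interface sketch (illustrative): userspace selects one of the modes
 * above by writing its decimal value, e.g.
 *
 *	echo 1 > /proc/PID/clear_refs	(clear referenced bits, all pages)
 *	echo 4 > /proc/PID/clear_refs	(clear the soft-dirty bits)
 *
 * clear_refs_write() below parses the value and walks the page tables.
 */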

struct clear_refs_private {
	struct vm_area_struct *vma;
	enum clear_refs_types type;
};

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	} else if (pte_file(ptent)) {
		ptent = pte_file_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = cp->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	if (type == CLEAR_REFS_SOFT_DIRTY) {
		soft_dirty_cleared = true;
		pr_warn_once("The pagemap bits 55-60 have changed their meaning!"
			     " See the linux/Documentation/vm/pagemap.txt for "
			     "details.\n");
	}

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
			.private = &cp,
		};
		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			cp.vma = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects
			 * file-mapped pages.
			 *
			 * Writing 4 to /proc/pid/clear_refs affects all pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool v2;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define __PM_PSHIFT(x)      (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)
/* in "new" pagemap pshift bits are occupied with more status bits */
#define PM_STATUS2(v2, x)   (__PM_PSHIFT(v2 ? x : PAGE_SHIFT))

#define __PM_SOFT_DIRTY      (1LL)
#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT(v2)  PM_STATUS2(v2, 0)
#define PM_END_OF_BUFFER    1
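
/*
 * Decoding sketch using the masks above (illustrative, not part of the
 * original file):
 *
 *	u64 pme = ...;				entry read from pagemap
 *	bool present = pme & PM_PRESENT;	status bit 63
 *	bool swapped = pme & PM_SWAP;		status bit 62
 *	u64 pfn = PM_PFRAME(pme);		bits 0-54 when present
 */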

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme.pme |= PM_STATUS2(pm->v2, __PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;
	int flags2 = 0;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags2 |= __PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags2 |= __PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		if (vma->vm_flags & VM_SOFTDIRTY)
			flags2 |= __PM_SOFT_DIRTY;
		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if ((vma->vm_flags & VM_SOFTDIRTY))
		flags2 |= __PM_SOFT_DIRTY;

	*pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
	/*
	 * Currently the pmd for thp is always present because thp cannot be
	 * swapped out, migrated, or HWPOISONed (it is split in such cases
	 * instead). This if-check is just to prepare for a future
	 * implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, pmd_flags2));
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		int pmd_flags2;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(*pmd))
			pmd_flags2 = __PM_SOFT_DIRTY;
		else
			pmd_flags2 = 0;

		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;
			pagemap_entry_t pme;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	while (1) {
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				return err;
		}

		if (!vma || vma->vm_start >= end)
			break;
		/*
		 * We can't possibly be in a hugetlb VMA. In general,
		 * for a mm_walk with a pmd_entry and a hugetlb_entry,
		 * the pmd_entry can only be called on addresses in a
		 * hugetlb if the walk starts in a non-hugetlb VMA and
		 * spans a hugepage VMA. Since pagemap_read walks are
		 * PMD-sized and PMD-aligned, this will never be true.
		 */
		BUG_ON(is_vm_hugetlb_page(vma));

		/* Addresses in the VMA. */
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			pagemap_entry_t pme;
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
			pte_unmap(pte);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				return err;
		}

		if (addr == end)
			break;

		vma = find_vma(walk->mm, addr);
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
					pte_t pte, int offset, int flags2)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)	|
				PM_STATUS2(pm->v2, flags2)		|
				PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT(pm->v2)			|
				PM_STATUS2(pm->v2, flags2));
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma;
	int err = 0;
	int flags2;
	pagemap_entry_t pme;

	vma = find_vma(walk->mm, addr);
	WARN_ON_ONCE(!vma);

	if (vma && (vma->vm_flags & VM_SOFTDIRTY))
		flags2 = __PM_SOFT_DIRTY;
	else
		flags2 = 0;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset, flags2);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
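
/*
 * Hypothetical userspace sketch (an assumption, not part of this file):
 * the entry for a single virtual address sits at file offset
 * (vaddr / page_size) * 8, so a lookup reduces to one pread():
 *
 *	uint64_t pme;
 *	long psz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *	if (pread(fd, &pme, sizeof(pme), (vaddr / psz) * sizeof(pme)) == 8 &&
 *	    (pme & (1ULL << 63)))		// bit 63: page present
 *		pfn = pme & ((1ULL << 55) - 1);	// bits 0-54: frame number
 *
 * The bit positions follow the table above.
 */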
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.v2 = soft_dirty_cleared;
	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
			"to stop being page-shift some time soon. See the "
			"linux/Documentation/vm/pagemap.txt for details.\n");
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetlb_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}
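
/*
 * Illustrative numa_maps line (values hypothetical):
 *
 *	00400000 default file=/bin/true mapped=11 mapmax=2 N0=4 N1=7
 *
 * The start address and policy string always appear; the remaining
 * fields are emitted only when non-zero, per the checks above.
 */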

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start  = m_start,
	.next   = m_next,
	.stop   = m_stop,
	.show   = show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */