/* fs/proc/task_mmu.c (revision 7dd65feb) */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? Not
	 * worth the effort; such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

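/*
 * Report the total/resident/shared/text/data counts (in pages) that
 * back /proc/PID/statm; the total mapping size is the return value.
 */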
int task_statm(struct mm_struct *mm, int *shared, int *text,
	       int *data, int *resident)
{
	*shared = get_mm_counter(mm, file_rss);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, anon_rss);
	return mm->total_vm;
}

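/*
 * Pad out to a fixed column (wider on 64-bit, where the addresses
 * printed by show_map_vma() are longer) so the file name and the
 * [heap]/[stack] annotations line up.
 */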
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

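/*
 * seq_file iterator for the map files: m_start()/m_next()/m_stop()
 * walk the task's vma list under mmap_sem, with the gate vma (if any)
 * visited last as tail_vma.
 */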
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per-syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr so that the
	 * find_vma() below usually hits the mmap_cache. last_addr is
	 * zero at the beginning and after lseek, and -1 once the end
	 * of the vmas has been reached.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return NULL;

	mm = mm_for_maps(priv->task);
	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task);
	priv->tail_vma = tail_vma;

	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the vma index *pos is within range, then scan
	 * sequentially to the *pos'th vma.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	/* End of vmas has been reached */
	m->version = (tail_vma != NULL) ? 0 : -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma) ? tail_vma : NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	unsigned long flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	dev_t dev = 0;
	int len;

	if (file) {
		struct inode *inode = file->f_path.dentry->d_inode;
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			vma->vm_start,
			vma->vm_end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
	} else {
		const char *name = arch_vma_name(vma);
		if (!name) {
			if (mm) {
				if (vma->vm_start <= mm->start_brk &&
						vma->vm_end >= mm->brk) {
					name = "[heap]";
				} else if (vma->vm_start <= mm->start_stack &&
					   vma->vm_end >= mm->start_stack) {
					name = "[stack]";
				} else {
					unsigned long stack_start;
					struct proc_maps_private *pmp;

					pmp = m->private;
					stack_start = pmp->task->stack_start;

					if (vma->vm_start <= stack_start &&
					    vma->vm_end >= stack_start) {
						pad_len_spaces(m, len);
						seq_printf(m,
						 "[threadstack:%08lx]",
#ifdef CONFIG_STACK_GROWSUP
						 vma->vm_end - stack_start
#else
						 stack_start - vma->vm_start
#endif
						);
					}
				}
			} else {
				name = "[vdso]";
			}
		}
		if (name) {
			pad_len_spaces(m, len);
			seq_puts(m, name);
		}
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_maps_operations = {
	.open		= maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
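
/*
 * Worked example of the fixed-point accounting (illustrative
 * arithmetic, assuming PAGE_SIZE == 4096): a page mapped by three
 * tasks contributes
 *
 *	(4096 << PSS_SHIFT) / 3 = 16777216 / 3 = 5592405
 *
 * fixed-point units, i.e. 5592405 >> PSS_SHIFT = 1365 bytes, with the
 * sub-byte remainder preserved in the low PSS_SHIFT bits as the
 * contributions accumulate.
 */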

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long swap;
	u64 pss;
};

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;
	int mapcount;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (is_swap_pte(ptent)) {
			mss->swap += PAGE_SIZE;
			continue;
		}

		if (!pte_present(ptent))
			continue;

		mss->resident += PAGE_SIZE;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Accumulate the size in pages that have been accessed. */
		if (pte_young(ptent) || PageReferenced(page))
			mss->referenced += PAGE_SIZE;
		mapcount = page_mapcount(page);
		if (mapcount >= 2) {
			if (pte_dirty(ptent))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (pte_dirty(ptent))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT);
		}
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int show_smap(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
	return 0;
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

const struct file_operations proc_smaps_operations = {
	.open		= smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL 1
#define CLEAR_REFS_ANON 2
#define CLEAR_REFS_MAPPED 3
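
/*
 * Usage sketch from userspace (illustrative; the semantics of each
 * value are spelled out in clear_refs_write() below):
 *
 *	echo 1 > /proc/PID/clear_refs	- clear referenced bits of all pages
 *	echo 2 > /proc/PID/clear_refs	- anonymous pages only
 *	echo 3 > /proc/PID/clear_refs	- file-mapped pages only
 */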

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	long type;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	if (strict_strtol(strstrip(buffer), 10, &type))
		return -EINVAL;
	if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
		return -EINVAL;
	task = get_proc_task(file->f_path.dentry->d_inode);
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects
			 * file-mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
};

struct pagemapread {
	u64 __user *out, *end;
};

#define PM_ENTRY_BYTES      sizeof(u64)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1
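
/*
 * Decoding example (illustrative; mirrors the macros above): a present
 * 4K page at pfn 0x1234 is reported as
 *
 *	PM_PFRAME(0x1234) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT
 *
 * and the fields can be recovered with
 *
 *	pfn     = entry & PM_PFRAME_MASK;
 *	shift   = (entry & PM_PSHIFT_MASK) >> PM_PSHIFT_OFFSET;
 *	present = !!(entry & PM_PRESENT);
 */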

static int add_to_pagemap(unsigned long addr, u64 pfn,
			  struct pagemapread *pm)
{
	if (put_user(pfn, pm->out))
		return -EFAULT;
	pm->out++;
	if (pm->out >= pm->end)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
		if (err)
			break;
	}
	return err;
}

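/*
 * Pack a swapped-out pte as described in the format comment above
 * pagemap_read(): swap type in the low MAX_SWAPFILES_SHIFT bits, swap
 * offset in the bits above them.
 */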
static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
	swp_entry_t e = pte_to_swp_entry(pte);
	return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

static u64 pte_to_pagemap_entry(pte_t pte)
{
	u64 pme = 0;
	if (is_swap_pte(pte))
		pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
	else if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte))
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		/*
		 * Check to see if we've left 'vma' behind
		 * and need a new, higher one.
		 */
		if (vma && (addr >= vma->vm_end))
			vma = find_vma(walk->mm, addr);

		/*
		 * Check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma.
		 */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pfn = pte_to_pagemap_entry(*pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
{
	u64 pme = 0;
	if (pte_present(pte))
		pme = PM_PFRAME(pte_pfn(pte) + offset)
			| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
	return pme;
}

static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
				 unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	struct hstate *hs = NULL;
	int err = 0;

	vma = find_vma(walk->mm, addr);
	if (vma)
		hs = hstate_vma(vma);
	for (; addr != end; addr += PAGE_SIZE) {
		u64 pfn = PM_NOT_PRESENT;

		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			if (vma)
				hs = hstate_vma(vma);
		}

		if (vma && (vma->vm_start <= addr) && is_vm_hugetlb_page(vma)) {
			/* calculate pfn of the "raw" page in the hugepage. */
			int offset = (addr & ~huge_page_mask(hs)) >> PAGE_SHIFT;
			pfn = huge_pte_to_pagemap_entry(*pte, offset);
		}
		err = add_to_pagemap(addr, pfn, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    reserved for future use
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
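
/*
 * Userspace sketch (illustrative only; plain POSIX calls, error
 * handling omitted): look up the entry for one virtual address.
 *
 *	uint64_t entry;
 *	long psz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/proc/self/pagemap", O_RDONLY);
 *
 *	if (pread(fd, &entry, sizeof(entry),
 *		  (vaddr / psz) * sizeof(entry)) == sizeof(entry) &&
 *	    (entry & (1ULL << 63)))
 *		pfn = entry & ((1ULL << 55) - 1);	(bits 0-54, present)
 */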
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
	struct page **pages, *page;
	unsigned long uaddr, uend;
	struct mm_struct *mm;
	struct pagemapread pm;
	int pagecount;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;

	if (!task)
		goto out;

	ret = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto out_task;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;

	if (!count)
		goto out_task;

	mm = get_task_mm(task);
	if (!mm)
		goto out_task;

	uaddr = (unsigned long)buf & PAGE_MASK;
	uend = (unsigned long)(buf + count);
	pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
	ret = 0;
	if (pagecount == 0)
		goto out_mm;
	pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
	ret = -ENOMEM;
	if (!pages)
		goto out_mm;

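	/*
	 * Pin the pages backing the user buffer; the walk below stores
	 * entries with put_user() in add_to_pagemap(), and the pages
	 * are dirtied and released once the walk is done.
	 */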
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr, pagecount,
			     1, 0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (ret < 0)
		goto out_free;

	if (ret != pagecount) {
		pagecount = ret;
		ret = -EFAULT;
		goto out_pages;
	}

	pm.out = (u64 __user *)buf;
	pm.end = (u64 __user *)(buf + count);

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
	if (ret == PM_END_OF_BUFFER)
		ret = 0;
	/* don't need mmap_sem for these, but this looks cleaner */
	*ppos += (char __user *)pm.out - buf;
	if (!ret)
		ret = (char __user *)pm.out - buf;

out_pages:
	for (; pagecount; pagecount--) {
		page = pages[pagecount-1];
		if (!PageReserved(page))
			SetPageDirty(page);
		page_cache_release(page);
	}
out_free:
	kfree(pages);
out_mm:
	mmput(mm);
out_task:
	put_task_struct(task);
out:
	return ret;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA
extern int show_numa_map(struct seq_file *m, void *v);

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_numa_maps_op);
}

const struct file_operations proc_numa_maps_operations = {
	.open		= numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif