1 #include <linux/mm.h> 2 #include <linux/vmacache.h> 3 #include <linux/hugetlb.h> 4 #include <linux/huge_mm.h> 5 #include <linux/mount.h> 6 #include <linux/seq_file.h> 7 #include <linux/highmem.h> 8 #include <linux/ptrace.h> 9 #include <linux/slab.h> 10 #include <linux/pagemap.h> 11 #include <linux/mempolicy.h> 12 #include <linux/rmap.h> 13 #include <linux/swap.h> 14 #include <linux/swapops.h> 15 #include <linux/mmu_notifier.h> 16 #include <linux/page_idle.h> 17 #include <linux/shmem_fs.h> 18 19 #include <asm/elf.h> 20 #include <asm/uaccess.h> 21 #include <asm/tlbflush.h> 22 #include "internal.h" 23 24 void task_mem(struct seq_file *m, struct mm_struct *mm) 25 { 26 unsigned long text, lib, swap, ptes, pmds, anon, file, shmem; 27 unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss; 28 29 anon = get_mm_counter(mm, MM_ANONPAGES); 30 file = get_mm_counter(mm, MM_FILEPAGES); 31 shmem = get_mm_counter(mm, MM_SHMEMPAGES); 32 33 /* 34 * Note: to minimize their overhead, mm maintains hiwater_vm and 35 * hiwater_rss only when about to *lower* total_vm or rss. Any 36 * collector of these hiwater stats must therefore get total_vm 37 * and rss too, which will usually be the higher. Barriers? not 38 * worth the effort, such snapshots can always be inconsistent. 39 */ 40 hiwater_vm = total_vm = mm->total_vm; 41 if (hiwater_vm < mm->hiwater_vm) 42 hiwater_vm = mm->hiwater_vm; 43 hiwater_rss = total_rss = anon + file + shmem; 44 if (hiwater_rss < mm->hiwater_rss) 45 hiwater_rss = mm->hiwater_rss; 46 47 text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10; 48 lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text; 49 swap = get_mm_counter(mm, MM_SWAPENTS); 50 ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes); 51 pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm); 52 seq_printf(m, 53 "VmPeak:\t%8lu kB\n" 54 "VmSize:\t%8lu kB\n" 55 "VmLck:\t%8lu kB\n" 56 "VmPin:\t%8lu kB\n" 57 "VmHWM:\t%8lu kB\n" 58 "VmRSS:\t%8lu kB\n" 59 "RssAnon:\t%8lu kB\n" 60 "RssFile:\t%8lu kB\n" 61 "RssShmem:\t%8lu kB\n" 62 "VmData:\t%8lu kB\n" 63 "VmStk:\t%8lu kB\n" 64 "VmExe:\t%8lu kB\n" 65 "VmLib:\t%8lu kB\n" 66 "VmPTE:\t%8lu kB\n" 67 "VmPMD:\t%8lu kB\n" 68 "VmSwap:\t%8lu kB\n", 69 hiwater_vm << (PAGE_SHIFT-10), 70 total_vm << (PAGE_SHIFT-10), 71 mm->locked_vm << (PAGE_SHIFT-10), 72 mm->pinned_vm << (PAGE_SHIFT-10), 73 hiwater_rss << (PAGE_SHIFT-10), 74 total_rss << (PAGE_SHIFT-10), 75 anon << (PAGE_SHIFT-10), 76 file << (PAGE_SHIFT-10), 77 shmem << (PAGE_SHIFT-10), 78 mm->data_vm << (PAGE_SHIFT-10), 79 mm->stack_vm << (PAGE_SHIFT-10), text, lib, 80 ptes >> 10, 81 pmds >> 10, 82 swap << (PAGE_SHIFT-10)); 83 hugetlb_report_usage(m, mm); 84 } 85 86 unsigned long task_vsize(struct mm_struct *mm) 87 { 88 return PAGE_SIZE * mm->total_vm; 89 } 90 91 unsigned long task_statm(struct mm_struct *mm, 92 unsigned long *shared, unsigned long *text, 93 unsigned long *data, unsigned long *resident) 94 { 95 *shared = get_mm_counter(mm, MM_FILEPAGES) + 96 get_mm_counter(mm, MM_SHMEMPAGES); 97 *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) 98 >> PAGE_SHIFT; 99 *data = mm->data_vm + mm->stack_vm; 100 *resident = *shared + get_mm_counter(mm, MM_ANONPAGES); 101 return mm->total_vm; 102 } 103 104 #ifdef CONFIG_NUMA 105 /* 106 * Save get_task_policy() for show_numa_map(). 107 */ 108 static void hold_task_mempolicy(struct proc_maps_private *priv) 109 { 110 struct task_struct *task = priv->task; 111 112 task_lock(task); 113 priv->task_mempolicy = get_task_policy(task); 114 mpol_get(priv->task_mempolicy); 115 task_unlock(task); 116 } 117 static void release_task_mempolicy(struct proc_maps_private *priv) 118 { 119 mpol_put(priv->task_mempolicy); 120 } 121 #else 122 static void hold_task_mempolicy(struct proc_maps_private *priv) 123 { 124 } 125 static void release_task_mempolicy(struct proc_maps_private *priv) 126 { 127 } 128 #endif 129 130 static void vma_stop(struct proc_maps_private *priv) 131 { 132 struct mm_struct *mm = priv->mm; 133 134 release_task_mempolicy(priv); 135 up_read(&mm->mmap_sem); 136 mmput(mm); 137 } 138 139 static struct vm_area_struct * 140 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) 141 { 142 if (vma == priv->tail_vma) 143 return NULL; 144 return vma->vm_next ?: priv->tail_vma; 145 } 146 147 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) 148 { 149 if (m->count < m->size) /* vma is copied successfully */ 150 m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL; 151 } 152 153 static void *m_start(struct seq_file *m, loff_t *ppos) 154 { 155 struct proc_maps_private *priv = m->private; 156 unsigned long last_addr = m->version; 157 struct mm_struct *mm; 158 struct vm_area_struct *vma; 159 unsigned int pos = *ppos; 160 161 /* See m_cache_vma(). Zero at the start or after lseek. */ 162 if (last_addr == -1UL) 163 return NULL; 164 165 priv->task = get_proc_task(priv->inode); 166 if (!priv->task) 167 return ERR_PTR(-ESRCH); 168 169 mm = priv->mm; 170 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) 171 return NULL; 172 173 down_read(&mm->mmap_sem); 174 hold_task_mempolicy(priv); 175 priv->tail_vma = get_gate_vma(mm); 176 177 if (last_addr) { 178 vma = find_vma(mm, last_addr); 179 if (vma && (vma = m_next_vma(priv, vma))) 180 return vma; 181 } 182 183 m->version = 0; 184 if (pos < mm->map_count) { 185 for (vma = mm->mmap; pos; pos--) { 186 m->version = vma->vm_start; 187 vma = vma->vm_next; 188 } 189 return vma; 190 } 191 192 /* we do not bother to update m->version in this case */ 193 if (pos == mm->map_count && priv->tail_vma) 194 return priv->tail_vma; 195 196 vma_stop(priv); 197 return NULL; 198 } 199 200 static void *m_next(struct seq_file *m, void *v, loff_t *pos) 201 { 202 struct proc_maps_private *priv = m->private; 203 struct vm_area_struct *next; 204 205 (*pos)++; 206 next = m_next_vma(priv, v); 207 if (!next) 208 vma_stop(priv); 209 return next; 210 } 211 212 static void m_stop(struct seq_file *m, void *v) 213 { 214 struct proc_maps_private *priv = m->private; 215 216 if (!IS_ERR_OR_NULL(v)) 217 vma_stop(priv); 218 if (priv->task) { 219 put_task_struct(priv->task); 220 priv->task = NULL; 221 } 222 } 223 224 static int proc_maps_open(struct inode *inode, struct file *file, 225 const struct seq_operations *ops, int psize) 226 { 227 struct proc_maps_private *priv = __seq_open_private(file, ops, psize); 228 229 if (!priv) 230 return -ENOMEM; 231 232 priv->inode = inode; 233 priv->mm = proc_mem_open(inode, PTRACE_MODE_READ); 234 if (IS_ERR(priv->mm)) { 235 int err = PTR_ERR(priv->mm); 236 237 seq_release_private(inode, file); 238 return err; 239 } 240 241 return 0; 242 } 243 244 static int proc_map_release(struct inode *inode, struct file *file) 245 { 246 struct seq_file *seq = file->private_data; 247 struct proc_maps_private *priv = seq->private; 248 249 if (priv->mm) 250 mmdrop(priv->mm); 251 252 return seq_release_private(inode, file); 253 } 254 255 static int do_maps_open(struct inode *inode, struct file *file, 256 const struct seq_operations *ops) 257 { 258 return proc_maps_open(inode, file, ops, 259 sizeof(struct proc_maps_private)); 260 } 261 262 static pid_t pid_of_stack(struct proc_maps_private *priv, 263 struct vm_area_struct *vma, bool is_pid) 264 { 265 struct inode *inode = priv->inode; 266 struct task_struct *task; 267 pid_t ret = 0; 268 269 rcu_read_lock(); 270 task = pid_task(proc_pid(inode), PIDTYPE_PID); 271 if (task) { 272 task = task_of_stack(task, vma, is_pid); 273 if (task) 274 ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); 275 } 276 rcu_read_unlock(); 277 278 return ret; 279 } 280 281 static void 282 show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) 283 { 284 struct mm_struct *mm = vma->vm_mm; 285 struct file *file = vma->vm_file; 286 struct proc_maps_private *priv = m->private; 287 vm_flags_t flags = vma->vm_flags; 288 unsigned long ino = 0; 289 unsigned long long pgoff = 0; 290 unsigned long start, end; 291 dev_t dev = 0; 292 const char *name = NULL; 293 294 if (file) { 295 struct inode *inode = file_inode(vma->vm_file); 296 dev = inode->i_sb->s_dev; 297 ino = inode->i_ino; 298 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; 299 } 300 301 /* We don't show the stack guard page in /proc/maps */ 302 start = vma->vm_start; 303 if (stack_guard_page_start(vma, start)) 304 start += PAGE_SIZE; 305 end = vma->vm_end; 306 if (stack_guard_page_end(vma, end)) 307 end -= PAGE_SIZE; 308 309 seq_setwidth(m, 25 + sizeof(void *) * 6 - 1); 310 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ", 311 start, 312 end, 313 flags & VM_READ ? 'r' : '-', 314 flags & VM_WRITE ? 'w' : '-', 315 flags & VM_EXEC ? 'x' : '-', 316 flags & VM_MAYSHARE ? 's' : 'p', 317 pgoff, 318 MAJOR(dev), MINOR(dev), ino); 319 320 /* 321 * Print the dentry name for named mappings, and a 322 * special [heap] marker for the heap: 323 */ 324 if (file) { 325 seq_pad(m, ' '); 326 seq_file_path(m, file, "\n"); 327 goto done; 328 } 329 330 if (vma->vm_ops && vma->vm_ops->name) { 331 name = vma->vm_ops->name(vma); 332 if (name) 333 goto done; 334 } 335 336 name = arch_vma_name(vma); 337 if (!name) { 338 pid_t tid; 339 340 if (!mm) { 341 name = "[vdso]"; 342 goto done; 343 } 344 345 if (vma->vm_start <= mm->brk && 346 vma->vm_end >= mm->start_brk) { 347 name = "[heap]"; 348 goto done; 349 } 350 351 tid = pid_of_stack(priv, vma, is_pid); 352 if (tid != 0) { 353 /* 354 * Thread stack in /proc/PID/task/TID/maps or 355 * the main process stack. 356 */ 357 if (!is_pid || (vma->vm_start <= mm->start_stack && 358 vma->vm_end >= mm->start_stack)) { 359 name = "[stack]"; 360 } else { 361 /* Thread stack in /proc/PID/maps */ 362 seq_pad(m, ' '); 363 seq_printf(m, "[stack:%d]", tid); 364 } 365 } 366 } 367 368 done: 369 if (name) { 370 seq_pad(m, ' '); 371 seq_puts(m, name); 372 } 373 seq_putc(m, '\n'); 374 } 375 376 static int show_map(struct seq_file *m, void *v, int is_pid) 377 { 378 show_map_vma(m, v, is_pid); 379 m_cache_vma(m, v); 380 return 0; 381 } 382 383 static int show_pid_map(struct seq_file *m, void *v) 384 { 385 return show_map(m, v, 1); 386 } 387 388 static int show_tid_map(struct seq_file *m, void *v) 389 { 390 return show_map(m, v, 0); 391 } 392 393 static const struct seq_operations proc_pid_maps_op = { 394 .start = m_start, 395 .next = m_next, 396 .stop = m_stop, 397 .show = show_pid_map 398 }; 399 400 static const struct seq_operations proc_tid_maps_op = { 401 .start = m_start, 402 .next = m_next, 403 .stop = m_stop, 404 .show = show_tid_map 405 }; 406 407 static int pid_maps_open(struct inode *inode, struct file *file) 408 { 409 return do_maps_open(inode, file, &proc_pid_maps_op); 410 } 411 412 static int tid_maps_open(struct inode *inode, struct file *file) 413 { 414 return do_maps_open(inode, file, &proc_tid_maps_op); 415 } 416 417 const struct file_operations proc_pid_maps_operations = { 418 .open = pid_maps_open, 419 .read = seq_read, 420 .llseek = seq_lseek, 421 .release = proc_map_release, 422 }; 423 424 const struct file_operations proc_tid_maps_operations = { 425 .open = tid_maps_open, 426 .read = seq_read, 427 .llseek = seq_lseek, 428 .release = proc_map_release, 429 }; 430 431 /* 432 * Proportional Set Size(PSS): my share of RSS. 433 * 434 * PSS of a process is the count of pages it has in memory, where each 435 * page is divided by the number of processes sharing it. So if a 436 * process has 1000 pages all to itself, and 1000 shared with one other 437 * process, its PSS will be 1500. 438 * 439 * To keep (accumulated) division errors low, we adopt a 64bit 440 * fixed-point pss counter to minimize division errors. So (pss >> 441 * PSS_SHIFT) would be the real byte count. 442 * 443 * A shift of 12 before division means (assuming 4K page size): 444 * - 1M 3-user-pages add up to 8KB errors; 445 * - supports mapcount up to 2^24, or 16M; 446 * - supports PSS up to 2^52 bytes, or 4PB. 447 */ 448 #define PSS_SHIFT 12 449 450 #ifdef CONFIG_PROC_PAGE_MONITOR 451 struct mem_size_stats { 452 unsigned long resident; 453 unsigned long shared_clean; 454 unsigned long shared_dirty; 455 unsigned long private_clean; 456 unsigned long private_dirty; 457 unsigned long referenced; 458 unsigned long anonymous; 459 unsigned long anonymous_thp; 460 unsigned long swap; 461 unsigned long shared_hugetlb; 462 unsigned long private_hugetlb; 463 u64 pss; 464 u64 swap_pss; 465 bool check_shmem_swap; 466 }; 467 468 static void smaps_account(struct mem_size_stats *mss, struct page *page, 469 bool compound, bool young, bool dirty) 470 { 471 int i, nr = compound ? 1 << compound_order(page) : 1; 472 unsigned long size = nr * PAGE_SIZE; 473 474 if (PageAnon(page)) 475 mss->anonymous += size; 476 477 mss->resident += size; 478 /* Accumulate the size in pages that have been accessed. */ 479 if (young || page_is_young(page) || PageReferenced(page)) 480 mss->referenced += size; 481 482 /* 483 * page_count(page) == 1 guarantees the page is mapped exactly once. 484 * If any subpage of the compound page mapped with PTE it would elevate 485 * page_count(). 486 */ 487 if (page_count(page) == 1) { 488 if (dirty || PageDirty(page)) 489 mss->private_dirty += size; 490 else 491 mss->private_clean += size; 492 mss->pss += (u64)size << PSS_SHIFT; 493 return; 494 } 495 496 for (i = 0; i < nr; i++, page++) { 497 int mapcount = page_mapcount(page); 498 499 if (mapcount >= 2) { 500 if (dirty || PageDirty(page)) 501 mss->shared_dirty += PAGE_SIZE; 502 else 503 mss->shared_clean += PAGE_SIZE; 504 mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; 505 } else { 506 if (dirty || PageDirty(page)) 507 mss->private_dirty += PAGE_SIZE; 508 else 509 mss->private_clean += PAGE_SIZE; 510 mss->pss += PAGE_SIZE << PSS_SHIFT; 511 } 512 } 513 } 514 515 #ifdef CONFIG_SHMEM 516 static int smaps_pte_hole(unsigned long addr, unsigned long end, 517 struct mm_walk *walk) 518 { 519 struct mem_size_stats *mss = walk->private; 520 521 mss->swap += shmem_partial_swap_usage( 522 walk->vma->vm_file->f_mapping, addr, end); 523 524 return 0; 525 } 526 #endif 527 528 static void smaps_pte_entry(pte_t *pte, unsigned long addr, 529 struct mm_walk *walk) 530 { 531 struct mem_size_stats *mss = walk->private; 532 struct vm_area_struct *vma = walk->vma; 533 struct page *page = NULL; 534 535 if (pte_present(*pte)) { 536 page = vm_normal_page(vma, addr, *pte); 537 } else if (is_swap_pte(*pte)) { 538 swp_entry_t swpent = pte_to_swp_entry(*pte); 539 540 if (!non_swap_entry(swpent)) { 541 int mapcount; 542 543 mss->swap += PAGE_SIZE; 544 mapcount = swp_swapcount(swpent); 545 if (mapcount >= 2) { 546 u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT; 547 548 do_div(pss_delta, mapcount); 549 mss->swap_pss += pss_delta; 550 } else { 551 mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT; 552 } 553 } else if (is_migration_entry(swpent)) 554 page = migration_entry_to_page(swpent); 555 } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap 556 && pte_none(*pte))) { 557 page = find_get_entry(vma->vm_file->f_mapping, 558 linear_page_index(vma, addr)); 559 if (!page) 560 return; 561 562 if (radix_tree_exceptional_entry(page)) 563 mss->swap += PAGE_SIZE; 564 else 565 page_cache_release(page); 566 567 return; 568 } 569 570 if (!page) 571 return; 572 573 smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); 574 } 575 576 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 577 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 578 struct mm_walk *walk) 579 { 580 struct mem_size_stats *mss = walk->private; 581 struct vm_area_struct *vma = walk->vma; 582 struct page *page; 583 584 /* FOLL_DUMP will return -EFAULT on huge zero page */ 585 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); 586 if (IS_ERR_OR_NULL(page)) 587 return; 588 mss->anonymous_thp += HPAGE_PMD_SIZE; 589 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); 590 } 591 #else 592 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, 593 struct mm_walk *walk) 594 { 595 } 596 #endif 597 598 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, 599 struct mm_walk *walk) 600 { 601 struct vm_area_struct *vma = walk->vma; 602 pte_t *pte; 603 spinlock_t *ptl; 604 605 ptl = pmd_trans_huge_lock(pmd, vma); 606 if (ptl) { 607 smaps_pmd_entry(pmd, addr, walk); 608 spin_unlock(ptl); 609 return 0; 610 } 611 612 if (pmd_trans_unstable(pmd)) 613 return 0; 614 /* 615 * The mmap_sem held all the way back in m_start() is what 616 * keeps khugepaged out of here and from collapsing things 617 * in here. 618 */ 619 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 620 for (; addr != end; pte++, addr += PAGE_SIZE) 621 smaps_pte_entry(pte, addr, walk); 622 pte_unmap_unlock(pte - 1, ptl); 623 cond_resched(); 624 return 0; 625 } 626 627 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) 628 { 629 /* 630 * Don't forget to update Documentation/ on changes. 631 */ 632 static const char mnemonics[BITS_PER_LONG][2] = { 633 /* 634 * In case if we meet a flag we don't know about. 635 */ 636 [0 ... (BITS_PER_LONG-1)] = "??", 637 638 [ilog2(VM_READ)] = "rd", 639 [ilog2(VM_WRITE)] = "wr", 640 [ilog2(VM_EXEC)] = "ex", 641 [ilog2(VM_SHARED)] = "sh", 642 [ilog2(VM_MAYREAD)] = "mr", 643 [ilog2(VM_MAYWRITE)] = "mw", 644 [ilog2(VM_MAYEXEC)] = "me", 645 [ilog2(VM_MAYSHARE)] = "ms", 646 [ilog2(VM_GROWSDOWN)] = "gd", 647 [ilog2(VM_PFNMAP)] = "pf", 648 [ilog2(VM_DENYWRITE)] = "dw", 649 #ifdef CONFIG_X86_INTEL_MPX 650 [ilog2(VM_MPX)] = "mp", 651 #endif 652 [ilog2(VM_LOCKED)] = "lo", 653 [ilog2(VM_IO)] = "io", 654 [ilog2(VM_SEQ_READ)] = "sr", 655 [ilog2(VM_RAND_READ)] = "rr", 656 [ilog2(VM_DONTCOPY)] = "dc", 657 [ilog2(VM_DONTEXPAND)] = "de", 658 [ilog2(VM_ACCOUNT)] = "ac", 659 [ilog2(VM_NORESERVE)] = "nr", 660 [ilog2(VM_HUGETLB)] = "ht", 661 [ilog2(VM_ARCH_1)] = "ar", 662 [ilog2(VM_DONTDUMP)] = "dd", 663 #ifdef CONFIG_MEM_SOFT_DIRTY 664 [ilog2(VM_SOFTDIRTY)] = "sd", 665 #endif 666 [ilog2(VM_MIXEDMAP)] = "mm", 667 [ilog2(VM_HUGEPAGE)] = "hg", 668 [ilog2(VM_NOHUGEPAGE)] = "nh", 669 [ilog2(VM_MERGEABLE)] = "mg", 670 [ilog2(VM_UFFD_MISSING)]= "um", 671 [ilog2(VM_UFFD_WP)] = "uw", 672 }; 673 size_t i; 674 675 seq_puts(m, "VmFlags: "); 676 for (i = 0; i < BITS_PER_LONG; i++) { 677 if (vma->vm_flags & (1UL << i)) { 678 seq_printf(m, "%c%c ", 679 mnemonics[i][0], mnemonics[i][1]); 680 } 681 } 682 seq_putc(m, '\n'); 683 } 684 685 #ifdef CONFIG_HUGETLB_PAGE 686 static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, 687 unsigned long addr, unsigned long end, 688 struct mm_walk *walk) 689 { 690 struct mem_size_stats *mss = walk->private; 691 struct vm_area_struct *vma = walk->vma; 692 struct page *page = NULL; 693 694 if (pte_present(*pte)) { 695 page = vm_normal_page(vma, addr, *pte); 696 } else if (is_swap_pte(*pte)) { 697 swp_entry_t swpent = pte_to_swp_entry(*pte); 698 699 if (is_migration_entry(swpent)) 700 page = migration_entry_to_page(swpent); 701 } 702 if (page) { 703 int mapcount = page_mapcount(page); 704 705 if (mapcount >= 2) 706 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); 707 else 708 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); 709 } 710 return 0; 711 } 712 #endif /* HUGETLB_PAGE */ 713 714 static int show_smap(struct seq_file *m, void *v, int is_pid) 715 { 716 struct vm_area_struct *vma = v; 717 struct mem_size_stats mss; 718 struct mm_walk smaps_walk = { 719 .pmd_entry = smaps_pte_range, 720 #ifdef CONFIG_HUGETLB_PAGE 721 .hugetlb_entry = smaps_hugetlb_range, 722 #endif 723 .mm = vma->vm_mm, 724 .private = &mss, 725 }; 726 727 memset(&mss, 0, sizeof mss); 728 729 #ifdef CONFIG_SHMEM 730 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { 731 /* 732 * For shared or readonly shmem mappings we know that all 733 * swapped out pages belong to the shmem object, and we can 734 * obtain the swap value much more efficiently. For private 735 * writable mappings, we might have COW pages that are 736 * not affected by the parent swapped out pages of the shmem 737 * object, so we have to distinguish them during the page walk. 738 * Unless we know that the shmem object (or the part mapped by 739 * our VMA) has no swapped out pages at all. 740 */ 741 unsigned long shmem_swapped = shmem_swap_usage(vma); 742 743 if (!shmem_swapped || (vma->vm_flags & VM_SHARED) || 744 !(vma->vm_flags & VM_WRITE)) { 745 mss.swap = shmem_swapped; 746 } else { 747 mss.check_shmem_swap = true; 748 smaps_walk.pte_hole = smaps_pte_hole; 749 } 750 } 751 #endif 752 753 /* mmap_sem is held in m_start */ 754 walk_page_vma(vma, &smaps_walk); 755 756 show_map_vma(m, vma, is_pid); 757 758 seq_printf(m, 759 "Size: %8lu kB\n" 760 "Rss: %8lu kB\n" 761 "Pss: %8lu kB\n" 762 "Shared_Clean: %8lu kB\n" 763 "Shared_Dirty: %8lu kB\n" 764 "Private_Clean: %8lu kB\n" 765 "Private_Dirty: %8lu kB\n" 766 "Referenced: %8lu kB\n" 767 "Anonymous: %8lu kB\n" 768 "AnonHugePages: %8lu kB\n" 769 "Shared_Hugetlb: %8lu kB\n" 770 "Private_Hugetlb: %7lu kB\n" 771 "Swap: %8lu kB\n" 772 "SwapPss: %8lu kB\n" 773 "KernelPageSize: %8lu kB\n" 774 "MMUPageSize: %8lu kB\n" 775 "Locked: %8lu kB\n", 776 (vma->vm_end - vma->vm_start) >> 10, 777 mss.resident >> 10, 778 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), 779 mss.shared_clean >> 10, 780 mss.shared_dirty >> 10, 781 mss.private_clean >> 10, 782 mss.private_dirty >> 10, 783 mss.referenced >> 10, 784 mss.anonymous >> 10, 785 mss.anonymous_thp >> 10, 786 mss.shared_hugetlb >> 10, 787 mss.private_hugetlb >> 10, 788 mss.swap >> 10, 789 (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)), 790 vma_kernel_pagesize(vma) >> 10, 791 vma_mmu_pagesize(vma) >> 10, 792 (vma->vm_flags & VM_LOCKED) ? 793 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0); 794 795 show_smap_vma_flags(m, vma); 796 m_cache_vma(m, vma); 797 return 0; 798 } 799 800 static int show_pid_smap(struct seq_file *m, void *v) 801 { 802 return show_smap(m, v, 1); 803 } 804 805 static int show_tid_smap(struct seq_file *m, void *v) 806 { 807 return show_smap(m, v, 0); 808 } 809 810 static const struct seq_operations proc_pid_smaps_op = { 811 .start = m_start, 812 .next = m_next, 813 .stop = m_stop, 814 .show = show_pid_smap 815 }; 816 817 static const struct seq_operations proc_tid_smaps_op = { 818 .start = m_start, 819 .next = m_next, 820 .stop = m_stop, 821 .show = show_tid_smap 822 }; 823 824 static int pid_smaps_open(struct inode *inode, struct file *file) 825 { 826 return do_maps_open(inode, file, &proc_pid_smaps_op); 827 } 828 829 static int tid_smaps_open(struct inode *inode, struct file *file) 830 { 831 return do_maps_open(inode, file, &proc_tid_smaps_op); 832 } 833 834 const struct file_operations proc_pid_smaps_operations = { 835 .open = pid_smaps_open, 836 .read = seq_read, 837 .llseek = seq_lseek, 838 .release = proc_map_release, 839 }; 840 841 const struct file_operations proc_tid_smaps_operations = { 842 .open = tid_smaps_open, 843 .read = seq_read, 844 .llseek = seq_lseek, 845 .release = proc_map_release, 846 }; 847 848 enum clear_refs_types { 849 CLEAR_REFS_ALL = 1, 850 CLEAR_REFS_ANON, 851 CLEAR_REFS_MAPPED, 852 CLEAR_REFS_SOFT_DIRTY, 853 CLEAR_REFS_MM_HIWATER_RSS, 854 CLEAR_REFS_LAST, 855 }; 856 857 struct clear_refs_private { 858 enum clear_refs_types type; 859 }; 860 861 #ifdef CONFIG_MEM_SOFT_DIRTY 862 static inline void clear_soft_dirty(struct vm_area_struct *vma, 863 unsigned long addr, pte_t *pte) 864 { 865 /* 866 * The soft-dirty tracker uses #PF-s to catch writes 867 * to pages, so write-protect the pte as well. See the 868 * Documentation/vm/soft-dirty.txt for full description 869 * of how soft-dirty works. 870 */ 871 pte_t ptent = *pte; 872 873 if (pte_present(ptent)) { 874 ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte); 875 ptent = pte_wrprotect(ptent); 876 ptent = pte_clear_soft_dirty(ptent); 877 ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent); 878 } else if (is_swap_pte(ptent)) { 879 ptent = pte_swp_clear_soft_dirty(ptent); 880 set_pte_at(vma->vm_mm, addr, pte, ptent); 881 } 882 } 883 #else 884 static inline void clear_soft_dirty(struct vm_area_struct *vma, 885 unsigned long addr, pte_t *pte) 886 { 887 } 888 #endif 889 890 #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE) 891 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, 892 unsigned long addr, pmd_t *pmdp) 893 { 894 pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp); 895 896 pmd = pmd_wrprotect(pmd); 897 pmd = pmd_clear_soft_dirty(pmd); 898 899 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); 900 } 901 #else 902 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, 903 unsigned long addr, pmd_t *pmdp) 904 { 905 } 906 #endif 907 908 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, 909 unsigned long end, struct mm_walk *walk) 910 { 911 struct clear_refs_private *cp = walk->private; 912 struct vm_area_struct *vma = walk->vma; 913 pte_t *pte, ptent; 914 spinlock_t *ptl; 915 struct page *page; 916 917 ptl = pmd_trans_huge_lock(pmd, vma); 918 if (ptl) { 919 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 920 clear_soft_dirty_pmd(vma, addr, pmd); 921 goto out; 922 } 923 924 page = pmd_page(*pmd); 925 926 /* Clear accessed and referenced bits. */ 927 pmdp_test_and_clear_young(vma, addr, pmd); 928 test_and_clear_page_young(page); 929 ClearPageReferenced(page); 930 out: 931 spin_unlock(ptl); 932 return 0; 933 } 934 935 if (pmd_trans_unstable(pmd)) 936 return 0; 937 938 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 939 for (; addr != end; pte++, addr += PAGE_SIZE) { 940 ptent = *pte; 941 942 if (cp->type == CLEAR_REFS_SOFT_DIRTY) { 943 clear_soft_dirty(vma, addr, pte); 944 continue; 945 } 946 947 if (!pte_present(ptent)) 948 continue; 949 950 page = vm_normal_page(vma, addr, ptent); 951 if (!page) 952 continue; 953 954 /* Clear accessed and referenced bits. */ 955 ptep_test_and_clear_young(vma, addr, pte); 956 test_and_clear_page_young(page); 957 ClearPageReferenced(page); 958 } 959 pte_unmap_unlock(pte - 1, ptl); 960 cond_resched(); 961 return 0; 962 } 963 964 static int clear_refs_test_walk(unsigned long start, unsigned long end, 965 struct mm_walk *walk) 966 { 967 struct clear_refs_private *cp = walk->private; 968 struct vm_area_struct *vma = walk->vma; 969 970 if (vma->vm_flags & VM_PFNMAP) 971 return 1; 972 973 /* 974 * Writing 1 to /proc/pid/clear_refs affects all pages. 975 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages. 976 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages. 977 * Writing 4 to /proc/pid/clear_refs affects all pages. 978 */ 979 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) 980 return 1; 981 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) 982 return 1; 983 return 0; 984 } 985 986 static ssize_t clear_refs_write(struct file *file, const char __user *buf, 987 size_t count, loff_t *ppos) 988 { 989 struct task_struct *task; 990 char buffer[PROC_NUMBUF]; 991 struct mm_struct *mm; 992 struct vm_area_struct *vma; 993 enum clear_refs_types type; 994 int itype; 995 int rv; 996 997 memset(buffer, 0, sizeof(buffer)); 998 if (count > sizeof(buffer) - 1) 999 count = sizeof(buffer) - 1; 1000 if (copy_from_user(buffer, buf, count)) 1001 return -EFAULT; 1002 rv = kstrtoint(strstrip(buffer), 10, &itype); 1003 if (rv < 0) 1004 return rv; 1005 type = (enum clear_refs_types)itype; 1006 if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST) 1007 return -EINVAL; 1008 1009 task = get_proc_task(file_inode(file)); 1010 if (!task) 1011 return -ESRCH; 1012 mm = get_task_mm(task); 1013 if (mm) { 1014 struct clear_refs_private cp = { 1015 .type = type, 1016 }; 1017 struct mm_walk clear_refs_walk = { 1018 .pmd_entry = clear_refs_pte_range, 1019 .test_walk = clear_refs_test_walk, 1020 .mm = mm, 1021 .private = &cp, 1022 }; 1023 1024 if (type == CLEAR_REFS_MM_HIWATER_RSS) { 1025 /* 1026 * Writing 5 to /proc/pid/clear_refs resets the peak 1027 * resident set size to this mm's current rss value. 1028 */ 1029 down_write(&mm->mmap_sem); 1030 reset_mm_hiwater_rss(mm); 1031 up_write(&mm->mmap_sem); 1032 goto out_mm; 1033 } 1034 1035 down_read(&mm->mmap_sem); 1036 if (type == CLEAR_REFS_SOFT_DIRTY) { 1037 for (vma = mm->mmap; vma; vma = vma->vm_next) { 1038 if (!(vma->vm_flags & VM_SOFTDIRTY)) 1039 continue; 1040 up_read(&mm->mmap_sem); 1041 down_write(&mm->mmap_sem); 1042 for (vma = mm->mmap; vma; vma = vma->vm_next) { 1043 vma->vm_flags &= ~VM_SOFTDIRTY; 1044 vma_set_page_prot(vma); 1045 } 1046 downgrade_write(&mm->mmap_sem); 1047 break; 1048 } 1049 mmu_notifier_invalidate_range_start(mm, 0, -1); 1050 } 1051 walk_page_range(0, ~0UL, &clear_refs_walk); 1052 if (type == CLEAR_REFS_SOFT_DIRTY) 1053 mmu_notifier_invalidate_range_end(mm, 0, -1); 1054 flush_tlb_mm(mm); 1055 up_read(&mm->mmap_sem); 1056 out_mm: 1057 mmput(mm); 1058 } 1059 put_task_struct(task); 1060 1061 return count; 1062 } 1063 1064 const struct file_operations proc_clear_refs_operations = { 1065 .write = clear_refs_write, 1066 .llseek = noop_llseek, 1067 }; 1068 1069 typedef struct { 1070 u64 pme; 1071 } pagemap_entry_t; 1072 1073 struct pagemapread { 1074 int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ 1075 pagemap_entry_t *buffer; 1076 bool show_pfn; 1077 }; 1078 1079 #define PAGEMAP_WALK_SIZE (PMD_SIZE) 1080 #define PAGEMAP_WALK_MASK (PMD_MASK) 1081 1082 #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) 1083 #define PM_PFRAME_BITS 55 1084 #define PM_PFRAME_MASK GENMASK_ULL(PM_PFRAME_BITS - 1, 0) 1085 #define PM_SOFT_DIRTY BIT_ULL(55) 1086 #define PM_MMAP_EXCLUSIVE BIT_ULL(56) 1087 #define PM_FILE BIT_ULL(61) 1088 #define PM_SWAP BIT_ULL(62) 1089 #define PM_PRESENT BIT_ULL(63) 1090 1091 #define PM_END_OF_BUFFER 1 1092 1093 static inline pagemap_entry_t make_pme(u64 frame, u64 flags) 1094 { 1095 return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags }; 1096 } 1097 1098 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, 1099 struct pagemapread *pm) 1100 { 1101 pm->buffer[pm->pos++] = *pme; 1102 if (pm->pos >= pm->len) 1103 return PM_END_OF_BUFFER; 1104 return 0; 1105 } 1106 1107 static int pagemap_pte_hole(unsigned long start, unsigned long end, 1108 struct mm_walk *walk) 1109 { 1110 struct pagemapread *pm = walk->private; 1111 unsigned long addr = start; 1112 int err = 0; 1113 1114 while (addr < end) { 1115 struct vm_area_struct *vma = find_vma(walk->mm, addr); 1116 pagemap_entry_t pme = make_pme(0, 0); 1117 /* End of address space hole, which we mark as non-present. */ 1118 unsigned long hole_end; 1119 1120 if (vma) 1121 hole_end = min(end, vma->vm_start); 1122 else 1123 hole_end = end; 1124 1125 for (; addr < hole_end; addr += PAGE_SIZE) { 1126 err = add_to_pagemap(addr, &pme, pm); 1127 if (err) 1128 goto out; 1129 } 1130 1131 if (!vma) 1132 break; 1133 1134 /* Addresses in the VMA. */ 1135 if (vma->vm_flags & VM_SOFTDIRTY) 1136 pme = make_pme(0, PM_SOFT_DIRTY); 1137 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { 1138 err = add_to_pagemap(addr, &pme, pm); 1139 if (err) 1140 goto out; 1141 } 1142 } 1143 out: 1144 return err; 1145 } 1146 1147 static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, 1148 struct vm_area_struct *vma, unsigned long addr, pte_t pte) 1149 { 1150 u64 frame = 0, flags = 0; 1151 struct page *page = NULL; 1152 1153 if (pte_present(pte)) { 1154 if (pm->show_pfn) 1155 frame = pte_pfn(pte); 1156 flags |= PM_PRESENT; 1157 page = vm_normal_page(vma, addr, pte); 1158 if (pte_soft_dirty(pte)) 1159 flags |= PM_SOFT_DIRTY; 1160 } else if (is_swap_pte(pte)) { 1161 swp_entry_t entry; 1162 if (pte_swp_soft_dirty(pte)) 1163 flags |= PM_SOFT_DIRTY; 1164 entry = pte_to_swp_entry(pte); 1165 frame = swp_type(entry) | 1166 (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 1167 flags |= PM_SWAP; 1168 if (is_migration_entry(entry)) 1169 page = migration_entry_to_page(entry); 1170 } 1171 1172 if (page && !PageAnon(page)) 1173 flags |= PM_FILE; 1174 if (page && page_mapcount(page) == 1) 1175 flags |= PM_MMAP_EXCLUSIVE; 1176 if (vma->vm_flags & VM_SOFTDIRTY) 1177 flags |= PM_SOFT_DIRTY; 1178 1179 return make_pme(frame, flags); 1180 } 1181 1182 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, 1183 struct mm_walk *walk) 1184 { 1185 struct vm_area_struct *vma = walk->vma; 1186 struct pagemapread *pm = walk->private; 1187 spinlock_t *ptl; 1188 pte_t *pte, *orig_pte; 1189 int err = 0; 1190 1191 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 1192 ptl = pmd_trans_huge_lock(pmdp, vma); 1193 if (ptl) { 1194 u64 flags = 0, frame = 0; 1195 pmd_t pmd = *pmdp; 1196 1197 if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd)) 1198 flags |= PM_SOFT_DIRTY; 1199 1200 /* 1201 * Currently pmd for thp is always present because thp 1202 * can not be swapped-out, migrated, or HWPOISONed 1203 * (split in such cases instead.) 1204 * This if-check is just to prepare for future implementation. 1205 */ 1206 if (pmd_present(pmd)) { 1207 struct page *page = pmd_page(pmd); 1208 1209 if (page_mapcount(page) == 1) 1210 flags |= PM_MMAP_EXCLUSIVE; 1211 1212 flags |= PM_PRESENT; 1213 if (pm->show_pfn) 1214 frame = pmd_pfn(pmd) + 1215 ((addr & ~PMD_MASK) >> PAGE_SHIFT); 1216 } 1217 1218 for (; addr != end; addr += PAGE_SIZE) { 1219 pagemap_entry_t pme = make_pme(frame, flags); 1220 1221 err = add_to_pagemap(addr, &pme, pm); 1222 if (err) 1223 break; 1224 if (pm->show_pfn && (flags & PM_PRESENT)) 1225 frame++; 1226 } 1227 spin_unlock(ptl); 1228 return err; 1229 } 1230 1231 if (pmd_trans_unstable(pmdp)) 1232 return 0; 1233 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1234 1235 /* 1236 * We can assume that @vma always points to a valid one and @end never 1237 * goes beyond vma->vm_end. 1238 */ 1239 orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); 1240 for (; addr < end; pte++, addr += PAGE_SIZE) { 1241 pagemap_entry_t pme; 1242 1243 pme = pte_to_pagemap_entry(pm, vma, addr, *pte); 1244 err = add_to_pagemap(addr, &pme, pm); 1245 if (err) 1246 break; 1247 } 1248 pte_unmap_unlock(orig_pte, ptl); 1249 1250 cond_resched(); 1251 1252 return err; 1253 } 1254 1255 #ifdef CONFIG_HUGETLB_PAGE 1256 /* This function walks within one hugetlb entry in the single call */ 1257 static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask, 1258 unsigned long addr, unsigned long end, 1259 struct mm_walk *walk) 1260 { 1261 struct pagemapread *pm = walk->private; 1262 struct vm_area_struct *vma = walk->vma; 1263 u64 flags = 0, frame = 0; 1264 int err = 0; 1265 pte_t pte; 1266 1267 if (vma->vm_flags & VM_SOFTDIRTY) 1268 flags |= PM_SOFT_DIRTY; 1269 1270 pte = huge_ptep_get(ptep); 1271 if (pte_present(pte)) { 1272 struct page *page = pte_page(pte); 1273 1274 if (!PageAnon(page)) 1275 flags |= PM_FILE; 1276 1277 if (page_mapcount(page) == 1) 1278 flags |= PM_MMAP_EXCLUSIVE; 1279 1280 flags |= PM_PRESENT; 1281 if (pm->show_pfn) 1282 frame = pte_pfn(pte) + 1283 ((addr & ~hmask) >> PAGE_SHIFT); 1284 } 1285 1286 for (; addr != end; addr += PAGE_SIZE) { 1287 pagemap_entry_t pme = make_pme(frame, flags); 1288 1289 err = add_to_pagemap(addr, &pme, pm); 1290 if (err) 1291 return err; 1292 if (pm->show_pfn && (flags & PM_PRESENT)) 1293 frame++; 1294 } 1295 1296 cond_resched(); 1297 1298 return err; 1299 } 1300 #endif /* HUGETLB_PAGE */ 1301 1302 /* 1303 * /proc/pid/pagemap - an array mapping virtual pages to pfns 1304 * 1305 * For each page in the address space, this file contains one 64-bit entry 1306 * consisting of the following: 1307 * 1308 * Bits 0-54 page frame number (PFN) if present 1309 * Bits 0-4 swap type if swapped 1310 * Bits 5-54 swap offset if swapped 1311 * Bit 55 pte is soft-dirty (see Documentation/vm/soft-dirty.txt) 1312 * Bit 56 page exclusively mapped 1313 * Bits 57-60 zero 1314 * Bit 61 page is file-page or shared-anon 1315 * Bit 62 page swapped 1316 * Bit 63 page present 1317 * 1318 * If the page is not present but in swap, then the PFN contains an 1319 * encoding of the swap file number and the page's offset into the 1320 * swap. Unmapped pages return a null PFN. This allows determining 1321 * precisely which pages are mapped (or in swap) and comparing mapped 1322 * pages between processes. 1323 * 1324 * Efficient users of this interface will use /proc/pid/maps to 1325 * determine which areas of memory are actually mapped and llseek to 1326 * skip over unmapped regions. 1327 */ 1328 static ssize_t pagemap_read(struct file *file, char __user *buf, 1329 size_t count, loff_t *ppos) 1330 { 1331 struct mm_struct *mm = file->private_data; 1332 struct pagemapread pm; 1333 struct mm_walk pagemap_walk = {}; 1334 unsigned long src; 1335 unsigned long svpfn; 1336 unsigned long start_vaddr; 1337 unsigned long end_vaddr; 1338 int ret = 0, copied = 0; 1339 1340 if (!mm || !atomic_inc_not_zero(&mm->mm_users)) 1341 goto out; 1342 1343 ret = -EINVAL; 1344 /* file position must be aligned */ 1345 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) 1346 goto out_mm; 1347 1348 ret = 0; 1349 if (!count) 1350 goto out_mm; 1351 1352 /* do not disclose physical addresses: attack vector */ 1353 pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN); 1354 1355 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); 1356 pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); 1357 ret = -ENOMEM; 1358 if (!pm.buffer) 1359 goto out_mm; 1360 1361 pagemap_walk.pmd_entry = pagemap_pmd_range; 1362 pagemap_walk.pte_hole = pagemap_pte_hole; 1363 #ifdef CONFIG_HUGETLB_PAGE 1364 pagemap_walk.hugetlb_entry = pagemap_hugetlb_range; 1365 #endif 1366 pagemap_walk.mm = mm; 1367 pagemap_walk.private = ± 1368 1369 src = *ppos; 1370 svpfn = src / PM_ENTRY_BYTES; 1371 start_vaddr = svpfn << PAGE_SHIFT; 1372 end_vaddr = mm->task_size; 1373 1374 /* watch out for wraparound */ 1375 if (svpfn > mm->task_size >> PAGE_SHIFT) 1376 start_vaddr = end_vaddr; 1377 1378 /* 1379 * The odds are that this will stop walking way 1380 * before end_vaddr, because the length of the 1381 * user buffer is tracked in "pm", and the walk 1382 * will stop when we hit the end of the buffer. 1383 */ 1384 ret = 0; 1385 while (count && (start_vaddr < end_vaddr)) { 1386 int len; 1387 unsigned long end; 1388 1389 pm.pos = 0; 1390 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK; 1391 /* overflow ? */ 1392 if (end < start_vaddr || end > end_vaddr) 1393 end = end_vaddr; 1394 down_read(&mm->mmap_sem); 1395 ret = walk_page_range(start_vaddr, end, &pagemap_walk); 1396 up_read(&mm->mmap_sem); 1397 start_vaddr = end; 1398 1399 len = min(count, PM_ENTRY_BYTES * pm.pos); 1400 if (copy_to_user(buf, pm.buffer, len)) { 1401 ret = -EFAULT; 1402 goto out_free; 1403 } 1404 copied += len; 1405 buf += len; 1406 count -= len; 1407 } 1408 *ppos += copied; 1409 if (!ret || ret == PM_END_OF_BUFFER) 1410 ret = copied; 1411 1412 out_free: 1413 kfree(pm.buffer); 1414 out_mm: 1415 mmput(mm); 1416 out: 1417 return ret; 1418 } 1419 1420 static int pagemap_open(struct inode *inode, struct file *file) 1421 { 1422 struct mm_struct *mm; 1423 1424 mm = proc_mem_open(inode, PTRACE_MODE_READ); 1425 if (IS_ERR(mm)) 1426 return PTR_ERR(mm); 1427 file->private_data = mm; 1428 return 0; 1429 } 1430 1431 static int pagemap_release(struct inode *inode, struct file *file) 1432 { 1433 struct mm_struct *mm = file->private_data; 1434 1435 if (mm) 1436 mmdrop(mm); 1437 return 0; 1438 } 1439 1440 const struct file_operations proc_pagemap_operations = { 1441 .llseek = mem_lseek, /* borrow this */ 1442 .read = pagemap_read, 1443 .open = pagemap_open, 1444 .release = pagemap_release, 1445 }; 1446 #endif /* CONFIG_PROC_PAGE_MONITOR */ 1447 1448 #ifdef CONFIG_NUMA 1449 1450 struct numa_maps { 1451 unsigned long pages; 1452 unsigned long anon; 1453 unsigned long active; 1454 unsigned long writeback; 1455 unsigned long mapcount_max; 1456 unsigned long dirty; 1457 unsigned long swapcache; 1458 unsigned long node[MAX_NUMNODES]; 1459 }; 1460 1461 struct numa_maps_private { 1462 struct proc_maps_private proc_maps; 1463 struct numa_maps md; 1464 }; 1465 1466 static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty, 1467 unsigned long nr_pages) 1468 { 1469 int count = page_mapcount(page); 1470 1471 md->pages += nr_pages; 1472 if (pte_dirty || PageDirty(page)) 1473 md->dirty += nr_pages; 1474 1475 if (PageSwapCache(page)) 1476 md->swapcache += nr_pages; 1477 1478 if (PageActive(page) || PageUnevictable(page)) 1479 md->active += nr_pages; 1480 1481 if (PageWriteback(page)) 1482 md->writeback += nr_pages; 1483 1484 if (PageAnon(page)) 1485 md->anon += nr_pages; 1486 1487 if (count > md->mapcount_max) 1488 md->mapcount_max = count; 1489 1490 md->node[page_to_nid(page)] += nr_pages; 1491 } 1492 1493 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, 1494 unsigned long addr) 1495 { 1496 struct page *page; 1497 int nid; 1498 1499 if (!pte_present(pte)) 1500 return NULL; 1501 1502 page = vm_normal_page(vma, addr, pte); 1503 if (!page) 1504 return NULL; 1505 1506 if (PageReserved(page)) 1507 return NULL; 1508 1509 nid = page_to_nid(page); 1510 if (!node_isset(nid, node_states[N_MEMORY])) 1511 return NULL; 1512 1513 return page; 1514 } 1515 1516 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, 1517 unsigned long end, struct mm_walk *walk) 1518 { 1519 struct numa_maps *md = walk->private; 1520 struct vm_area_struct *vma = walk->vma; 1521 spinlock_t *ptl; 1522 pte_t *orig_pte; 1523 pte_t *pte; 1524 1525 ptl = pmd_trans_huge_lock(pmd, vma); 1526 if (ptl) { 1527 pte_t huge_pte = *(pte_t *)pmd; 1528 struct page *page; 1529 1530 page = can_gather_numa_stats(huge_pte, vma, addr); 1531 if (page) 1532 gather_stats(page, md, pte_dirty(huge_pte), 1533 HPAGE_PMD_SIZE/PAGE_SIZE); 1534 spin_unlock(ptl); 1535 return 0; 1536 } 1537 1538 if (pmd_trans_unstable(pmd)) 1539 return 0; 1540 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); 1541 do { 1542 struct page *page = can_gather_numa_stats(*pte, vma, addr); 1543 if (!page) 1544 continue; 1545 gather_stats(page, md, pte_dirty(*pte), 1); 1546 1547 } while (pte++, addr += PAGE_SIZE, addr != end); 1548 pte_unmap_unlock(orig_pte, ptl); 1549 return 0; 1550 } 1551 #ifdef CONFIG_HUGETLB_PAGE 1552 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, 1553 unsigned long addr, unsigned long end, struct mm_walk *walk) 1554 { 1555 struct numa_maps *md; 1556 struct page *page; 1557 1558 if (!pte_present(*pte)) 1559 return 0; 1560 1561 page = pte_page(*pte); 1562 if (!page) 1563 return 0; 1564 1565 md = walk->private; 1566 gather_stats(page, md, pte_dirty(*pte), 1); 1567 return 0; 1568 } 1569 1570 #else 1571 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, 1572 unsigned long addr, unsigned long end, struct mm_walk *walk) 1573 { 1574 return 0; 1575 } 1576 #endif 1577 1578 /* 1579 * Display pages allocated per node and memory policy via /proc. 1580 */ 1581 static int show_numa_map(struct seq_file *m, void *v, int is_pid) 1582 { 1583 struct numa_maps_private *numa_priv = m->private; 1584 struct proc_maps_private *proc_priv = &numa_priv->proc_maps; 1585 struct vm_area_struct *vma = v; 1586 struct numa_maps *md = &numa_priv->md; 1587 struct file *file = vma->vm_file; 1588 struct mm_struct *mm = vma->vm_mm; 1589 struct mm_walk walk = { 1590 .hugetlb_entry = gather_hugetlb_stats, 1591 .pmd_entry = gather_pte_stats, 1592 .private = md, 1593 .mm = mm, 1594 }; 1595 struct mempolicy *pol; 1596 char buffer[64]; 1597 int nid; 1598 1599 if (!mm) 1600 return 0; 1601 1602 /* Ensure we start with an empty set of numa_maps statistics. */ 1603 memset(md, 0, sizeof(*md)); 1604 1605 pol = __get_vma_policy(vma, vma->vm_start); 1606 if (pol) { 1607 mpol_to_str(buffer, sizeof(buffer), pol); 1608 mpol_cond_put(pol); 1609 } else { 1610 mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy); 1611 } 1612 1613 seq_printf(m, "%08lx %s", vma->vm_start, buffer); 1614 1615 if (file) { 1616 seq_puts(m, " file="); 1617 seq_file_path(m, file, "\n\t= "); 1618 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { 1619 seq_puts(m, " heap"); 1620 } else { 1621 pid_t tid = pid_of_stack(proc_priv, vma, is_pid); 1622 if (tid != 0) { 1623 /* 1624 * Thread stack in /proc/PID/task/TID/maps or 1625 * the main process stack. 1626 */ 1627 if (!is_pid || (vma->vm_start <= mm->start_stack && 1628 vma->vm_end >= mm->start_stack)) 1629 seq_puts(m, " stack"); 1630 else 1631 seq_printf(m, " stack:%d", tid); 1632 } 1633 } 1634 1635 if (is_vm_hugetlb_page(vma)) 1636 seq_puts(m, " huge"); 1637 1638 /* mmap_sem is held by m_start */ 1639 walk_page_vma(vma, &walk); 1640 1641 if (!md->pages) 1642 goto out; 1643 1644 if (md->anon) 1645 seq_printf(m, " anon=%lu", md->anon); 1646 1647 if (md->dirty) 1648 seq_printf(m, " dirty=%lu", md->dirty); 1649 1650 if (md->pages != md->anon && md->pages != md->dirty) 1651 seq_printf(m, " mapped=%lu", md->pages); 1652 1653 if (md->mapcount_max > 1) 1654 seq_printf(m, " mapmax=%lu", md->mapcount_max); 1655 1656 if (md->swapcache) 1657 seq_printf(m, " swapcache=%lu", md->swapcache); 1658 1659 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) 1660 seq_printf(m, " active=%lu", md->active); 1661 1662 if (md->writeback) 1663 seq_printf(m, " writeback=%lu", md->writeback); 1664 1665 for_each_node_state(nid, N_MEMORY) 1666 if (md->node[nid]) 1667 seq_printf(m, " N%d=%lu", nid, md->node[nid]); 1668 1669 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); 1670 out: 1671 seq_putc(m, '\n'); 1672 m_cache_vma(m, vma); 1673 return 0; 1674 } 1675 1676 static int show_pid_numa_map(struct seq_file *m, void *v) 1677 { 1678 return show_numa_map(m, v, 1); 1679 } 1680 1681 static int show_tid_numa_map(struct seq_file *m, void *v) 1682 { 1683 return show_numa_map(m, v, 0); 1684 } 1685 1686 static const struct seq_operations proc_pid_numa_maps_op = { 1687 .start = m_start, 1688 .next = m_next, 1689 .stop = m_stop, 1690 .show = show_pid_numa_map, 1691 }; 1692 1693 static const struct seq_operations proc_tid_numa_maps_op = { 1694 .start = m_start, 1695 .next = m_next, 1696 .stop = m_stop, 1697 .show = show_tid_numa_map, 1698 }; 1699 1700 static int numa_maps_open(struct inode *inode, struct file *file, 1701 const struct seq_operations *ops) 1702 { 1703 return proc_maps_open(inode, file, ops, 1704 sizeof(struct numa_maps_private)); 1705 } 1706 1707 static int pid_numa_maps_open(struct inode *inode, struct file *file) 1708 { 1709 return numa_maps_open(inode, file, &proc_pid_numa_maps_op); 1710 } 1711 1712 static int tid_numa_maps_open(struct inode *inode, struct file *file) 1713 { 1714 return numa_maps_open(inode, file, &proc_tid_numa_maps_op); 1715 } 1716 1717 const struct file_operations proc_pid_numa_maps_operations = { 1718 .open = pid_numa_maps_open, 1719 .read = seq_read, 1720 .llseek = seq_lseek, 1721 .release = proc_map_release, 1722 }; 1723 1724 const struct file_operations proc_tid_numa_maps_operations = { 1725 .open = tid_numa_maps_open, 1726 .read = seq_read, 1727 .llseek = seq_lseek, 1728 .release = proc_map_release, 1729 }; 1730 #endif /* CONFIG_NUMA */ 1731