#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"RssAnon:\t%8lu kB\n"
		"RssFile:\t%8lu kB\n"
		"RssShmem:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		anon << (PAGE_SHIFT-10),
		file << (PAGE_SHIFT-10),
		shmem << (PAGE_SHIFT-10),
		mm->data_vm << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
	hugetlb_report_usage(m, mm);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

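/*
 * Note on units (informational, based on the code above): the mm_struct
 * counters are kept in pages, so task_mem() converts them to kB with
 * "x << (PAGE_SHIFT - 10)".  With 4K pages (PAGE_SHIFT == 12) that is a
 * left shift by 2, e.g. 300 pages -> 1200 kB.  task_statm(), by contrast,
 * returns raw page counts and leaves unit conversion to userspace readers
 * of /proc/PID/statm.
 */
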
#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr - 1);
		if (vma && vma->vm_start <= last_addr)
			vma = m_next_vma(priv, vma);
		if (vma)
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

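/*
 * Lifetime sketch (informational): proc_mem_open() above hands back an mm
 * that is pinned with mmgrab() but whose address space is not pinned, so
 * proc_map_release() below pairs it with mmdrop().  The address space is
 * only pinned for the duration of a read: m_start() takes mmget_not_zero()
 * plus mmap_sem, and vma_stop() drops both.
 */
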
static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	kfree(priv->rollup);
	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
		vma->vm_end >= vma->vm_mm->start_stack;
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
		   start,
		   end,
		   flags & VM_READ ? 'r' : '-',
		   flags & VM_WRITE ? 'w' : '-',
		   flags & VM_EXEC ? 'x' : '-',
		   flags & VM_MAYSHARE ? 's' : 'p',
		   pgoff,
		   MAJOR(dev), MINOR(dev), ino);
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(vma))
			name = "[stack]";
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

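/*
 * Example output (illustrative): each line produced by show_map_vma()
 * above looks like
 *
 *	00400000-0040b000 r-xp 00000000 08:01 1311131    /bin/cat
 *
 * i.e. start-end, permissions (r/w/x plus 's'hared or 'p'rivate), file
 * offset, major:minor of the backing device, inode number, and then the
 * path or a [heap]/[stack]/[vdso]-style marker.
 */
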
const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors.  So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

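/*
 * Worked example (informational, assuming 4K pages): a page mapped by three
 * processes contributes (4096 << PSS_SHIFT) / 3 = 5592405 fixed-point units
 * to each of their pss counters, i.e. 5592405 >> PSS_SHIFT = 1365 bytes
 * (~1.33 KB) apiece.  The three shares sum to 16777215 units, one unit
 * (1/4096 of a byte) short of the full page, which is the kind of rounding
 * loss the shift keeps negligible.
 */
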
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	bool first;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	unsigned long first_vma_start;
	u64 pss;
	u64 pss_locked;
	u64 swap_pss;
	bool check_shmem_swap;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty)
{
	int i, nr = compound ? 1 << compound_order(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	if (PageAnon(page)) {
		mss->anonymous += size;
		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
			mss->lazyfree += size;
	}

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page is mapped via a PTE it would
	 * elevate page_count().
	 */
	if (page_count(page) == 1) {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
		return;
	}

	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2) {
			if (dirty || PageDirty(page))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (dirty || PageDirty(page))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += PAGE_SIZE << PSS_SHIFT;
		}
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	mss->swap += shmem_partial_swap_usage(
			walk->vma->vm_file->f_mapping, addr, end);

	return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
		else if (is_device_private_entry(swpent))
			page = device_private_entry_to_page(swpent);
	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
							&& pte_none(*pte))) {
		page = find_get_entry(vma->vm_file->f_mapping,
						linear_page_index(vma, addr));
		if (!page)
			return;

		if (radix_tree_exceptional_entry(page))
			mss->swap += PAGE_SIZE;
		else
			put_page(page);

		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		VM_BUG_ON_PAGE(1, page);
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (pmd_present(*pmd))
			smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

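	/*
	 * Note (informational): the pmd_trans_huge_lock() branch above has
	 * already accounted a mapped huge page under the pmd lock; from here
	 * on we expect a regular page table, and pmd_trans_unstable() below
	 * bails out if the pmd is still in flux.
	 */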
	if (pmd_trans_unstable(pmd))
		goto out;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
		else if (is_device_private_entry(swpent))
			page = device_private_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#endif /* HUGETLB_PAGE */

void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss_stack;
	struct mem_size_stats *mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
		.hugetlb_entry = smaps_hugetlb_range,
#endif
		.mm = vma->vm_mm,
	};
	int ret = 0;
	bool rollup_mode;
	bool last_vma;

	if (priv->rollup) {
		rollup_mode = true;
		mss = priv->rollup;
		if (mss->first) {
			mss->first_vma_start = vma->vm_start;
			mss->first = false;
		}
		last_vma = !m_next_vma(priv, vma);
	} else {
		rollup_mode = false;
		memset(&mss_stack, 0, sizeof(mss_stack));
		mss = &mss_stack;
	}

	smaps_walk.private = mss;

#ifdef CONFIG_SHMEM
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE)) {
			mss->swap = shmem_swapped;
		} else {
			mss->check_shmem_swap = true;
			smaps_walk.pte_hole = smaps_pte_hole;
		}
	}
#endif

	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);
	if (vma->vm_flags & VM_LOCKED)
		mss->pss_locked += mss->pss;

	if (!rollup_mode) {
		show_map_vma(m, vma, is_pid);
	} else if (last_vma) {
		show_vma_header_prefix(
			m, mss->first_vma_start, vma->vm_end, 0, 0, 0, 0);
		seq_pad(m, ' ');
		seq_puts(m, "[rollup]\n");
	} else {
		ret = SEQ_SKIP;
	}

	if (!rollup_mode)
		seq_printf(m,
			   "Size:           %8lu kB\n"
			   "KernelPageSize: %8lu kB\n"
			   "MMUPageSize:    %8lu kB\n",
			   (vma->vm_end - vma->vm_start) >> 10,
			   vma_kernel_pagesize(vma) >> 10,
			   vma_mmu_pagesize(vma) >> 10);


	if (!rollup_mode || last_vma)
		seq_printf(m,
			   "Rss:            %8lu kB\n"
			   "Pss:            %8lu kB\n"
			   "Shared_Clean:   %8lu kB\n"
			   "Shared_Dirty:   %8lu kB\n"
			   "Private_Clean:  %8lu kB\n"
			   "Private_Dirty:  %8lu kB\n"
			   "Referenced:     %8lu kB\n"
			   "Anonymous:      %8lu kB\n"
			   "LazyFree:       %8lu kB\n"
			   "AnonHugePages:  %8lu kB\n"
			   "ShmemPmdMapped: %8lu kB\n"
			   "Shared_Hugetlb: %8lu kB\n"
			   "Private_Hugetlb: %7lu kB\n"
			   "Swap:           %8lu kB\n"
			   "SwapPss:        %8lu kB\n"
			   "Locked:         %8lu kB\n",
			   mss->resident >> 10,
			   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)),
			   mss->shared_clean >> 10,
			   mss->shared_dirty >> 10,
			   mss->private_clean >> 10,
			   mss->private_dirty >> 10,
			   mss->referenced >> 10,
			   mss->anonymous >> 10,
			   mss->lazyfree >> 10,
			   mss->anonymous_thp >> 10,
			   mss->shmem_thp >> 10,
			   mss->shared_hugetlb >> 10,
			   mss->private_hugetlb >> 10,
			   mss->swap >> 10,
			   (unsigned long)(mss->swap_pss >> (10 + PSS_SHIFT)),
			   (unsigned long)(mss->pss >> (10 + PSS_SHIFT)));

	if (!rollup_mode) {
		arch_show_smap(m, vma);
		show_smap_vma_flags(m, vma);
	}
	m_cache_vma(m, vma);
	return ret;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int pid_smaps_rollup_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_maps_private *priv;
	int ret = do_maps_open(inode, file, &proc_pid_smaps_op);

	if (ret < 0)
		return ret;
	seq = file->private_data;
	priv = seq->private;
	priv->rollup = kzalloc(sizeof(*priv->rollup), GFP_KERNEL);
	if (!priv->rollup) {
		proc_map_release(inode, file);
		return -ENOMEM;
	}
	priv->rollup->first = true;
	return 0;
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= pid_smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

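/*
 * Reader's note (informational): /proc/PID/smaps_rollup reuses show_smap()
 * above with priv->rollup allocated, so the per-VMA entries are suppressed
 * (SEQ_SKIP) and a single "[rollup]" record covering [first_vma_start,
 * last vma end) is emitted with the Rss/Pss/Swap/... counters accumulated
 * across every VMA of the process.
 */
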
enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_present(pmd)) {
		/* See comment in change_huge_pmd() */
		pmdp_invalidate(vma, addr, pmdp);
		if (pmd_dirty(*pmdp))
			pmd = pmd_mkdirty(pmd);
		if (pmd_young(*pmdp))
			pmd = pmd_mkyoung(pmd);

		pmd = pmd_wrprotect(pmd);
		pmd = pmd_clear_soft_dirty(pmd);

		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
		pmd = pmd_swp_clear_soft_dirty(pmd);
		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
	}
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		if (!pmd_present(*pmd))
			goto out;

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	struct mmu_gather tlb;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			if (down_write_killable(&mm->mmap_sem)) {
				count = -EINTR;
				goto out_mm;
			}

			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		tlb_gather_mmu(&tlb, mm, 0, -1);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				if (down_write_killable(&mm->mmap_sem)) {
					count = -EINTR;
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, mm->highest_vm_end, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		tlb_finish_mmu(&tlb, 0, -1);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

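/*
 * Usage summary (informational, mirroring the checks above): writing "1"
 * clears the referenced/accessed bits on all mapped pages, "2" on anonymous
 * pages only, "3" on file-backed pages only; "4" additionally clears the
 * soft-dirty bits so that later writes show up as soft-dirty in the pagemap
 * interface below; "5" resets the peak RSS (VmHWM) to the current RSS.
 */
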
typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = _vm_normal_page(vma, addr, pte, true);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);

		if (is_device_private_entry(entry))
			page = device_private_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}

static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;
		struct page *page = NULL;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		if (pmd_present(pmd)) {
			page = pmd_page(pmd);

			flags |= PM_PRESENT;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		else if (is_swap_pmd(pmd)) {
			swp_entry_t entry = pmd_to_swp_entry(pmd);

			frame = swp_type(entry) |
				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
			flags |= PM_SWAP;
			VM_BUG_ON(!is_pmd_migration_entry(pmd));
			page = migration_entry_to_page(entry);
		}
#endif

		if (page && page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn && (flags & PM_PRESENT))
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */

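/*
 * Decoding sketch (informational, userspace point of view): the entry for
 * virtual address vaddr sits at file offset (vaddr / PAGE_SIZE) * 8.  Given
 * a 64-bit entry e read from the file:
 *
 *	present    = e & (1ULL << 63);
 *	swapped    = e & (1ULL << 62);
 *	soft_dirty = e & (1ULL << 55);
 *	pfn        = e & ((1ULL << 55) - 1);	// valid only when present
 *
 * Note that pagemap_read() below reports the PFN field as zero unless the
 * reader has CAP_SYS_ADMIN.
 */
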
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !mmget_not_zero(mm))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_KERNEL);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	cond_resched();
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else if (is_stack(vma)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */