#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"
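
/*
 * Report a task's memory usage counters: VmPeak/VmSize, locked and pinned
 * memory, VmRSS split into anon/file/shmem, data/stack/text/lib sizes,
 * page table and swap usage.  These are the "Vm*" lines shown in
 * /proc/<pid>/status.
 */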
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, ptes, pmds, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"RssAnon:\t%8lu kB\n"
		"RssFile:\t%8lu kB\n"
		"RssShmem:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		anon << (PAGE_SHIFT-10),
		file << (PAGE_SHIFT-10),
		shmem << (PAGE_SHIFT-10),
		mm->data_vm << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
	hugetlb_report_usage(m, mm);
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}
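
/*
 * proc_mem_open() returns the target mm with an mm_count reference held
 * rather than an mm_users one, so an open map file does not pin the
 * task's address space.  m_start() takes a temporary mm_users reference
 * for each read pass (dropped in vma_stop()), and proc_map_release()
 * drops the mm_count reference with mmdrop().
 */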
static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct proc_maps_private *priv,
		    struct vm_area_struct *vma, int is_pid)
{
	int stack = 0;

	if (is_pid) {
		stack = vma->vm_start <= vma->vm_mm->start_stack &&
			vma->vm_end >= vma->vm_mm->start_stack;
	} else {
		struct inode *inode = priv->inode;
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(proc_pid(inode), PIDTYPE_PID);
		if (task)
			stack = vma_is_stack_for_task(vma, task);
		rcu_read_unlock();
	}
	return stack;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(priv, vma, is_pid))
			name = "[stack]";
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size(PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64bit
 * fixed-point pss counter to minimize division errors. So (pss >>
 * PSS_SHIFT) would be the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 swap_pss;
	bool check_shmem_swap;
};
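
/*
 * Accumulate smaps statistics for one mapped page (or a whole compound
 * page when @compound is true).  A page mapped only here is charged in
 * full to the private_* counters and Pss; a shared subpage is charged
 * PAGE_SIZE to shared_* and PAGE_SIZE/mapcount to Pss, so that Pss adds
 * up to RSS when summed over all users of the page.
 */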
static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty)
{
	int i, nr = compound ? 1 << compound_order(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page is mapped with a PTE, it
	 * would elevate page_count().
	 */
	if (page_count(page) == 1) {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
		return;
	}

	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2) {
			if (dirty || PageDirty(page))
				mss->shared_dirty += PAGE_SIZE;
			else
				mss->shared_clean += PAGE_SIZE;
			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
		} else {
			if (dirty || PageDirty(page))
				mss->private_dirty += PAGE_SIZE;
			else
				mss->private_clean += PAGE_SIZE;
			mss->pss += PAGE_SIZE << PSS_SHIFT;
		}
	}
}

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	mss->swap += shmem_partial_swap_usage(
			walk->vma->vm_file->f_mapping, addr, end);

	return 0;
}
#endif

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
							&& pte_none(*pte))) {
		page = find_get_entry(vma->vm_file->f_mapping,
						linear_page_index(vma, addr));
		if (!page)
			return;

		if (radix_tree_exceptional_entry(page))
			mss->swap += PAGE_SIZE;
		else
			put_page(page);

		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else
		VM_BUG_ON_PAGE(1, page);
	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#endif
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#endif /* HUGETLB_PAGE */
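
/*
 * Weak hook that lets an architecture append its own lines to the smaps
 * output for a VMA (for example the ProtectionKey line mentioned above).
 */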
void __weak arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
		.hugetlb_entry = smaps_hugetlb_range,
#endif
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);

#ifdef CONFIG_SHMEM
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE)) {
			mss.swap = shmem_swapped;
		} else {
			mss.check_shmem_swap = true;
			smaps_walk.pte_hole = smaps_pte_hole;
		}
	}
#endif

	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "ShmemPmdMapped: %8lu kB\n"
		   "Shared_Hugetlb: %8lu kB\n"
		   "Private_Hugetlb: %7lu kB\n"
		   "Swap:           %8lu kB\n"
		   "SwapPss:        %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.shmem_thp >> 10,
		   mss.shared_hugetlb >> 10,
		   mss.private_hugetlb >> 10,
		   mss.swap >> 10,
		   (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	arch_show_smap(m, vma);
	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
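
/*
 * Values accepted by writes to /proc/<pid>/clear_refs:
 *   1  clear the accessed/referenced bits of all pages
 *   2  clear them for anonymous pages only
 *   3  clear them for file-mapped pages only
 *   4  clear the soft-dirty bits instead (see soft-dirty.txt)
 *   5  reset the peak RSS (VmHWM) to the current RSS
 */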
enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = ptep_modify_prot_start(vma->vm_mm, addr, pte);
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma->vm_mm, addr, pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, pte, ptent);
	}
}
#else
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}
#endif

#if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_soft_dirty(pmd);

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}
#else
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		test_and_clear_page_young(page);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
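
/*
 * Decide whether the walk descends into a VMA: returning 1 makes
 * walk_page_range() skip it entirely (PFN mappings, and VMAs that the
 * requested clear_refs type does not apply to).
 */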
static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			if (down_write_killable(&mm->mmap_sem)) {
				count = -EINTR;
				goto out_mm;
			}

			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				if (down_write_killable(&mm->mmap_sem)) {
					count = -EINTR;
					goto out_mm;
				}
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, ~0UL, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
	bool show_pfn;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
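
/*
 * Fill pagemap entries for a hole in the walk: ranges with no VMA are
 * reported as empty entries, while unmapped addresses inside a
 * VM_SOFTDIRTY vma still carry the soft-dirty bit.
 */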
static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		if (pm->show_pfn)
			frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (page && page_mapcount(page) == 1)
		flags |= PM_MMAP_EXCLUSIVE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}
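
/*
 * Emit one pagemap entry per PAGE_SIZE step of [addr, end).  A pmd-mapped
 * THP becomes a run of present entries with consecutive frame numbers;
 * otherwise each pte is translated individually.
 */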
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmdp, vma);
	if (ptl) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		/*
		 * Currently pmd for thp is always present because thp
		 * can not be swapped-out, migrated, or HWPOISONed
		 * (split in such cases instead.)
		 * This if-check is just to prepare for future implementation.
		 */
		if (pmd_present(pmd)) {
			struct page *page = pmd_page(pmd);

			if (page_mapcount(page) == 1)
				flags |= PM_MMAP_EXCLUSIVE;

			flags |= PM_PRESENT;
			if (pm->show_pfn)
				frame = pmd_pfn(pmd) +
					((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (pm->show_pfn && (flags & PM_PRESENT))
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		if (page_mapcount(page) == 1)
			flags |= PM_MMAP_EXCLUSIVE;

		flags |= PM_PRESENT;
		if (pm->show_pfn)
			frame = pte_pfn(pte) +
				((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (pm->show_pfn && (flags & PM_PRESENT))
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bit  56    page exclusively mapped
 * Bits 57-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
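
/*
 * For reference, a reader finds the entry for a virtual address by seeking
 * to (vaddr / PAGE_SIZE) * sizeof(u64) and reading eight bytes.  A rough,
 * illustrative userspace sketch (not part of this file; "fd" is an open
 * /proc/<pid>/pagemap descriptor and "page_size" comes from sysconf()):
 *
 *	uint64_t entry;
 *	off_t off = (vaddr / page_size) * sizeof(uint64_t);
 *
 *	if (pread(fd, &entry, sizeof(entry), off) == sizeof(entry) &&
 *	    (entry & (1ULL << 63)))			// page present
 *		pfn = entry & ((1ULL << 55) - 1);	// bits 0-54
 */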
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	/* do not disclose physical addresses: attack vector */
	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
					      struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pmd_present(pmd))
		return NULL;

	page = vm_normal_page_pmd(vma, addr, pmd);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}
#endif
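
/*
 * Per-pmd callback for the numa_maps walk: account every mapped page
 * (including pmd-mapped transparent huge pages) to its node in
 * struct numa_maps.
 */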
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		struct page *page;

		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
		if (page)
			gather_stats(page, md, pmd_dirty(*pmd),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
#endif
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	pte_t huge_pte = huge_ptep_get(pte);
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(huge_pte))
		return 0;

	page = pte_page(huge_pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(huge_pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else if (is_stack(proc_priv, vma, is_pid)) {
		seq_puts(m, " stack");
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */