#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap, ptes, pmds;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	ptes = PTRS_PER_PTE * sizeof(pte_t) * atomic_long_read(&mm->nr_ptes);
	pmds = PTRS_PER_PMD * sizeof(pmd_t) * mm_nr_pmds(mm);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmPMD:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		ptes >> 10,
		pmds >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
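/*
 * Worked example of the kB conversion used above: page counts become
 * kB via "<< (PAGE_SHIFT - 10)".  Assuming 4K pages, that shift is 2,
 * so a total_vm of 300 pages is reported as "VmSize: 1200 kB".
 */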
#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv)
{
	struct mm_struct *mm = priv->mm;

	release_task_mempolicy(priv);
	up_read(&mm->mmap_sem);
	mmput(mm);
}

static struct vm_area_struct *
m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma == priv->tail_vma)
		return NULL;
	return vma->vm_next ?: priv->tail_vma;
}

static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	if (m->count < m->size)	/* vma is copied successfully */
		m->version = m_next_vma(m->private, vma) ? vma->vm_start : -1UL;
}

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next;

	(*pos)++;
	next = m_next_vma(priv, v);
	if (!next)
		vma_stop(priv);
	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;

	if (!IS_ERR_OR_NULL(v))
		vma_stop(priv);
	if (priv->task) {
		put_task_struct(priv->task);
		priv->task = NULL;
	}
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}
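/*
 * Lifetime note: proc_mem_open() above hands back an mm_struct pinned
 * only by an mm_count reference, which proc_map_release() below drops
 * with mmdrop().  Each walk additionally takes an mm_users reference
 * in m_start() (atomic_inc_not_zero) and drops it via mmput() in
 * vma_stop(), so the address space itself may be torn down between
 * reads while the open file keeps just the bare mm_struct alive.
 */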
static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

static pid_t pid_of_stack(struct proc_maps_private *priv,
				struct vm_area_struct *vma, bool is_pid)
{
	struct inode *inode = priv->inode;
	struct task_struct *task;
	pid_t ret = 0;

	rcu_read_lock();
	task = pid_task(proc_pid(inode), PIDTYPE_PID);
	if (task) {
		task = task_of_stack(task, vma, is_pid);
		if (task)
			ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
	}
	rcu_read_unlock();

	return ret;
}
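/*
 * show_map_vma() below emits one /proc/<pid>/maps line per VMA, e.g.
 * (illustrative):
 *
 *	00400000-0040b000 r-xp 00000000 08:01 1234       /bin/cat
 */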
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = pid_of_stack(priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				seq_pad(m, ' ');
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	show_map_vma(m, v, is_pid);
	m_cache_vma(m, v);
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we adopt a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
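/*
 * Worked example of the fixed-point accounting below (smaps_account(),
 * assuming 4K pages): a page mapped by three processes contributes
 * (4096 << PSS_SHIFT) / 3 = 5592405 to pss, which the final
 * ">> PSS_SHIFT" in show_smap() turns back into roughly 1365 bytes.
 */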
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	u64 pss;
};

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		unsigned long size, bool young, bool dirty)
{
	int mapcount;

	if (PageAnon(page))
		mss->anonymous += size;

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || PageReferenced(page))
		mss->referenced += size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		u64 pss_delta;

		if (dirty || PageDirty(page))
			mss->shared_dirty += size;
		else
			mss->shared_clean += size;
		pss_delta = (u64)size << PSS_SHIFT;
		do_div(pss_delta, mapcount);
		mss->pss += pss_delta;
	} else {
		if (dirty || PageDirty(page))
			mss->private_dirty += size;
		else
			mss->private_clean += size;
		mss->pss += (u64)size << PSS_SHIFT;
	}
}

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent))
			mss->swap += PAGE_SIZE;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	}

	if (!page)
		return;
	smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page;

	/* FOLL_DUMP will return -EFAULT on huge zero page */
	page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	if (IS_ERR_OR_NULL(page))
		return;
	mss->anonymous_thp += HPAGE_PMD_SIZE;
	smaps_account(mss, page, HPAGE_PMD_SIZE,
			pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}
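/*
 * Accounting note on the THP path above: smaps_pmd_entry() accounts a
 * whole HPAGE_PMD_SIZE in one step.  Assuming x86-64 with 4K base
 * pages, one 2 MB transparent huge page therefore contributes 512
 * pages of Rss at once rather than via 512 individual pte visits.
 */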
static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
#ifdef CONFIG_X86_INTEL_MPX
		[ilog2(VM_MPX)]		= "mp",
#endif
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}
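/*
 * Example output of show_smap_vma_flags() above (illustrative): the
 * main executable's text segment typically prints
 * "VmFlags: rd ex mr mw me dw".
 */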
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	/* mmap_sem is held in m_start */
	walk_page_vma(vma, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	show_smap_vma_flags(m, vma);
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY
static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well. See the
	 * Documentation/vm/soft-dirty.txt for full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;

	if (pte_present(ptent)) {
		ptent = pte_wrprotect(ptent);
		ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
	}

	set_pte_at(vma->vm_mm, addr, pte, ptent);
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmd_wrprotect(pmd);
	pmd = pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);

	if (vma->vm_flags & VM_SOFTDIRTY)
		vma->vm_flags &= ~VM_SOFTDIRTY;

	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
}

#else

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
}

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmdp)
{
}
#endif
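/*
 * Typical soft-dirty round trip (see Documentation/vm/soft-dirty.txt):
 * a tracker writes "4" to /proc/<pid>/clear_refs to clear the bits and
 * write-protect the ptes, lets the task run, then reads bit 55 of each
 * /proc/<pid>/pagemap entry to learn which pages were written since.
 */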
static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty_pmd(vma, addr, pmd);
			goto out;
		}

		page = pmd_page(*pmd);

		/* Clear accessed and referenced bits. */
		pmdp_test_and_clear_young(vma, addr, pmd);
		ClearPageReferenced(page);
out:
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static int clear_refs_test_walk(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = walk->vma;

	if (vma->vm_flags & VM_PFNMAP)
		return 1;

	/*
	 * Writing 1 to /proc/pid/clear_refs affects all pages.
	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
	 * Writing 4 to /proc/pid/clear_refs affects all pages.
	 */
	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
		return 1;
	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
		return 1;
	return 0;
}
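/*
 * Usage from userspace (illustrative): estimating a working set by
 * clearing the referenced bits via clear_refs_write() below and
 * re-reading smaps after a while:
 *
 *	echo 1 > /proc/$pid/clear_refs
 *	... let the task run ...
 *	grep Referenced /proc/$pid/smaps
 */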
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.test_walk = clear_refs_test_walk,
			.mm = mm,
			.private = &cp,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY) {
			for (vma = mm->mmap; vma; vma = vma->vm_next) {
				if (!(vma->vm_flags & VM_SOFTDIRTY))
					continue;
				up_read(&mm->mmap_sem);
				down_write(&mm->mmap_sem);
				for (vma = mm->mmap; vma; vma = vma->vm_next) {
					vma->vm_flags &= ~VM_SOFTDIRTY;
					vma_set_page_prot(vma);
				}
				downgrade_write(&mm->mmap_sem);
				break;
			}
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		}
		walk_page_range(0, ~0UL, &clear_refs_walk);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_end(mm, 0, -1);
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
#define PM_PFRAME_BITS		55
#define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
#define PM_SOFT_DIRTY		BIT_ULL(55)
#define PM_FILE			BIT_ULL(61)
#define PM_SWAP			BIT_ULL(62)
#define PM_PRESENT		BIT_ULL(63)

#define PM_END_OF_BUFFER	1

static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
{
	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}
static int pagemap_pte_hole(unsigned long start, unsigned long end,
				struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr = start;
	int err = 0;

	while (addr < end) {
		struct vm_area_struct *vma = find_vma(walk->mm, addr);
		pagemap_entry_t pme = make_pme(0, 0);
		/* End of address space hole, which we mark as non-present. */
		unsigned long hole_end;

		if (vma)
			hole_end = min(end, vma->vm_start);
		else
			hole_end = end;

		for (; addr < hole_end; addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}

		if (!vma)
			break;

		/* Addresses in the VMA. */
		if (vma->vm_flags & VM_SOFTDIRTY)
			pme = make_pme(0, PM_SOFT_DIRTY);
		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				goto out;
		}
	}
out:
	return err;
}

static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame = 0, flags = 0;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags |= PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
		if (pte_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry;
		if (pte_swp_soft_dirty(pte))
			flags |= PM_SOFT_DIRTY;
		entry = pte_to_swp_entry(pte);
		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags |= PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;
	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	return make_pme(frame, flags);
}
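/*
 * Worked example of the swap encoding above, assuming
 * MAX_SWAPFILES_SHIFT == 5: swap type 1 at offset 0x1234 packs into
 * frame = 1 | (0x1234 << 5) = 0x24681, matching the "bits 0-4 type,
 * bits 5-54 offset" layout documented before pagemap_read() below.
 */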
static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct pagemapread *pm = walk->private;
	spinlock_t *ptl;
	pte_t *pte, *orig_pte;
	int err = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
		u64 flags = 0, frame = 0;
		pmd_t pmd = *pmdp;

		if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
			flags |= PM_SOFT_DIRTY;

		/*
		 * Currently pmd for thp is always present because thp
		 * cannot be swapped-out, migrated, or HWPOISONed
		 * (split in such cases instead.)
		 * This if-check is just to prepare for future implementation.
		 */
		if (pmd_present(pmd)) {
			flags |= PM_PRESENT;
			frame = pmd_pfn(pmd) +
				((addr & ~PMD_MASK) >> PAGE_SHIFT);
		}

		for (; addr != end; addr += PAGE_SIZE) {
			pagemap_entry_t pme = make_pme(frame, flags);

			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
			if (flags & PM_PRESENT)
				frame++;
		}
		spin_unlock(ptl);
		return err;
	}

	if (pmd_trans_unstable(pmdp))
		return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

	/*
	 * We can assume that @vma always points to a valid one and @end never
	 * goes beyond vma->vm_end.
	 */
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
	for (; addr < end; pte++, addr += PAGE_SIZE) {
		pagemap_entry_t pme;

		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	pte_unmap_unlock(orig_pte, ptl);

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	struct vm_area_struct *vma = walk->vma;
	u64 flags = 0, frame = 0;
	int err = 0;
	pte_t pte;

	if (vma->vm_flags & VM_SOFTDIRTY)
		flags |= PM_SOFT_DIRTY;

	pte = huge_ptep_get(ptep);
	if (pte_present(pte)) {
		struct page *page = pte_page(pte);

		if (!PageAnon(page))
			flags |= PM_FILE;

		flags |= PM_PRESENT;
		frame = pte_pfn(pte) +
			((addr & ~hmask) >> PAGE_SHIFT);
	}

	for (; addr != end; addr += PAGE_SIZE) {
		pagemap_entry_t pme = make_pme(frame, flags);

		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
		if (flags & PM_PRESENT)
			frame++;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bit  55    pte is soft-dirty (see Documentation/vm/soft-dirty.txt)
 * Bits 56-60 zero
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
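/*
 * Minimal userspace decoding sketch (illustrative, not part of this
 * file): to inspect virtual address "vaddr" of a process, seek to
 * (vaddr / PAGE_SIZE) * 8 in /proc/<pid>/pagemap, read a u64 "ent",
 * then: present = (ent >> 63) & 1; pfn = ent & ((1ULL << 55) - 1).
 */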
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct mm_struct *mm = file->private_data;
	struct pagemapread pm;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int ret = 0, copied = 0;

	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_mm;

	ret = 0;
	if (!count)
		goto out_mm;

	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_mm;

	pagemap_walk.pmd_entry = pagemap_pmd_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = mm->task_size;

	/* watch out for wraparound */
	if (svpfn > mm->task_size >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_free;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_free:
	kfree(pm.buffer);
out_mm:
	mmput(mm);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	struct mm_struct *mm;

	/* do not disclose physical addresses: attack vector */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(mm))
		return PTR_ERR(mm);
	file->private_data = mm;
	return 0;
}

static int pagemap_release(struct inode *inode, struct file *file)
{
	struct mm_struct *mm = file->private_data;

	if (mm)
		mmdrop(mm);
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
	.release	= pagemap_release,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
#ifdef CONFIG_NUMA

struct numa_maps {
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md = walk->private;
	struct vm_area_struct *vma = walk->vma;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (!pte_present(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif
/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {
		.hugetlb_entry = gather_hugetlb_stats,
		.pmd_entry = gather_pte_stats,
		.private = md,
		.mm = mm,
	};
	struct mempolicy *pol;
	char buffer[64];
	int nid;

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	pol = __get_vma_policy(vma, vma->vm_start);
	if (pol) {
		mpol_to_str(buffer, sizeof(buffer), pol);
		mpol_cond_put(pol);
	} else {
		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
	}

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_puts(m, " file=");
		seq_file_path(m, file, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_puts(m, " heap");
	} else {
		pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_puts(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_puts(m, " huge");

	/* mmap_sem is held by m_start */
	walk_page_vma(vma, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(nid, N_MEMORY)
		if (md->node[nid])
			seq_printf(m, " N%d=%lu", nid, md->node[nid]);

	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
out:
	seq_putc(m, '\n');
	m_cache_vma(m, vma);
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};
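/*
 * Example /proc/<pid>/numa_maps line produced above (illustrative):
 *
 *	00400000 default file=/bin/cat mapped=2 N0=2 kernelpagesize_kB=4
 */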
static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct numa_maps_private));
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};
#endif /* CONFIG_NUMA */