#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

#ifdef CONFIG_NUMA
/*
 * These functions are for numa_maps but called in generic **maps seq_file
 * ->start(), ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting.  The problem
 * here is how to avoid accessing a dead mempolicy object.
 *
 * Because we're holding mmap_sem while reading the seq_file, it's safe to
 * access each vma's mempolicy: no vma object will drop its reference to a
 * mempolicy under us.
 *
 * A task's mempolicy (task->mempolicy) has different behavior.  task->mempolicy
 * is set and replaced under mmap_sem but unrefed and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we cannot
 * guarantee the task never exits under us.  But taking task_lock() around
 * get_vma_policy() causes a lock order problem.
 *
 * To access task->mempolicy without a lock, we take a reference count on the
 * object pointed to by task->mempolicy and remember it.  This guarantees
 * that task->mempolicy points to an alive object or NULL in numa_maps accesses.
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr to hit with
	 * mmap_cache most of the time.  We have zero last_addr at
	 * the beginning and also after lseek.  We will have -1 last_addr
	 * after the end of the vmas.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check the vma index is within the range and do
	 * sequential scan until m_index.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter: (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 *	- 1M 3-user-pages add up to 8KB errors;
 *	- supports mapcount up to 2^24, or 16M;
 *	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};


static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/*
	 * Accumulate the size in pages that have been accessed.
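	 * A page counts as accessed if its pte is young or the page
	 * itself is marked PageReferenced.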
	 */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case if we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean >> 10,
		   mss.shared_dirty >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
			   mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * We do not want to have constant page-shift bits sitting in
 * pagemap entries and are about to reuse them some time soon.
 *
 * Here's the "migration strategy":
 * 1. when the system boots these bits remain what they are,
 *    but a warning about the future change is printed in the log;
 * 2. once anyone clears soft-dirty bits via the clear_refs file,
 *    this flag is set to denote that the user is aware of the
 *    new API and those page-shift bits change their meaning.
 *    The respective warning is printed in dmesg;
 * 3. in a couple of releases we will remove all the mentions
 *    of page-shift in pagemap entries.
 */

static bool soft_dirty_cleared __read_mostly;

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_LAST,
};

struct clear_refs_private {
	struct vm_area_struct *vma;
	enum clear_refs_types type;
};

static inline void clear_soft_dirty(struct vm_area_struct *vma,
		unsigned long addr, pte_t *pte)
{
#ifdef CONFIG_MEM_SOFT_DIRTY
	/*
	 * The soft-dirty tracker uses #PF-s to catch writes
	 * to pages, so write-protect the pte as well.
	 * See Documentation/vm/soft-dirty.txt for the full description
	 * of how soft-dirty works.
	 */
	pte_t ptent = *pte;
	ptent = pte_wrprotect(ptent);
	ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
	set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct clear_refs_private *cp = walk->private;
	struct vm_area_struct *vma = cp->vma;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
			clear_soft_dirty(vma, addr, pte);
			continue;
		}

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	enum clear_refs_types type;
	int itype;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &itype);
	if (rv < 0)
		return rv;
	type = (enum clear_refs_types)itype;
	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
		return -EINVAL;

	if (type == CLEAR_REFS_SOFT_DIRTY) {
		soft_dirty_cleared = true;
		pr_warn_once("The pagemap bits 55-60 have changed their meaning! "
				"See the linux/Documentation/vm/pagemap.txt for details.\n");
	}

	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct clear_refs_private cp = {
			.type = type,
		};
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
			.private = &cp,
		};
		down_read(&mm->mmap_sem);
		if (type == CLEAR_REFS_SOFT_DIRTY)
			mmu_notifier_invalidate_range_start(mm, 0, -1);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			cp.vma = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * Anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
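			 *
			 * Writing 4 (CLEAR_REFS_SOFT_DIRTY) affects all pages:
			 * instead of clearing the referenced bits it clears
			 * the soft-dirty bits, see clear_soft_dirty() above.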
833 */ 834 if (type == CLEAR_REFS_ANON && vma->vm_file) 835 continue; 836 if (type == CLEAR_REFS_MAPPED && !vma->vm_file) 837 continue; 838 walk_page_range(vma->vm_start, vma->vm_end, 839 &clear_refs_walk); 840 } 841 if (type == CLEAR_REFS_SOFT_DIRTY) 842 mmu_notifier_invalidate_range_end(mm, 0, -1); 843 flush_tlb_mm(mm); 844 up_read(&mm->mmap_sem); 845 mmput(mm); 846 } 847 put_task_struct(task); 848 849 return count; 850 } 851 852 const struct file_operations proc_clear_refs_operations = { 853 .write = clear_refs_write, 854 .llseek = noop_llseek, 855 }; 856 857 typedef struct { 858 u64 pme; 859 } pagemap_entry_t; 860 861 struct pagemapread { 862 int pos, len; 863 pagemap_entry_t *buffer; 864 bool v2; 865 }; 866 867 #define PAGEMAP_WALK_SIZE (PMD_SIZE) 868 #define PAGEMAP_WALK_MASK (PMD_MASK) 869 870 #define PM_ENTRY_BYTES sizeof(u64) 871 #define PM_STATUS_BITS 3 872 #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) 873 #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) 874 #define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK) 875 #define PM_PSHIFT_BITS 6 876 #define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS) 877 #define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET) 878 #define __PM_PSHIFT(x) (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK) 879 #define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1) 880 #define PM_PFRAME(x) ((x) & PM_PFRAME_MASK) 881 /* in "new" pagemap pshift bits are occupied with more status bits */ 882 #define PM_STATUS2(v2, x) (__PM_PSHIFT(v2 ? x : PAGE_SHIFT)) 883 884 #define __PM_SOFT_DIRTY (1LL) 885 #define PM_PRESENT PM_STATUS(4LL) 886 #define PM_SWAP PM_STATUS(2LL) 887 #define PM_FILE PM_STATUS(1LL) 888 #define PM_NOT_PRESENT(v2) PM_STATUS2(v2, 0) 889 #define PM_END_OF_BUFFER 1 890 891 static inline pagemap_entry_t make_pme(u64 val) 892 { 893 return (pagemap_entry_t) { .pme = val }; 894 } 895 896 static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme, 897 struct pagemapread *pm) 898 { 899 pm->buffer[pm->pos++] = *pme; 900 if (pm->pos >= pm->len) 901 return PM_END_OF_BUFFER; 902 return 0; 903 } 904 905 static int pagemap_pte_hole(unsigned long start, unsigned long end, 906 struct mm_walk *walk) 907 { 908 struct pagemapread *pm = walk->private; 909 unsigned long addr; 910 int err = 0; 911 pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2)); 912 913 for (addr = start; addr < end; addr += PAGE_SIZE) { 914 err = add_to_pagemap(addr, &pme, pm); 915 if (err) 916 break; 917 } 918 return err; 919 } 920 921 static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, 922 struct vm_area_struct *vma, unsigned long addr, pte_t pte) 923 { 924 u64 frame, flags; 925 struct page *page = NULL; 926 int flags2 = 0; 927 928 if (pte_present(pte)) { 929 frame = pte_pfn(pte); 930 flags = PM_PRESENT; 931 page = vm_normal_page(vma, addr, pte); 932 } else if (is_swap_pte(pte)) { 933 swp_entry_t entry = pte_to_swp_entry(pte); 934 935 frame = swp_type(entry) | 936 (swp_offset(entry) << MAX_SWAPFILES_SHIFT); 937 flags = PM_SWAP; 938 if (is_migration_entry(entry)) 939 page = migration_entry_to_page(entry); 940 } else { 941 *pme = make_pme(PM_NOT_PRESENT(pm->v2)); 942 return; 943 } 944 945 if (page && !PageAnon(page)) 946 flags |= PM_FILE; 947 if (pte_soft_dirty(pte)) 948 flags2 |= __PM_SOFT_DIRTY; 949 950 *pme = make_pme(PM_PFRAME(frame) | PM_STATUS2(pm->v2, flags2) | flags); 951 } 952 953 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 954 static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct 
		pmd_t pmd, int offset, int pmd_flags2)
{
	/*
	 * Currently pmd for thp is always present because thp can not be
	 * swapped-out, migrated, or HWPOISONed (split in such cases instead.)
	 * This if-check is just to prepare for future implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_STATUS2(pm->v2, pmd_flags2) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT(pm->v2));
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
		pmd_t pmd, int offset, int pmd_flags2)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		int pmd_flags2;

		pmd_flags2 = (pmd_soft_dirty(*pmd) ? __PM_SOFT_DIRTY : 0);
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, pm, *pmd, offset, pmd_flags2);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT(pm->v2));
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_STATUS2(pm->v2, 0) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT(pm->v2));
}

/* This function walks within one hugetlb entry in the single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, pm, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

/*
 * /proc/pid/pagemap - an array mapping virtual pages to pfns
 *
 * For each page in the address space, this file contains one 64-bit entry
 * consisting of the following:
 *
 * Bits 0-54  page frame number (PFN) if present
 * Bits 0-4   swap type if swapped
 * Bits 5-54  swap offset if swapped
 * Bits 55-60 page shift (page size = 1<<page shift)
 * Bit  61    page is file-page or shared-anon
 * Bit  62    page swapped
 * Bit  63    page present
 *
 * If the page is not present but in swap, then the PFN contains an
 * encoding of the swap file number and the page's offset into the
 * swap. Unmapped pages return a null PFN. This allows determining
 * precisely which pages are mapped (or in swap) and comparing mapped
 * pages between processes.
 *
 * Efficient users of this interface will use /proc/pid/maps to
 * determine which areas of memory are actually mapped and llseek to
 * skip over unmapped regions.
 */
static ssize_t pagemap_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct task_struct *task = get_proc_task(file_inode(file));
	struct mm_struct *mm;
	struct pagemapread pm;
	int ret = -ESRCH;
	struct mm_walk pagemap_walk = {};
	unsigned long src;
	unsigned long svpfn;
	unsigned long start_vaddr;
	unsigned long end_vaddr;
	int copied = 0;

	if (!task)
		goto out;

	ret = -EINVAL;
	/* file position must be aligned */
	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
		goto out_task;

	ret = 0;
	if (!count)
		goto out_task;

	pm.v2 = soft_dirty_cleared;
	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
	ret = -ENOMEM;
	if (!pm.buffer)
		goto out_task;

	mm = mm_access(task, PTRACE_MODE_READ);
	ret = PTR_ERR(mm);
	if (!mm || IS_ERR(mm))
		goto out_free;

	pagemap_walk.pmd_entry = pagemap_pte_range;
	pagemap_walk.pte_hole = pagemap_pte_hole;
#ifdef CONFIG_HUGETLB_PAGE
	pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
#endif
	pagemap_walk.mm = mm;
	pagemap_walk.private = &pm;

	src = *ppos;
	svpfn = src / PM_ENTRY_BYTES;
	start_vaddr = svpfn << PAGE_SHIFT;
	end_vaddr = TASK_SIZE_OF(task);

	/* watch out for wraparound */
	if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
		start_vaddr = end_vaddr;

	/*
	 * The odds are that this will stop walking way
	 * before end_vaddr, because the length of the
	 * user buffer is tracked in "pm", and the walk
	 * will stop when we hit the end of the buffer.
	 */
	ret = 0;
	while (count && (start_vaddr < end_vaddr)) {
		int len;
		unsigned long end;

		pm.pos = 0;
		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
		/* overflow ? */
		if (end < start_vaddr || end > end_vaddr)
			end = end_vaddr;
		down_read(&mm->mmap_sem);
		ret = walk_page_range(start_vaddr, end, &pagemap_walk);
		up_read(&mm->mmap_sem);
		start_vaddr = end;

		len = min(count, PM_ENTRY_BYTES * pm.pos);
		if (copy_to_user(buf, pm.buffer, len)) {
			ret = -EFAULT;
			goto out_mm;
		}
		copied += len;
		buf += len;
		count -= len;
	}
	*ppos += copied;
	if (!ret || ret == PM_END_OF_BUFFER)
		ret = copied;

out_mm:
	mmput(mm);
out_free:
	kfree(pm.buffer);
out_task:
	put_task_struct(task);
out:
	return ret;
}

static int pagemap_open(struct inode *inode, struct file *file)
{
	pr_warn_once("Bits 55-60 of /proc/PID/pagemap entries are about "
			"to stop being page-shift some time soon. See the "
			"linux/Documentation/vm/pagemap.txt for details.\n");
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
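		/*
		 * can_gather_numa_stats() returns NULL for ptes that are not
		 * present and for pages we do not account: special mappings,
		 * reserved pages, and pages on nodes without memory.
		 */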
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */
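
/*
 * Illustrative sketch (not part of this file and not compiled with it):
 * a minimal userspace reader for /proc/PID/pagemap, decoding entries
 * according to the layout documented above pagemap_read().  The function
 * name, the choice of /proc/self/pagemap and the error handling are
 * examples only; bits 55-60 are ignored here since, as pagemap_open()
 * warns, their meaning is about to change.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

static int dump_pagemap_entry(unsigned long vaddr)
{
	uint64_t entry;
	long page_size = sysconf(_SC_PAGESIZE);
	off_t offset = (off_t)(vaddr / page_size) * sizeof(entry);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
		close(fd);
		return -1;
	}
	close(fd);

	printf("present=%d swapped=%d\n",
	       (int)((entry >> 63) & 1), (int)((entry >> 62) & 1));
	if ((entry >> 63) & 1)		/* Bits 0-54: page frame number */
		printf("pfn=0x%llx\n",
		       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	else if ((entry >> 62) & 1)	/* Bits 0-4: type, bits 5-54: offset */
		printf("swap type=%llu offset=%llu\n",
		       (unsigned long long)(entry & 0x1f),
		       (unsigned long long)((entry >> 5) & ((1ULL << 50) - 1)));
	return 0;
}
#endif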